Upgrade V8 to version 4.9.385.28

https://chromium.googlesource.com/v8/v8/+/4.9.385.28

FPIIM-449

Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/crankshaft/OWNERS b/src/crankshaft/OWNERS
new file mode 100644
index 0000000..2918ddd
--- /dev/null
+++ b/src/crankshaft/OWNERS
@@ -0,0 +1,7 @@
+set noparent
+
+bmeurer@chromium.org
+danno@chromium.org
+jarin@chromium.org
+jkummerow@chromium.org
+verwaest@chromium.org
diff --git a/src/crankshaft/arm/OWNERS b/src/crankshaft/arm/OWNERS
new file mode 100644
index 0000000..906a5ce
--- /dev/null
+++ b/src/crankshaft/arm/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/src/crankshaft/arm/lithium-arm.cc b/src/crankshaft/arm/lithium-arm.cc
new file mode 100644
index 0000000..cd736ec
--- /dev/null
+++ b/src/crankshaft/arm/lithium-arm.cc
@@ -0,0 +1,2636 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/arm/lithium-arm.h"
+
+#include <sstream>
+
+#include "src/crankshaft/arm/lithium-codegen-arm.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+
+namespace v8 {
+namespace internal {
+
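+// Expand a CompileToNative body for every concrete lithium instruction;
+// each one simply dispatches to the matching LCodeGen::Do<type> visitor.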
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
+  DCHECK(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
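+  // A gap holds one parallel move per inner position (BEFORE, START, END,
+  // AFTER); the gap is redundant only if every move is.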
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::ROR: return "ror-t";
+    case Token::SHL: return "shl-t";
+    case Token::SAR: return "sar-t";
+    case Token::SHR: return "shr-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(),
+              true_block_id(), false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+
+void LCallFunction::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  function()->PrintTo(stream);
+  if (hydrogen()->HasVectorAndSlot()) {
+    stream->Add(" (type-feedback-vector ");
+    temp_vector()->PrintTo(stream);
+    stream->Add(" ");
+    temp_slot()->PrintTo(stream);
+    stream->Add(")");
+  }
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  std::ostringstream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.str().c_str());
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d]", base_offset());
+  } else {
+    stream->Add("]");
+  }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d] <-", base_offset());
+  } else {
+    stream->Add("] <- ");
+  }
+
+  if (value() == NULL) {
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
+           hydrogen()->value()->representation().IsDouble());
+    stream->Add("<the hole(nan)>");
+  } else {
+    value()->PrintTo(stream);
+  }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Reserve an extra slot so that a double-width slot occupies two
+  // consecutive spill indices.
+  if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
+  return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  int index = GetNextSpillIndex(kind);
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    DCHECK(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+  DCHECK(is_unused());
+  chunk_ = new(zone()) LPlatformChunk(info(), graph());
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new (zone())
+      LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                             LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
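+  // Values flagged as EmitAtUses are compiled lazily; visit the defining
+  // instruction here, at its point of use.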
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr, int index) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(
+    LTemplateResultInstruction<1>* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
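+  // Capture the current hydrogen environment so the deoptimizer can
+  // reconstruct the unoptimized frame state at this instruction.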
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator,
+                                           &objects_to_materialize));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  DCHECK(!instr->HasPointerMap());
+  instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+
+    HValue* right_value = instr->right();
+    LOperand* right = NULL;
+    int constant_value = 0;
+    bool does_deopt = false;
+    if (right_value->IsConstant()) {
+      HConstant* constant = HConstant::cast(right_value);
+      right = chunk_->DefineConstantOperand(constant);
+      constant_value = constant->Integer32Value() & 0x1f;
+      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+      // truncated to smi.
+      if (instr->representation().IsSmi() && constant_value > 0) {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
+    } else {
+      right = UseRegisterAtStart(right_value);
+    }
+
+    // A logical shift right by zero can also deoptimize: the result is
+    // interpreted as a uint32 and may not be representable as an int32.
+    if (op == Token::SHR && constant_value == 0) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    }
+
+    LInstruction* result =
+        DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+    return does_deopt ? AssignEnvironment(result) : result;
+  } else {
+    return DoArithmeticT(op, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
+  if (op == Token::MOD) {
+    LOperand* left = UseFixedDouble(instr->left(), d0);
+    LOperand* right = UseFixedDouble(instr->right(), d1);
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return MarkAsCall(DefineFixedDouble(result, d0), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return DefineAsRegister(result);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left_operand = UseFixed(left, r1);
+  LOperand* right_operand = UseFixed(right, r0);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  DCHECK(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    DCHECK(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    DCHECK(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      DCHECK(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    DCHECK(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new(zone()) LDummy());
+    } else {
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new(zone())
+          LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    HBasicBlock* successor;
+    if (current->IsControlInstruction() &&
+        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+        successor != NULL) {
+      instr = new(zone()) LGoto(successor);
+    } else {
+      instr = current->CompileToLithium(this);
+    }
+  }
+
+  argument_count_ += current->argument_delta();
+  DCHECK(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    AddInstruction(instr, current);
+  }
+
+  current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+                                   HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+  instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+  // Make sure that the lithium instruction has either no fixed register
+  // constraints in temps or the result OR no uses that are only used at
+  // start. If this invariant doesn't hold, the register allocator can decide
+  // to insert a split of a range immediately before the instruction due to an
+  // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+  // the same register that is used for the use-at-start.
+  // See https://code.google.com/p/chromium/issues/detail?id=201590
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
+    int fixed = 0;
+    int used_at_start = 0;
+    for (UseIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->IsUsedAtStart()) ++used_at_start;
+    }
+    if (instr->Output() != NULL) {
+      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+    }
+    for (TempIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->HasFixedPolicy()) ++fixed;
+    }
+    DCHECK(fixed == 0 || used_at_start == 0);
+  }
+#endif
+
+  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+    instr = AssignPointerMap(instr);
+  }
+  if (FLAG_stress_environments && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+  chunk_->AddInstruction(instr, current_block_);
+
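+  // Calls can deoptimize lazily on return. Record a lazy bailout point,
+  // replaying the following simulate's environment when the call has
+  // observable side effects so execution resumes after them.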
+  if (instr->IsCall() || instr->IsPrologue()) {
+    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+    if (hydrogen_val->HasObservableSideEffects()) {
+      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+      sim->ReplayEnvironment(current_block_->last_environment());
+      hydrogen_value_for_lazy_bailout = sim;
+    }
+    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+    chunk_->AddInstruction(bailout, current_block_);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+  return new (zone()) LPrologue();
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
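+  // Branching on a tagged value with incomplete type feedback may require a
+  // map check and can therefore deoptimize; such branches get an environment.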
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+      type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+  LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
+  }
+  return branch;
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new(zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* value = UseRegister(instr->value());
+  return DefineAsRegister(new(zone()) LArgumentsLength(value));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new(zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* left =
+      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+  LOperand* right =
+      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+    HHasInPrototypeChainAndBranch* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* prototype = UseRegister(instr->prototype());
+  LHasInPrototypeChainAndBranch* result =
+      new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegisterAtStart(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), r1);
+  LOperand* receiver = UseFixed(instr->receiver(), r0);
+  LOperand* length = UseFixed(instr->length(), r2);
+  LOperand* elements = UseFixed(instr->elements(), r3);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
+  return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
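+  // Expand into one LPushArgument per argument; no result instruction is
+  // produced, so the builder returns NULL.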
+  int argc = instr->OperandCount();
+  for (int i = 0; i < argc; ++i) {
+    LOperand* argument = Use(instr->argument(i));
+    AddInstruction(new(zone()) LPushArgument(argument), instr);
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+    HStoreCodeEntry* store_code_entry) {
+  LOperand* function = UseRegister(store_code_entry->function());
+  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+  return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(
+      new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new(zone()) LContext, cp);
+  }
+
+  return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+    HCallJSFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), r1);
+
+  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+    HCallWithDescriptor* instr) {
+  CallInterfaceDescriptor descriptor = instr->descriptor();
+
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  // Target
+  ops.Add(target, zone());
+  // Context
+  LOperand* op = UseFixed(instr->OperandAt(1), cp);
+  ops.Add(op, zone());
+  // Other register parameters
+  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+       i < instr->OperandCount(); i++) {
+    op =
+        UseFixed(instr->OperandAt(i),
+                 descriptor.GetRegisterParameter(
+                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+      descriptor, ops, zone());
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), r1);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, r0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFloor* result = new(zone()) LMathFloor(input);
+  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = TempDoubleRegister();
+  LMathRound* result = new(zone()) LMathRound(input, temp);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+  Representation r = instr->value()->representation();
+  LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+      ? NULL
+      : UseFixed(instr->context(), cp);
+  LOperand* input = UseRegister(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LMathAbs(context, input));
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), d0);
+  return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), d0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathClz32* result = new(zone()) LMathClz32(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LOperand* double_temp = TempDoubleRegister();
+  LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathSqrt* result = new(zone()) LMathSqrt(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathPowHalf* result = new(zone()) LMathPowHalf(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* constructor = UseFixed(instr->constructor(), r1);
+  LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), r1);
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(r3);
+    vector = FixedTemp(r2);
+  }
+
+  LCallFunction* call =
+      new (zone()) LCallFunction(context, function, slot, vector);
+  return MarkAsCall(DefineFixed(call, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    return DefineAsRegister(new(zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+          dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+       divisor != 1 && divisor != -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+          dividend, divisor));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
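+  // Without hardware integer division (SUDIV), the quotient is computed in
+  // the VFP unit and needs a double scratch register.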
+  LOperand* temp =
+      CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       (!CpuFeatures::IsSupported(SUDIV) ||
+        !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) ||
+      (!instr->IsMathFloorOfDiv() &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+          dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp =
+      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+      NULL : TempRegister();
+  LInstruction* result = DefineAsRegister(
+      new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp =
+      CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor, temp));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       (!CpuFeatures::IsSupported(SUDIV) ||
+        !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
+    return DoFlooringDivByConstI(instr);
+  } else {
+    return DoFlooringDivI(instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+          dividend, divisor));
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+          dividend, divisor));
+  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp =
+      CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+  LOperand* temp2 =
+      CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
+  LInstruction* result = DefineAsRegister(new(zone()) LModI(
+          dividend, divisor, temp, temp2));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
+  } else {
+    return DoArithmeticT(Token::MOD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    HValue* left = instr->BetterLeftOperand();
+    HValue* right = instr->BetterRightOperand();
+    LOperand* left_op;
+    LOperand* right_op;
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+    int32_t constant_value = 0;
+    if (right->IsConstant()) {
+      HConstant* constant = HConstant::cast(right);
+      constant_value = constant->Integer32Value();
+      // Constants -1, 0 and 1 can be optimized if the result can overflow.
+      // For other constants, it can be optimized only without overflow.
+      if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+        left_op = UseRegisterAtStart(left);
+        right_op = UseConstant(right);
+      } else {
+        if (bailout_on_minus_zero) {
+          left_op = UseRegister(left);
+        } else {
+          left_op = UseRegisterAtStart(left);
+        }
+        right_op = UseRegister(right);
+      }
+    } else {
+      if (bailout_on_minus_zero) {
+        left_op = UseRegister(left);
+      } else {
+        left_op = UseRegisterAtStart(left);
+      }
+      right_op = UseRegister(right);
+    }
+    LMulI* mul = new(zone()) LMulI(left_op, right_op);
+    if (right_op->IsConstantOperand()
+            ? ((can_overflow && constant_value == -1) ||
+               (bailout_on_minus_zero && constant_value <= 0))
+            : (can_overflow || bailout_on_minus_zero)) {
+      AssignEnvironment(mul);
+    }
+    return DefineAsRegister(mul);
+
+  } else if (instr->representation().IsDouble()) {
+    if (instr->HasOneUse() && (instr->uses().value()->IsAdd() ||
+                               instr->uses().value()->IsSub())) {
+      HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
+
+      if (use->IsAdd() && instr == use->left()) {
+        // This mul is the lhs of an add. The add and mul will be folded into a
+        // multiply-add in DoAdd.
+        return NULL;
+      }
+      if (instr == use->right() && use->IsAdd() && !use->left()->IsMul()) {
+        // This mul is the rhs of an add, where the lhs is not another mul.
+        // The add and mul will be folded into a multiply-add in DoAdd.
+        return NULL;
+      }
+      if (instr == use->right() && use->IsSub()) {
+        // This mul is the rhs of a sub. The sub and mul will be folded into a
+        // multiply-sub in DoSub.
+        return NULL;
+      }
+    }
+
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+    if (instr->left()->IsConstant()) {
+      // If lhs is constant, do reverse subtraction instead.
+      return DoRSub(instr);
+    }
+
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LSubI* sub = new(zone()) LSubI(left, right);
+    LInstruction* result = DefineAsRegister(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
+      return DoMultiplySub(instr->left(), HMul::cast(instr->right()));
+    }
+
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+  // Note: The lhs of the subtraction becomes the rhs of the
+  // reverse-subtraction.
+  LOperand* left = UseRegisterAtStart(instr->right());
+  LOperand* right = UseOrConstantAtStart(instr->left());
+  LRSubI* rsb = new(zone()) LRSubI(left, right);
+  LInstruction* result = DefineAsRegister(rsb);
+  if (instr->CheckFlag(HValue::kCanOverflow)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+  LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+  LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+  LOperand* addend_op = UseRegisterAtStart(addend);
+  return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+                                                     multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
+  LOperand* minuend_op = UseRegisterAtStart(minuend);
+  LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+  LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+
+  return DefineSameAsFirst(new(zone()) LMultiplySubD(minuend_op,
+                                                     multiplier_op,
+                                                     multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    LAddI* add = new(zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsExternal()) {
+    DCHECK(instr->IsConsistentExternalRepresentation());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LAddI* add = new(zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
+      return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+    }
+
+    if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
+      DCHECK(!instr->left()->IsMul() || !instr->left()->HasOneUse());
+      return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+    }
+
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    left = UseRegisterAtStart(instr->left());
+    right = UseRegisterAtStart(instr->right());
+  }
+  return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  DCHECK(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), d0);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), d1)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+  LPower* result = new(zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, d2),
+                    instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r1);
+  LOperand* right = UseFixed(instr->right(), r0);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+    HCompareMinusZeroAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  LOperand* scratch = TempRegister();
+  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r1);
+  LOperand* right = UseFixed(instr->right(), r0);
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(context, left, right);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasInstanceTypeAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+  return new(zone()) LClassOfTestAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+  LOperand* map = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
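+  // With --debug-code, extra verification is emitted that needs the index
+  // in a register and the context in cp.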
+  LOperand* index = FLAG_debug_code
+      ? UseRegisterAtStart(instr->index())
+      : UseRegisterOrConstantAtStart(instr->index());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+  return new(zone()) LSeqStringSetChar(context, string, index, value);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
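+  // At most one of index and length may be a constant operand; the emitted
+  // compare needs at least one register.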
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+      ? UseRegisterOrConstantAtStart(instr->length())
+      : UseRegisterAtStart(instr->length());
+  LInstruction* result = new(zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+    HBoundsCheckBaseIndexInformation* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // This is the control instruction marking the end of a block that
+  // completed abruptly (e.g., threw an exception). There is nothing
+  // specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  HValue* val = instr->value();
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(val);
+      return DefineSameAsFirst(new(zone()) LDummyUse(value));
+    }
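+    // A smi is also a valid tagged value, so the remaining smi cases can
+    // fall through to the tagged paths below.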
+    from = Representation::Tagged();
+  }
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
+        return DefineSameAsFirst(new(zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempDoubleRegister();
+        LInstruction* result =
+            DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegister(val);
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+      return AssignPointerMap(Define(result, result_temp));
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      return AssignEnvironment(
+          DefineAsRegister(new(zone()) LDoubleToSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+      if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+      return result;
+    }
+  } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      if (!instr->CheckFlag(HValue::kCanOverflow)) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiTag(value));
+      } else if (val->CheckFlag(HInstruction::kUint32)) {
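+        // A uint32 above kMaxInt doesn't fit in a smi; tagging it may
+        // allocate a heap number, hence the pointer map.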
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      } else {
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+      if (instr->CheckFlag(HValue::kCanOverflow)) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(to.IsDouble());
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
+      } else {
+        return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckNonSmi(value);
+  if (!instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
+    HCheckArrayBufferNotNeutered* instr) {
+  LOperand* view = UseRegisterAtStart(instr->value());
+  LCheckArrayBufferNotNeutered* result =
+      new (zone()) LCheckArrayBufferNotNeutered(view);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckInstanceType(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+  if (instr->HasMigrationTarget()) {
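+    // Migrating a deprecated map calls into the runtime, so this becomes
+    // a deferred call and needs a pointer map.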
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+  } else {
+    DCHECK(input_rep.IsSmiOrTagged());
+    // The register allocator doesn't (yet) support allocation of double
+    // temps, so reserve a double scratch register explicitly.
+    LClampTToUint8* result =
+        new(zone()) LClampTToUint8(reg, TempDoubleRegister());
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+  HValue* value = instr->value();
+  DCHECK(value->representation().IsDouble());
+  return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+  LOperand* lo = UseRegister(instr->lo());
+  LOperand* hi = UseRegister(instr->hi());
+  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub()
+      ? UseFixed(instr->context(), cp)
+      : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new(zone()) LReturn(UseFixed(instr->value(), r0), context,
+                             parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new(zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new(zone()) LConstantD);
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new(zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new(zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value);
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), r0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LInstruction* result = NULL;
+
+  if (!instr->is_fixed_typed_array()) {
+    LOperand* obj = NULL;
+    if (instr->representation().IsDouble()) {
+      obj = UseRegister(instr->elements());
+    } else {
+      DCHECK(instr->representation().IsSmiOrTagged());
+      obj = UseRegisterAtStart(instr->elements());
+    }
+    result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
+  } else {
+    DCHECK(
+        (instr->representation().IsInteger32() &&
+         !IsDoubleOrFloatElementsKind(elements_kind)) ||
+        (instr->representation().IsDouble() &&
+         IsDoubleOrFloatElementsKind(elements_kind)));
+    LOperand* backing_store = UseRegister(instr->elements());
+    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+    result = DefineAsRegister(
+        new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
+  }
+
+  bool needs_environment;
+  if (instr->is_fixed_typed_array()) {
+    // see LCodeGen::DoLoadKeyedExternalArray
+    needs_environment = elements_kind == UINT32_ELEMENTS &&
+                        !instr->CheckFlag(HInstruction::kUint32);
+  } else {
+    // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+    // LCodeGen::DoLoadKeyedFixedArray
+    needs_environment =
+        instr->RequiresHoleCheck() ||
+        (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
+  }
+
+  if (needs_environment) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
+                  r0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  if (!instr->is_fixed_typed_array()) {
+    DCHECK(instr->elements()->representation().IsTagged());
+    bool needs_write_barrier = instr->NeedsWriteBarrier();
+    LOperand* object = NULL;
+    LOperand* key = NULL;
+    LOperand* val = NULL;
+
+    if (instr->value()->representation().IsDouble()) {
+      object = UseRegisterAtStart(instr->elements());
+      val = UseRegister(instr->value());
+      key = UseRegisterOrConstantAtStart(instr->key());
+    } else {
+      DCHECK(instr->value()->representation().IsSmiOrTagged());
+      if (needs_write_barrier) {
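+        // The write barrier clobbers its register arguments, so all three
+        // operands must live in temp registers.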
+        object = UseTempRegister(instr->elements());
+        val = UseTempRegister(instr->value());
+        key = UseTempRegister(instr->key());
+      } else {
+        object = UseRegisterAtStart(instr->elements());
+        val = UseRegisterAtStart(instr->value());
+        key = UseRegisterOrConstantAtStart(instr->key());
+      }
+    }
+
+    return new (zone()) LStoreKeyed(object, key, val, nullptr);
+  }
+
+  DCHECK(
+      (instr->value()->representation().IsInteger32() &&
+       !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+      (instr->value()->representation().IsDouble() &&
+       IsDoubleOrFloatElementsKind(instr->elements_kind())));
+  DCHECK(instr->elements()->representation().IsExternal());
+  LOperand* val = UseRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LOperand* backing_store = UseRegister(instr->elements());
+  LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+  return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
+
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreKeyedGeneric* result =
+      new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+    return result;
+  } else {
+    LOperand* object = UseFixed(instr->object(), r0);
+    LOperand* context = UseFixed(instr->context(), cp);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, context, NULL);
+    return MarkAsCall(result, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = Use(instr->object());
+  LOperand* elements = Use(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
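+  // Growing may allocate a new backing store in a deferred runtime call,
+  // hence the pointer map and the environment.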
+  LMaybeGrowElements* result = new (zone())
+      LMaybeGrowElements(context, object, elements, key, current_capacity);
+  DefineFixed(result, r0);
+  return AssignPointerMap(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool is_in_object = instr->access().IsInobject();
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  bool needs_write_barrier_for_map = instr->has_transition() &&
+      instr->NeedsWriteBarrierForMap();
+
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = is_in_object
+        ? UseRegister(instr->object())
+        : UseTempRegister(instr->object());
+  } else {
+    obj = needs_write_barrier_for_map
+        ? UseRegister(instr->object())
+        : UseRegisterAtStart(instr->object());
+  }
+
+  LOperand* val;
+  if (needs_write_barrier) {
+    val = UseTempRegister(instr->value());
+  } else if (instr->field_representation().IsDouble()) {
+    val = UseRegisterAtStart(instr->value());
+  } else {
+    val = UseRegister(instr->value());
+  }
+
+  // We need a temporary register for the write barrier of the map field.
+  LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+  return new(zone()) LStoreNamedField(obj, val, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreNamedGeneric* result =
+      new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r1);
+  LOperand* right = UseFixed(instr->right(), r0);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LStringAdd(context, left, right), r0),
+      instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
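+  // The deferred path calls the runtime and may clobber the inputs, so
+  // string and index are allocated as temps.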
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = UseRegisterOrConstant(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  DCHECK(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new(zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
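+    // Stack parameters live in the caller's frame; pin them to their
+    // parameter stack slot.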
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor.GetRegisterParameter(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk()->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Retry(kTooManySpillSlotsNeededForOSR);
+      spill_index = 0;
+    }
+  }
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* args = UseRegister(instr->arguments());
+  LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), r0);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* value = UseFixed(instr->value(), r3);
+  LTypeof* result = new (zone()) LTypeof(context, value);
+  return MarkAsCall(DefineFixed(result, r0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new(zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), cp);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
+  } else {
+    DCHECK(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->inlining_kind());
+  // Only replay the binding of the arguments object if it wasn't removed
+  // from the graph.
+  if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->BindContext(instr->closure_context());
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedFunction(instr->shared());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer = current_block_->last_environment()->
+      DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+
+  return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseFixed(instr->enumerable(), r0);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, r0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->context());
+  return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+    HAllocateBlockContext* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LAllocateBlockContext* result =
+      new(zone()) LAllocateBlockContext(context, function);
+  return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/arm/lithium-arm.h b/src/crankshaft/arm/lithium-arm.h
new file mode 100644
index 0000000..6329f36
--- /dev/null
+++ b/src/crankshaft/arm/lithium-arm.h
@@ -0,0 +1,2788 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
+#define V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckArrayBufferNotNeutered)             \
+  V(CheckInstanceType)                       \
+  V(CheckNonSmi)                             \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToI)                               \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInPrototypeChainAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadRoot)                                \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(MaybeGrowElements)                       \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(MultiplyAddD)                            \
+  V(MultiplySubD)                            \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(Prologue)                                \
+  V(PushArgument)                            \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(RSubI)                                   \
+  V(TaggedToI)                               \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
+
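+// Boilerplate shared by every concrete instruction: its opcode, the hook
+// that emits native code, its mnemonic, and a checked downcast.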
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const final { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) final;              \
+  const char* Mnemonic() const final { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
+  }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
+
+
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) {
+  }
+
+  virtual ~LInstruction() {}
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
+
+  // Interface to the register allocator and iterators.
+  bool IsMarkedAsCall() const { return IsCall(); }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
+ private:
+  // Iterator support.
+  friend class InputIterator;
+
+  friend class TempIterator;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  class IsCallBits: public BitField<bool, 0, 1> {};
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  bool HasResult() const final { return R != 0 && result() != NULL; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const override { return results_[0]; }
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  int InputCount() final { return I; }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return T; }
+  LOperand* TempAt(int i) final { return temps_[i]; }
+};
+
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use the DECLARE-macro here because of sub-classes.
+  bool IsGap() const override { return true; }
+  void PrintDataTo(StringStream* stream) override;
+  static LGap* cast(LInstruction* instr) {
+    DCHECK(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
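+  // A gap can hold parallel moves at four positions relative to its
+  // neighbouring instructions.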
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new(zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LInstructionGap final : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  void PrintDataTo(StringStream* stream) override;
+  bool IsControl() const override { return true; }
+
+  int block_id() const { return block_->block_id(); }
+
+ private:
+  HBasicBlock* block_;
+};
+
+
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
+class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDummy final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() {}
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) {
+    inputs_[0] = value;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool IsControl() const override { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel final : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallStub(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+  bool IsControl() const final { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
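+  // Lazily resolve and cache the assembly labels of the two successors.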
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+
+  Label* false_label_;
+  Label* true_label_;
+};
+
+
+class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModI final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+// Instruction for computing multiplier * multiplicand + addend.
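+// On ARM this is typically emitted as a single VFP vmla instruction.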
+class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+                LOperand* multiplicand) {
+    inputs_[0] = addend;
+    inputs_[1] = multiplier;
+    inputs_[2] = multiplicand;
+  }
+
+  LOperand* addend() { return inputs_[0]; }
+  LOperand* multiplier() { return inputs_[1]; }
+  LOperand* multiplicand() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
+// Instruction for computing minuend - multiplier * multiplicand.
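+// On ARM this is typically emitted as a single VFP vmls instruction.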
+class LMultiplySubD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LMultiplySubD(LOperand* minuend, LOperand* multiplier,
+                LOperand* multiplicand) {
+    inputs_[0] = minuend;
+    inputs_[1] = multiplier;
+    inputs_[2] = multiplicand;
+  }
+
+  LOperand* minuend() { return inputs_[0]; }
+  LOperand* multiplier() { return inputs_[1]; }
+  LOperand* multiplicand() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
+};
+
+
+class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->representation().IsDouble();
+  }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFloor(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRound(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathFround final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathAbs(LOperand* context, LOperand* value) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathLog(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathClz32(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+ public:
+  LMathExp(LOperand* value,
+           LOperand* double_temp,
+           LOperand* temp1,
+           LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = double_temp;
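+    // Eagerly initialize the lookup tables used by the generated
+    // fast math-exp code sequence.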
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSqrt(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathPowHalf(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranch(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+                               "cmp-minus-zero-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsStringAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasInstanceTypeAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() const { return inputs_[0]; }
+  LOperand* left() const { return inputs_[1]; }
+  LOperand* right() const { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+    inputs_[0] = object;
+    inputs_[1] = prototype;
+  }
+
+  LOperand* object() const { return inputs_[0]; }
+  LOperand* prototype() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+                               "has-in-prototype-chain-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
+};
+
+
+class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
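+// can_deopt is true when the shift may produce a result that is not
+// representable as an int32 (e.g. a logical shift right by zero of a value
+// with the sign bit set); in that case the instruction deoptimizes.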
+class LShiftI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
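+// Reverse subtraction: computes right() - left(). On ARM this maps to the
+// rsb instruction, which lets a constant minuend be encoded as an
+// immediate operand.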
+class LRSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LRSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+  uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
+};
+
+
+class LConstantE final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+
+class LConstantT final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+
+class LBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMapEnumLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSeqStringGetChar(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  LOperand* string() const { return inputs_[0]; }
+  LOperand* index() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LSeqStringSetChar(LOperand* context,
+                    LOperand* string,
+                    LOperand* index,
+                    LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+  }
+
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op,
+               LOperand* context,
+               LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+  Token::Value op() const { return op_; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn final : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    DCHECK(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadFunctionPrototype(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = backing_store_owner;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* backing_store_owner() { return inputs_[2]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                    LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
+class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallJSFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        inputs_(descriptor.GetRegisterParameterCount() +
+                    kImplicitRegisterParameterCount,
+                zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() +
+               kImplicitRegisterParameterCount ==
+           operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  const CallInterfaceDescriptor descriptor() { return descriptor_; }
+
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  // The target and context are passed as implicit parameters that are not
+  // explicitly listed in the descriptor.
+  static const int kImplicitRegisterParameterCount = 2;
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  CallInterfaceDescriptor descriptor_;
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  int InputCount() final { return inputs_.length(); }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return 0; }
+  LOperand* TempAt(int i) final { return NULL; }
+};
+
+
+class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+                LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
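+// Tags an int32 as a smi, falling back to allocating a heap number when
+// the value does not fit in the smi range.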
+class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTaggedToI(LOperand* value,
+             LOperand* temp,
+             LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberUntagD(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
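+// Untags a smi to an int32. needs_check is true when the input is not
+// statically known to be a smi and the tag bit must be checked first.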
+class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  bool needs_check() const { return needs_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+    inputs_[0] = object;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Representation representation() const {
+    return hydrogen()->field_representation();
+  }
+};
+
+
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+                     LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
+ public:
+  LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value,
+              LOperand* backing_store_owner) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = value;
+    inputs_[3] = backing_store_owner;
+  }
+
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* backing_store_owner() { return inputs_[3]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  bool NeedsCanonicalization() {
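+    // Arithmetic results cannot be the hole NaN bit pattern, so values
+    // produced by Add/Sub/Mul/Div need no canonicalization before being
+    // stored into a double array.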
+    if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+        hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+      return false;
+    }
+    return hydrogen()->NeedsCanonicalization();
+  }
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
+ public:
+  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                     LOperand* value, LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    inputs_[3] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* context,
+                          LOperand* new_map_temp) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = new_map_temp;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object,
+                         LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+                               "trap-allocation-memento")
+};
+
+
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+                     LOperand* key, LOperand* current_capacity) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = elements;
+    inputs_[3] = key;
+    inputs_[4] = current_capacity;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* elements() { return inputs_[2]; }
+  LOperand* key() { return inputs_[3]; }
+  LOperand* current_capacity() { return inputs_[4]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
+class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckValue(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckArrayBufferNotNeutered final
+    : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
+
+  LOperand* view() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
+                               "check-array-buffer-not-neutered")
+  DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
+};
+
+
+class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckInstanceType(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckMaps(LOperand* value = NULL) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampDToUint8(LOperand* unclamped) {
+    inputs_[0] = unclamped;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* unclamped) {
+    inputs_[0] = unclamped;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleBits(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LConstructDouble(LOperand* hi, LOperand* lo) {
+    inputs_[0] = hi;
+    inputs_[1] = lo;
+  }
+
+  LOperand* hi() { return inputs_[0]; }
+  LOperand* lo() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LAllocate(LOperand* context,
+            LOperand* size,
+            LOperand* temp1,
+            LOperand* temp2) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry() {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStoreFrameContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAllocateBlockContext(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk final : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph)
+      : LChunk(info, graph) { }
+
+  int GetNextSpillIndex(RegisterKind kind);
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder final : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(info, graph),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        allocator_(allocator) {}
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+  LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
+  LInstruction* DoRSub(HSub* instr);
+
+  static bool HasMagicNumberForDivisor(int32_t divisor);
+
+  LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+  LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
+  LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+  LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathExp(HUnaryMathOperation* instr);
+  LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+  LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+  LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+  LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
+  LInstruction* DoDivI(HDiv* instr);
+  LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
+  LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the
+  // end of the instruction. This means that the register allocator will
+  // not reuse its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the instruction start. The register allocator is then free to
+  // assign the same register to some other operand used inside the
+  // instruction (i.e. a temporary or an output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand that must be a constant operand.
+  MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            Register reg);
+  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+                                  DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly, and we do not attach an environment to the
+  // instruction.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+  void VisitInstruction(HInstruction* current);
+  void AddInstruction(LInstruction* instr, HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HBinaryOperation* instr);
+
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  LAllocator* allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_ARM_LITHIUM_ARM_H_
diff --git a/src/crankshaft/arm/lithium-codegen-arm.cc b/src/crankshaft/arm/lithium-codegen-arm.cc
new file mode 100644
index 0000000..2bd0788
--- /dev/null
+++ b/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -0,0 +1,5607 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/arm/lithium-codegen-arm.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator final : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deopt_mode_(mode) { }
+  virtual ~SafepointGenerator() {}
+
+  void BeforeCall(int call_size) const override {}
+
+  void AfterCall() const override {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  DCHECK(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+         GenerateJumpTable() && GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  DCHECK(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ vstr(DoubleRegister::from_code(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ vldr(DoubleRegister::from_code(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  DCHECK(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+      __ stop("stop_at");
+    }
+#endif
+
+    // r1: Callee's JS function.
+    // cp: Callee's context.
+    // pp: Callee's constant pool pointer (if enabled)
+    // fp: Caller's frame pointer.
+    // lr: Caller's pc.
+  }
+
+  info()->set_prologue_offset(masm_->pc_offset());
+  if (NeedsEagerFrame()) {
+    if (info()->IsStub()) {
+      __ StubPrologue();
+    } else {
+      __ Prologue(info()->GeneratePreagedPrologue());
+    }
+    frame_is_built_ = true;
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ sub(sp, sp, Operand(slots * kPointerSize));
+      __ push(r0);
+      __ push(r1);
+      __ add(r0, sp, Operand(slots * kPointerSize));
+      __ mov(r1, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ sub(r0, r0, Operand(kPointerSize));
+      __ str(r1, MemOperand(r0, 2 * kPointerSize));
+      __ cmp(r0, sp);
+      __ b(ne, &loop);
+      __ pop(r1);
+      __ pop(r0);
+    } else {
+      __ sub(sp, sp, Operand(slots * kPointerSize));
+    }
+  }
+
+  if (info()->saves_caller_doubles()) {
+    SaveCallerDoubles();
+  }
+  return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+  Comment(";;; Prologue begin");
+
+  // Possibly allocate a local context.
+  if (info()->scope()->num_heap_slots() > 0) {
+    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
+    // Argument to NewContext is the function, which is in r1.
+    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+    if (info()->scope()->is_script_scope()) {
+      __ push(r1);
+      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ CallRuntime(Runtime::kNewScriptContext);
+      deopt_mode = Safepoint::kLazyDeopt;
+    } else if (slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ push(r1);
+      __ CallRuntime(Runtime::kNewFunctionContext);
+    }
+    RecordSafepoint(deopt_mode);
+
+    // Context is returned in both r0 and cp.  It replaces the context
+    // passed to us.  It's saved in the stack and kept live in cp.
+    __ mov(cp, r0);
+    __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ ldr(r0, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextMemOperand(cp, var->index());
+        __ str(r0, target);
+        // Update the write barrier. This clobbers r3 and r0.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(
+              cp,
+              target.offset(),
+              r0,
+              r3,
+              GetLinkRegisterState(),
+              kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, r0, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  Comment(";;; Prologue end");
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or if
+  // there are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  DCHECK(slots >= 0);
+  __ sub(sp, sp, Operand(slots * kPointerSize));
+}
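+
+// Illustrative numbers for the adjustment above: if the optimized code needs
+// 10 spill slots and the unoptimized frame already accounts for 4 of them,
+// sp is lowered by the remaining 6 * kPointerSize at the OSR entry point.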
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (instr->IsCall()) {
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  }
+  if (!instr->IsLazyBailout() && !instr->IsGap()) {
+    safepoints_.BumpLastLazySafepointIndex();
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  DCHECK(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(
+          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
+        frame_is_built_ = true;
+        __ PushFixedFrame();
+        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ push(scratch0());
+        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        Comment(";;; Deferred code");
+      }
+      code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Destroy frame");
+        DCHECK(frame_is_built_);
+        __ pop(ip);
+        __ PopFixedFrame();
+        frame_is_built_ = false;
+      }
+      __ jmp(code->exit());
+    }
+  }
+
+  // Force constant pool emission at the end of the deferred code to make
+  // sure that no constant pools are emitted after.
+  masm()->CheckConstPool(true, false);
+
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+  // Check that the jump table is accessible from everywhere in the function
+  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
+  // immediate of a branch instruction.
+  // To simplify, we consider the code size from the first instruction to the
+  // end of the jump table. We also don't consider the pc load delta.
+  // Each entry in the jump table generates one instruction and inlines one
+  // 32-bit word of data after it.
+  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
+                jump_table_.length() * 7)) {
+    Abort(kGeneratedCodeIsTooLarge);
+  }
+
+  if (jump_table_.length() > 0) {
+    Label needs_frame, call_deopt_entry;
+
+    Comment(";;; -------------------- Jump table --------------------");
+    Address base = jump_table_[0].address;
+
+    Register entry_offset = scratch0();
+
+    int length = jump_table_.length();
+    for (int i = 0; i < length; i++) {
+      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+      __ bind(&table_entry->label);
+
+      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+      Address entry = table_entry->address;
+      DeoptComment(table_entry->deopt_info);
+
+      // Second-level deopt table entries are contiguous and small, so instead
+      // of loading the full, absolute address of each one, load an immediate
+      // offset which will be added to the base address later.
+      __ mov(entry_offset, Operand(entry - base));
+
+      if (table_entry->needs_frame) {
+        DCHECK(!info()->saves_caller_doubles());
+        Comment(";;; call deopt with frame");
+        __ PushFixedFrame();
+        __ bl(&needs_frame);
+      } else {
+        __ bl(&call_deopt_entry);
+      }
+      info()->LogDeoptCallPosition(masm()->pc_offset(),
+                                   table_entry->deopt_info.inlining_id);
+      masm()->CheckConstPool(false, false);
+    }
+
+    if (needs_frame.is_linked()) {
+      __ bind(&needs_frame);
+      // This variant of deopt can only be used with stubs. Since we don't
+      // have a function pointer to install in the stack frame that we're
+      // building, install a special marker there instead.
+      DCHECK(info()->IsStub());
+      __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
+      __ push(ip);
+      __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+    }
+
+    Comment(";;; call deopt");
+    __ bind(&call_deopt_entry);
+
+    if (info()->saves_caller_doubles()) {
+      DCHECK(info()->IsStub());
+      RestoreCallerDoubles();
+    }
+
+    // Add the base address to the offset previously loaded in entry_offset.
+    __ add(entry_offset, entry_offset,
+           Operand(ExternalReference::ForDeoptEntry(base)));
+    __ bx(entry_offset);
+  }
+
+  // Force constant pool emission at the end of the deopt jump table to make
+  // sure that no constant pools are emitted after.
+  masm()->CheckConstPool(true, false);
+
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
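+
+// Rough arithmetic behind the is_int24 check above (illustrative values):
+// with Assembler::kInstrSize == 4, a 1 MB code body contributes 262144
+// instruction slots, and each jump table entry is budgeted at 7 more, so
+// the +/-2^23 branch range is only exhausted by pathologically large
+// functions.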
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  DCHECK(is_done());
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int code) const {
+  return Register::from_code(code);
+}
+
+
+DwVfpRegister LCodeGen::ToDoubleRegister(int code) const {
+  return DwVfpRegister::from_code(code);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  DCHECK(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk_->LookupConstant(const_op);
+    Handle<Object> literal = constant->handle(isolate());
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      AllowDeferredHandleDereference get_number;
+      DCHECK(literal->IsNumber());
+      __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
+    } else if (r.IsDouble()) {
+      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+    } else {
+      DCHECK(r.IsSmiOrTagged());
+      __ Move(scratch, literal);
+    }
+    return scratch;
+  } else if (op->IsStackSlot()) {
+    __ ldr(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+
+DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  DCHECK(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+DwVfpRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                               SwVfpRegister flt_scratch,
+                                               DwVfpRegister dbl_scratch) {
+  if (op->IsDoubleRegister()) {
+    return ToDoubleRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk_->LookupConstant(const_op);
+    Handle<Object> literal = constant->handle(isolate());
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      DCHECK(literal->IsNumber());
+      __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
+      __ vmov(flt_scratch, ip);
+      __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+      return dbl_scratch;
+    } else if (r.IsDouble()) {
+      Abort(kUnsupportedDoubleImmediate);
+    } else if (r.IsTagged()) {
+      Abort(kUnsupportedTaggedImmediate);
+    }
+  } else if (op->IsStackSlot()) {
+    // TODO(regis): Why is vldr not taking a MemOperand?
+    // __ vldr(dbl_scratch, ToMemOperand(op));
+    MemOperand mem_op = ToMemOperand(op);
+    __ vldr(dbl_scratch, mem_op.rn(), mem_op.offset());
+    return dbl_scratch;
+  }
+  UNREACHABLE();
+  return dbl_scratch;
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+                                   const Representation& r) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  DCHECK(r.IsSmiOrTagged());
+  return reinterpret_cast<int32_t>(Smi::FromInt(value));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return Smi::FromInt(constant->Integer32Value());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk()->LookupConstant(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsSmi()) {
+      DCHECK(constant->HasSmiValue());
+      return Operand(Smi::FromInt(constant->Integer32Value()));
+    } else if (r.IsInteger32()) {
+      DCHECK(constant->HasInteger32Value());
+      return Operand(constant->Integer32Value());
+    } else if (r.IsDouble()) {
+      Abort(kToOperandUnsupportedDoubleImmediate);
+    }
+    DCHECK(r.IsTagged());
+    return Operand(constant->handle(isolate()));
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort(kToOperandIsDoubleRegisterUnimplemented);
+    return Operand::Zero();
+  }
+  // Stack slots are not implemented; use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand::Zero();
+}
+
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
+  return -(index + 1) * kPointerSize;
+}
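+
+// A quick check of the formula above: index -1 maps to offset 0 from sp,
+// index -2 to kPointerSize, and so on up the incoming arguments.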
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  DCHECK(!op->IsRegister());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()));
+  } else {
+    // There is no eager stack frame, so retrieve the parameter relative to
+    // the stack pointer.
+    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  DCHECK(op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+  } else {
+    // There is no eager stack frame, so retrieve the parameter relative to
+    // the stack pointer.
+    return MemOperand(
+        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+
+  WriteTranslation(environment->outer(), translation);
+  WriteTranslationFrame(environment, translation);
+
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    AddToTranslation(
+        environment, translation, value, environment->HasTaggedValueAt(i),
+        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+  }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation,
+                                LOperand* op,
+                                bool is_tagged,
+                                bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment,
+                       translation,
+                       value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer,
+                       dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    if (is_tagged) {
+      translation->StoreStackSlot(index);
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(index);
+    } else {
+      translation->StoreInt32StackSlot(index);
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    translation->StoreDoubleStackSlot(index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+int LCodeGen::CallCodeSize(Handle<Code> code, RelocInfo::Mode mode) {
+  int size = masm()->CallSize(code, mode);
+  if (code->kind() == Code::BINARY_OP_IC ||
+      code->kind() == Code::COMPARE_IC) {
+    size += Assembler::kInstrSize;  // extra nop() added in CallCodeGeneric.
+  }
+  return size;
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr,
+                        TargetAddressStorageMode storage_mode) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, storage_mode);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode,
+                               TargetAddressStorageMode storage_mode) {
+  DCHECK(instr != NULL);
+  // Block literal pool emission to ensure that the nop indicating no inlined
+  // smi code is in the correct position.
+  Assembler::BlockConstPoolScope block_const_pool(masm());
+  __ Call(code, mode, TypeFeedbackId::None(), al, storage_mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+  // Signal that we don't inline smi code before these stubs in the
+  // optimizing code generator.
+  if (code->kind() == Code::BINARY_OP_IC ||
+      code->kind() == Code::COMPARE_IC) {
+    __ nop();
+  }
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr,
+                           SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+
+  __ CallRuntime(function, num_arguments, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    __ Move(cp, ToRegister(context));
+  } else if (context->IsStackSlot()) {
+    __ ldr(cp, ToMemOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, zone());
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            Deoptimizer::BailoutType bailout_type) {
+  LEnvironment* environment = instr->environment();
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  DCHECK(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+    return;
+  }
+
+  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+    Register scratch = scratch0();
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+
+    // Store the condition on the stack if necessary.
+    if (condition != al) {
+      __ mov(scratch, Operand::Zero(), LeaveCC, NegateCondition(condition));
+      __ mov(scratch, Operand(1), LeaveCC, condition);
+      __ push(scratch);
+    }
+
+    __ push(r1);
+    __ mov(scratch, Operand(count));
+    __ ldr(r1, MemOperand(scratch));
+    __ sub(r1, r1, Operand(1), SetCC);
+    __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
+    __ str(r1, MemOperand(scratch));
+    __ pop(r1);
+
+    if (condition != al) {
+      // Clean up the stack before the deoptimizer call.
+      __ pop(scratch);
+    }
+
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY, eq);
+
+    // 'Restore' the condition in a slightly hacky way. (It would be better
+    // to use 'msr' and 'mrs' instructions here, but they are not supported by
+    // our ARM simulator).
+    if (condition != al) {
+      condition = ne;
+      __ cmp(scratch, Operand::Zero());
+    }
+  }
+
+  if (info()->ShouldTrapOnDeopt()) {
+    __ stop("trap_on_deopt", condition);
+  }
+
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
+  DCHECK(info()->IsStub() || frame_is_built_);
+  // Go through the jump table if we need to handle the condition, build the
+  // frame, or restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
+    DeoptComment(deopt_info);
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
+  } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
+                                            !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+        jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
+    }
+    __ b(condition, &jump_table_.last().label);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason) {
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  DeoptimizeIf(condition, instr, deopt_reason, bailout_type);
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
+  DCHECK(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+      kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
+          label->block_id(),
+          LabelType(label));
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->result()).is(r0));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
+  HMod* hmod = instr->hydrogen();
+  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+  Label dividend_is_not_negative, done;
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ cmp(dividend, Operand::Zero());
+    __ b(pl, &dividend_is_not_negative);
+    // Note that this is correct even for kMinInt operands.
+    __ rsb(dividend, dividend, Operand::Zero());
+    __ and_(dividend, dividend, Operand(mask));
+    __ rsb(dividend, dividend, Operand::Zero(), SetCC);
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+    }
+    __ b(&done);
+  }
+
+  __ bind(&dividend_is_not_negative);
+  __ and_(dividend, dividend, Operand(mask));
+  __ bind(&done);
+}
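+
+// A worked example of the branching sequence above (illustrative values):
+// for divisor == 8 the mask is 7. A dividend of -13 is negated to 13,
+// masked to 5, and negated back to -5, matching -13 % 8 == -5 in JS.
+// A non-negative dividend such as 13 takes only the final 'and', giving 5.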
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  __ mov(ip, Operand(Abs(divisor)));
+  __ smull(result, ip, result, ip);
+  __ sub(result, dividend, result, SetCC);
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ b(ne, &remainder_not_zero);
+    __ cmp(dividend, Operand::Zero());
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+    __ bind(&remainder_not_zero);
+  }
+}
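+
+// The sequence above relies on the identity n % d == n - trunc(n / |d|) * |d|
+// (a JS remainder takes the sign of the dividend, so |d| suffices). As an
+// illustrative check, n == 13 and d == -8 give a quotient of 1 from
+// TruncatingDiv, and 13 - 1 * 8 == 5 == 13 % -8.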
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  HMod* hmod = instr->hydrogen();
+  if (CpuFeatures::IsSupported(SUDIV)) {
+    CpuFeatureScope scope(masm(), SUDIV);
+
+    Register left_reg = ToRegister(instr->left());
+    Register right_reg = ToRegister(instr->right());
+    Register result_reg = ToRegister(instr->result());
+
+    Label done;
+    // Check for x % 0; sdiv might signal an exception. We have to deopt in
+    // this case because we can't return a NaN.
+    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+      __ cmp(right_reg, Operand::Zero());
+      DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+    }
+
+    // Check for kMinInt % -1; sdiv will return kMinInt, which is not what we
+    // want. We have to deopt if we care about -0, because we can't return that.
+    if (hmod->CheckFlag(HValue::kCanOverflow)) {
+      Label no_overflow_possible;
+      __ cmp(left_reg, Operand(kMinInt));
+      __ b(ne, &no_overflow_possible);
+      __ cmp(right_reg, Operand(-1));
+      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+        DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+      } else {
+        __ b(ne, &no_overflow_possible);
+        __ mov(result_reg, Operand::Zero());
+        __ jmp(&done);
+      }
+      __ bind(&no_overflow_possible);
+    }
+
+    // For 'r3 = r1 % r2' we can have the following ARM code:
+    //   sdiv r3, r1, r2
+    //   mls r3, r3, r2, r1
+
+    __ sdiv(result_reg, left_reg, right_reg);
+    __ Mls(result_reg, result_reg, right_reg, left_reg);
+
+    // If we care about -0, test if the dividend is <0 and the result is 0.
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ cmp(result_reg, Operand::Zero());
+      __ b(ne, &done);
+      __ cmp(left_reg, Operand::Zero());
+      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+    }
+    __ bind(&done);
+
+  } else {
+    // General case, without any SDIV support.
+    Register left_reg = ToRegister(instr->left());
+    Register right_reg = ToRegister(instr->right());
+    Register result_reg = ToRegister(instr->result());
+    Register scratch = scratch0();
+    DCHECK(!scratch.is(left_reg));
+    DCHECK(!scratch.is(right_reg));
+    DCHECK(!scratch.is(result_reg));
+    DwVfpRegister dividend = ToDoubleRegister(instr->temp());
+    DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
+    DCHECK(!divisor.is(dividend));
+    LowDwVfpRegister quotient = double_scratch0();
+    DCHECK(!quotient.is(dividend));
+    DCHECK(!quotient.is(divisor));
+
+    Label done;
+    // Check for x % 0; we have to deopt in this case because we can't return
+    // a NaN.
+    if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+      __ cmp(right_reg, Operand::Zero());
+      DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+    }
+
+    __ Move(result_reg, left_reg);
+    // Load the arguments in VFP registers. The divisor value was preloaded
+    // above. Be careful that 'right_reg' is only live on entry.
+    // TODO(svenpanne) The last comment seems to be wrong nowadays.
+    __ vmov(double_scratch0().low(), left_reg);
+    __ vcvt_f64_s32(dividend, double_scratch0().low());
+    __ vmov(double_scratch0().low(), right_reg);
+    __ vcvt_f64_s32(divisor, double_scratch0().low());
+
+    // We do not care about the sign of the divisor. Note that we still handle
+    // the kMinInt % -1 case correctly, though.
+    __ vabs(divisor, divisor);
+    // Compute the quotient and round it to a 32bit integer.
+    __ vdiv(quotient, dividend, divisor);
+    __ vcvt_s32_f64(quotient.low(), quotient);
+    __ vcvt_f64_s32(quotient, quotient.low());
+
+    // Compute the remainder in result.
+    __ vmul(double_scratch0(), divisor, quotient);
+    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
+    __ vmov(scratch, double_scratch0().low());
+    __ sub(result_reg, left_reg, scratch, SetCC);
+
+    // If we care about -0, test if the dividend is <0 and the result is 0.
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ b(ne, &done);
+      __ cmp(left_reg, Operand::Zero());
+      DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+    }
+    __ bind(&done);
+  }
+}
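+
+// A quick illustration of the sdiv/mls pair above: for left == -7 and
+// right == 3, sdiv truncates to -2 and mls computes -7 - (-2 * 3) == -1,
+// which matches -7 % 3 in JS.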
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ cmp(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    __ cmp(dividend, Operand(kMinInt));
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+  }
+  // Deoptimize if remainder will not be 0.
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+      divisor != 1 && divisor != -1) {
+    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+    __ tst(dividend, Operand(mask));
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+  }
+
+  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
+    __ rsb(result, dividend, Operand(0));
+    return;
+  }
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (shift == 0) {
+    __ mov(result, dividend);
+  } else if (shift == 1) {
+    __ add(result, dividend, Operand(dividend, LSR, 31));
+  } else {
+    __ mov(result, Operand(dividend, ASR, 31));
+    __ add(result, dividend, Operand(result, LSR, 32 - shift));
+  }
+  if (shift > 0) __ mov(result, Operand(result, ASR, shift));
+  if (divisor < 0) __ rsb(result, result, Operand(0));
+}
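+
+// The shift sequence above is round-toward-zero division by a power of
+// two. An illustrative trace for dividend == -7, divisor == 4 (shift 2):
+// the sign word contributes a bias of 2^2 - 1 == 3, and (-7 + 3) >> 2
+// is -1 == trunc(-7 / 4); for a non-negative dividend the bias is 0.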
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ cmp(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ rsb(result, result, Operand::Zero());
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    __ mov(ip, Operand(divisor));
+    __ smull(scratch0(), ip, result, ip);
+    __ sub(scratch0(), scratch0(), dividend, SetCC);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+  }
+}
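+
+// When the quotient must be exact, the tail above multiplies it back by
+// the divisor and deopts on any difference. Illustrative values:
+// dividend == 7, divisor == 3 yield a quotient of 2, and since
+// 2 * 3 != 7 the code deopts with Deoptimizer::kLostPrecision.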
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  Register result = ToRegister(instr->result());
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ cmp(divisor, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label positive;
+    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
+      // Do the test only if it hasn't been done above.
+      __ cmp(divisor, Operand::Zero());
+    }
+    __ b(pl, &positive);
+    __ cmp(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+    __ bind(&positive);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+      (!CpuFeatures::IsSupported(SUDIV) ||
+       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    // We don't need to check for overflow when truncating with sdiv
+    // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
+    __ cmp(dividend, Operand(kMinInt));
+    __ cmp(divisor, Operand(-1), eq);
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+  }
+
+  if (CpuFeatures::IsSupported(SUDIV)) {
+    CpuFeatureScope scope(masm(), SUDIV);
+    __ sdiv(result, dividend, divisor);
+  } else {
+    DoubleRegister vleft = ToDoubleRegister(instr->temp());
+    DoubleRegister vright = double_scratch0();
+    __ vmov(double_scratch0().low(), dividend);
+    __ vcvt_f64_s32(vleft, double_scratch0().low());
+    __ vmov(double_scratch0().low(), divisor);
+    __ vcvt_f64_s32(vright, double_scratch0().low());
+    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
+    __ vcvt_s32_f64(double_scratch0().low(), vleft);
+    __ vmov(result, double_scratch0().low());
+  }
+
+  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    // Compute remainder and deopt if it's not zero.
+    Register remainder = scratch0();
+    __ Mls(remainder, result, divisor, dividend);
+    __ cmp(remainder, Operand::Zero());
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+  DwVfpRegister addend = ToDoubleRegister(instr->addend());
+  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
+  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+  // This is computed in-place.
+  DCHECK(addend.is(ToDoubleRegister(instr->result())));
+
+  __ vmla(addend, multiplier, multiplicand);
+}
+
+
+void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
+  DwVfpRegister minuend = ToDoubleRegister(instr->minuend());
+  DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
+  DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+  // This is computed in-place.
+  DCHECK(minuend.is(ToDoubleRegister(instr->result())));
+
+  __ vmls(minuend, multiplier, multiplicand);
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  Register result = ToRegister(instr->result());
+  int32_t divisor = instr->divisor();
+
+  // If the divisor is 1, return the dividend.
+  if (divisor == 1) {
+    __ Move(result, dividend);
+    return;
+  }
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 1) {
+    __ mov(result, Operand(dividend, ASR, shift));
+    return;
+  }
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+  __ rsb(result, dividend, Operand::Zero(), SetCC);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+      DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+    }
+    return;
+  }
+
+  // If the negation could not overflow, simply shifting is OK.
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ mov(result, Operand(result, ASR, shift));
+    return;
+  }
+
+  __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs);
+  __ mov(result, Operand(result, ASR, shift), LeaveCC, vc);
+}
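+
+// The vs/vc tail above handles the one dividend that cannot be negated,
+// kMinInt. For an illustrative divisor of -4, the rsb overflows (vs), so
+// the precomputed constant kMinInt / -4 == 2^29 is moved in directly
+// instead of shifting a wrapped value.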
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ cmp(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(result, dividend, Abs(divisor));
+    if (divisor < 0) __ rsb(result, result, Operand::Zero());
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
+  Register temp = ToRegister(instr->temp());
+  DCHECK(!temp.is(dividend) && !temp.is(result));
+  Label needs_adjustment, done;
+  __ cmp(dividend, Operand::Zero());
+  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ rsb(result, result, Operand::Zero());
+  __ jmp(&done);
+  __ bind(&needs_adjustment);
+  __ add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(result, temp, Abs(divisor));
+  if (divisor < 0) __ rsb(result, result, Operand::Zero());
+  __ sub(result, result, Operand(1));
+  __ bind(&done);
+}
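+
+// The adjustment above uses floor(n / d) == trunc((n + sign(d)) / d) - 1
+// whenever n and d have opposite signs. Illustrative check: n == -7 and
+// d == 2 give temp == -6, a truncating quotient of -3, and -3 - 1 == -4,
+// which is floor(-3.5).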
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register left = ToRegister(instr->dividend());
+  Register right = ToRegister(instr->divisor());
+  Register result = ToRegister(instr->result());
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ cmp(right, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label positive;
+    if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
+      // Do the test only if it hasn't been done above.
+      __ cmp(right, Operand::Zero());
+    }
+    __ b(pl, &positive);
+    __ cmp(left, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+    __ bind(&positive);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+      (!CpuFeatures::IsSupported(SUDIV) ||
+       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    // We don't need to check for overflow when truncating with sdiv
+    // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
+    __ cmp(left, Operand(kMinInt));
+    __ cmp(right, Operand(-1), eq);
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+  }
+
+  if (CpuFeatures::IsSupported(SUDIV)) {
+    CpuFeatureScope scope(masm(), SUDIV);
+    __ sdiv(result, left, right);
+  } else {
+    DoubleRegister vleft = ToDoubleRegister(instr->temp());
+    DoubleRegister vright = double_scratch0();
+    __ vmov(double_scratch0().low(), left);
+    __ vcvt_f64_s32(vleft, double_scratch0().low());
+    __ vmov(double_scratch0().low(), right);
+    __ vcvt_f64_s32(vright, double_scratch0().low());
+    __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
+    __ vcvt_s32_f64(double_scratch0().low(), vleft);
+    __ vmov(result, double_scratch0().low());
+  }
+
+  Label done;
+  Register remainder = scratch0();
+  __ Mls(remainder, result, right, left);
+  __ cmp(remainder, Operand::Zero());
+  __ b(eq, &done);
+  __ eor(remainder, remainder, Operand(right));
+  __ add(result, result, Operand(remainder, ASR, 31));
+  __ bind(&done);
+}
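+
+// The eor/ASR fixup above converts a truncating quotient into a flooring
+// one: the quotient is decremented exactly when the remainder is nonzero
+// and its sign differs from the divisor's. Illustrative trace: left == -7,
+// right == 2 give sdiv -3 and remainder -1; (-1 ^ 2) is negative, so
+// ASR 31 yields -1 and the result becomes -4 == floor(-3.5).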
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
+  Register left = ToRegister(instr->left());
+  LOperand* right_op = instr->right();
+
+  bool bailout_on_minus_zero =
+    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (right_op->IsConstantOperand()) {
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below.
+      // If the constant is negative and left is zero, the result should be -0.
+      __ cmp(left, Operand::Zero());
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+    }
+
+    switch (constant) {
+      case -1:
+        if (overflow) {
+          __ rsb(result, left, Operand::Zero(), SetCC);
+          DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+        } else {
+          __ rsb(result, left, Operand::Zero());
+        }
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+          // If left is strictly negative and the constant is zero, the
+          // result is -0. Deoptimize if required, otherwise return 0.
+          __ cmp(left, Operand::Zero());
+          DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+        }
+        __ mov(result, Operand::Zero());
+        break;
+      case 1:
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
+
+        if (base::bits::IsPowerOfTwo32(constant_abs)) {
+          int32_t shift = WhichPowerOf2(constant_abs);
+          __ mov(result, Operand(left, LSL, shift));
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ rsb(result, result, Operand::Zero());
+        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs - 1);
+          __ add(result, left, Operand(left, LSL, shift));
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ rsb(result, result, Operand::Zero());
+        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs + 1);
+          __ rsb(result, left, Operand(left, LSL, shift));
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ rsb(result, result, Operand::Zero());
+        } else {
+          // Generate standard code.
+          __ mov(ip, Operand(constant));
+          __ mul(result, left, ip);
+        }
+    }
+
+  } else {
+    DCHECK(right_op->IsRegister());
+    Register right = ToRegister(right_op);
+
+    if (overflow) {
+      Register scratch = scratch0();
+      // scratch:result = left * right.
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ smull(result, scratch, result, right);
+      } else {
+        __ smull(result, scratch, left, right);
+      }
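+      // After smull, 'scratch' holds the high 32 bits of the 64-bit product.
+      // If the result fits in 32 signed bits, the high word equals the sign
+      // extension (ASR #31) of the low word; otherwise the multiply
+      // overflowed.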
+      __ cmp(scratch, Operand(result, ASR, 31));
+      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+    } else {
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ mul(result, result, right);
+      } else {
+        __ mul(result, left, right);
+      }
+    }
+
+    if (bailout_on_minus_zero) {
+      Label done;
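+      // teq sets N to the eor of the operands' sign bits, so 'pl' below
+      // means the signs agree; only a product of operands with opposite
+      // signs can be minus zero.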
+      __ teq(left, Operand(right));
+      __ b(pl, &done);
+      // Bail out if the result is minus zero.
+      __ cmp(result, Operand::Zero());
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left_op = instr->left();
+  LOperand* right_op = instr->right();
+  DCHECK(left_op->IsRegister());
+  Register left = ToRegister(left_op);
+  Register result = ToRegister(instr->result());
+  Operand right(no_reg);
+
+  if (right_op->IsStackSlot()) {
+    right = Operand(EmitLoadRegister(right_op, ip));
+  } else {
+    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
+    right = ToOperand(right_op);
+  }
+
+  switch (instr->op()) {
+    case Token::BIT_AND:
+      __ and_(result, left, right);
+      break;
+    case Token::BIT_OR:
+      __ orr(result, left, right);
+      break;
+    case Token::BIT_XOR:
+      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+        __ mvn(result, Operand(left));
+      } else {
+        __ eor(result, left, right);
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+  // result may alias either of them.
+  LOperand* right_op = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  if (right_op->IsRegister()) {
+    // Mask the shift amount to the low five bits, per JS shift semantics.
+    __ and_(scratch, ToRegister(right_op), Operand(0x1F));
+    switch (instr->op()) {
+      case Token::ROR:
+        __ mov(result, Operand(left, ROR, scratch));
+        break;
+      case Token::SAR:
+        __ mov(result, Operand(left, ASR, scratch));
+        break;
+      case Token::SHR:
+        if (instr->can_deopt()) {
+          __ mov(result, Operand(left, LSR, scratch), SetCC);
+          DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
+        } else {
+          __ mov(result, Operand(left, LSR, scratch));
+        }
+        break;
+      case Token::SHL:
+        __ mov(result, Operand(left, LSL, scratch));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    // Mask the constant shift amount to the low five bits, per JS semantics.
+    int value = ToInteger32(LConstantOperand::cast(right_op));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::ROR:
+        if (shift_count != 0) {
+          __ mov(result, Operand(left, ROR, shift_count));
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ mov(result, Operand(left, ASR, shift_count));
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ mov(result, Operand(left, LSR, shift_count));
+        } else {
+          if (instr->can_deopt()) {
+            __ tst(left, Operand(0x80000000));
+            DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
+          }
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          if (instr->hydrogen_value()->representation().IsSmi() &&
+              instr->can_deopt()) {
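+            // SmiTag is implemented as an add of the value to itself, so
+            // requesting SetCC makes the V flag record signed overflow of
+            // that final doubling.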
+            if (shift_count != 1) {
+              __ mov(result, Operand(left, LSL, shift_count - 1));
+              __ SmiTag(result, result, SetCC);
+            } else {
+              __ SmiTag(result, left, SetCC);
+            }
+            DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+          } else {
+            __ mov(result, Operand(left, LSL, shift_count));
+          }
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  SBit set_cond = can_overflow ? SetCC : LeaveCC;
+
+  if (right->IsStackSlot()) {
+    Register right_reg = EmitLoadRegister(right, ip);
+    __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
+  } else {
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
+    __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
+  }
+
+  if (can_overflow) {
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+  }
+}
+
+
+void LCodeGen::DoRSubI(LRSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  SBit set_cond = can_overflow ? SetCC : LeaveCC;
+
+  if (right->IsStackSlot()) {
+    Register right_reg = EmitLoadRegister(right, ip);
+    __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
+  } else {
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
+    __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
+  }
+
+  if (can_overflow) {
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  DCHECK(instr->result()->IsDoubleRegister());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
+#if V8_HOST_ARCH_IA32
+  // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
+  // builds.
+  uint64_t bits = instr->bits();
+  if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
+      V8_UINT64_C(0x7FF0000000000000)) {
+    uint32_t lo = static_cast<uint32_t>(bits);
+    uint32_t hi = static_cast<uint32_t>(bits >> 32);
+    __ mov(ip, Operand(lo));
+    __ mov(scratch0(), Operand(hi));
+    __ vmov(result, ip, scratch0());
+    return;
+  }
+#endif
+  double v = instr->value();
+  __ Vmov(result, v, scratch0());
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Handle<Object> object = instr->value(isolate());
+  AllowDeferredHandleDereference smi_check;
+  __ Move(ToRegister(instr->result()), object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->value());
+  __ EnumLength(result, map);
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+                                           LOperand* index,
+                                           String::Encoding encoding) {
+  if (index->IsConstantOperand()) {
+    int offset = ToInteger32(LConstantOperand::cast(index));
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset *= kUC16Size;
+    }
+    STATIC_ASSERT(kCharSize == 1);
+    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+  }
+  Register scratch = scratch0();
+  DCHECK(!scratch.is(string));
+  DCHECK(!scratch.is(ToRegister(index)));
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ add(scratch, string, Operand(ToRegister(index)));
+  } else {
+    STATIC_ASSERT(kUC16Size == 2);
+    __ add(scratch, string, Operand(ToRegister(index), LSL, 1));
+  }
+  return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  if (FLAG_debug_code) {
+    Register scratch = scratch0();
+    __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+    __ and_(scratch, scratch,
+            Operand(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ cmp(scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+                            ? one_byte_seq_type : two_byte_seq_type));
+    __ Check(eq, kUnexpectedStringType);
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ ldrb(result, operand);
+  } else {
+    __ ldrh(result, operand);
+  }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register value = ToRegister(instr->value());
+
+  if (FLAG_debug_code) {
+    Register index = ToRegister(instr->index());
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    int encoding_mask =
+        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+        ? one_byte_seq_type : two_byte_seq_type;
+    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ strb(value, operand);
+  } else {
+    __ strh(value, operand);
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  SBit set_cond = can_overflow ? SetCC : LeaveCC;
+
+  if (right->IsStackSlot()) {
+    Register right_reg = EmitLoadRegister(right, ip);
+    __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
+  } else {
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
+    __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
+  }
+
+  if (can_overflow) {
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+  }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  HMathMinMax::Operation operation = instr->hydrogen()->operation();
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+    Register left_reg = ToRegister(left);
+    Operand right_op = (right->IsRegister() || right->IsConstantOperand())
+        ? ToOperand(right)
+        : Operand(EmitLoadRegister(right, ip));
+    Register result_reg = ToRegister(instr->result());
+    __ cmp(left_reg, right_op);
+    __ Move(result_reg, left_reg, condition);
+    __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
+  } else {
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    DwVfpRegister left_reg = ToDoubleRegister(left);
+    DwVfpRegister right_reg = ToDoubleRegister(right);
+    DwVfpRegister result_reg = ToDoubleRegister(instr->result());
+    Label result_is_nan, return_left, return_right, done;
+    __ VFPCompareAndSetFlags(left_reg, right_reg);
+    if (operation == HMathMinMax::kMathMin) {
+      __ b(mi, &return_left);
+      __ b(gt, &return_right);
+    } else {
+      __ b(mi, &return_right);
+      __ b(gt, &return_left);
+    }
+    __ b(vs, &result_is_nan);
+    // Left equals right => check for -0.
+    __ VFPCompareAndSetFlags(left_reg, 0.0);
+    if (left_reg.is(result_reg) || right_reg.is(result_reg)) {
+      __ b(ne, &done);  // left == right != 0.
+    } else {
+      __ b(ne, &return_left);  // left == right != 0.
+    }
+    // At this point, both left and right are either 0 or -0.
+    if (operation == HMathMinMax::kMathMin) {
+      // We could use a single 'vorr' instruction here if we had NEON support.
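+      // -((-left) - right) yields -0 whenever either input is -0 under
+      // IEEE-754 signed-zero arithmetic, matching the semantics of min.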
+      __ vneg(left_reg, left_reg);
+      __ vsub(result_reg, left_reg, right_reg);
+      __ vneg(result_reg, result_reg);
+    } else {
+      // Since we operate on +0 and/or -0, vadd and vand have the same effect;
+      // the decision for vadd is easy because vand is a NEON instruction.
+      __ vadd(result_reg, left_reg, right_reg);
+    }
+    __ b(&done);
+
+    __ bind(&result_is_nan);
+    __ vadd(result_reg, left_reg, right_reg);
+    __ b(&done);
+
+    __ bind(&return_right);
+    __ Move(result_reg, right_reg);
+    if (!left_reg.is(result_reg)) {
+      __ b(&done);
+    }
+
+    __ bind(&return_left);
+    __ Move(result_reg, left_reg);
+
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DwVfpRegister left = ToDoubleRegister(instr->left());
+  DwVfpRegister right = ToDoubleRegister(instr->right());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ vadd(result, left, right);
+      break;
+    case Token::SUB:
+      __ vsub(result, left, right);
+      break;
+    case Token::MUL:
+      __ vmul(result, left, right);
+      break;
+    case Token::DIV:
+      __ vdiv(result, left, right);
+      break;
+    case Token::MOD: {
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ MovToFloatParameters(left, right);
+      __ CallCFunction(
+          ExternalReference::mod_two_doubles_operation(isolate()),
+          0, 2);
+      // Move the result into the double result register.
+      __ MovFromFloatResult(result);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r1));
+  DCHECK(ToRegister(instr->right()).is(r0));
+  DCHECK(ToRegister(instr->result()).is(r0));
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  // Block literal pool emission to ensure the nop indicating no inlined smi
+  // code is in the correct position.
+  Assembler::BlockConstPoolScope block_const_pool(masm());
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+
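+  // Emit as few branches as possible: fall through when one destination is
+  // the next block to be emitted, and emit both branches otherwise.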
+  if (right_block == left_block || condition == al) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ b(NegateCondition(condition), chunk_->GetAssemblyLabel(right_block));
+  } else if (right_block == next_block) {
+    __ b(condition, chunk_->GetAssemblyLabel(left_block));
+  } else {
+    __ b(condition, chunk_->GetAssemblyLabel(left_block));
+    __ b(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition) {
+  int true_block = instr->TrueDestination(chunk_);
+  __ b(condition, chunk_->GetAssemblyLabel(true_block));
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition) {
+  int false_block = instr->FalseDestination(chunk_);
+  __ b(condition, chunk_->GetAssemblyLabel(false_block));
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+  __ stop("LBreak");
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsInteger32() || r.IsSmi()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    __ cmp(reg, Operand::Zero());
+    EmitBranch(instr, ne);
+  } else if (r.IsDouble()) {
+    DCHECK(!info()->IsStub());
+    DwVfpRegister reg = ToDoubleRegister(instr->value());
+    // Test the double value. Zero and NaN are false.
+    __ VFPCompareAndSetFlags(reg, 0.0);
+    __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
+    EmitBranch(instr, ne);
+  } else {
+    DCHECK(r.IsTagged());
+    Register reg = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      DCHECK(!info()->IsStub());
+      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+      EmitBranch(instr, eq);
+    } else if (type.IsSmi()) {
+      DCHECK(!info()->IsStub());
+      __ cmp(reg, Operand::Zero());
+      EmitBranch(instr, ne);
+    } else if (type.IsJSArray()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, al);
+    } else if (type.IsHeapNumber()) {
+      DCHECK(!info()->IsStub());
+      DwVfpRegister dbl_scratch = double_scratch0();
+      __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+      // Test the double value. Zero and NaN are false.
+      __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
+      __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
+      EmitBranch(instr, ne);
+    } else if (type.IsString()) {
+      DCHECK(!info()->IsStub());
+      __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
+      __ cmp(ip, Operand::Zero());
+      EmitBranch(instr, ne);
+    } else {
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+        __ b(eq, instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+        __ b(eq, instr->TrueLabel(chunk_));
+        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+        __ b(eq, instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ CompareRoot(reg, Heap::kNullValueRootIndex);
+        __ b(eq, instr->FalseLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ cmp(reg, Operand::Zero());
+        __ b(eq, instr->FalseLabel(chunk_));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ SmiTst(reg);
+        DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+      }
+
+      const Register map = scratch0();
+      if (expected.NeedsMap()) {
+        __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ tst(ip, Operand(1 << Map::kIsUndetectable));
+          __ b(ne, instr->FalseLabel(chunk_));
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
+        __ b(ge, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+        __ b(ge, &not_string);
+        __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
+        __ cmp(ip, Operand::Zero());
+        __ b(ne, instr->TrueLabel(chunk_));
+        __ b(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
+        __ b(eq, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+        // SIMD value -> true.
+        __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
+        __ b(eq, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        DwVfpRegister dbl_scratch = double_scratch0();
+        Label not_heap_number;
+        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+        __ b(ne, &not_heap_number);
+        __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+        __ VFPCompareAndSetFlags(dbl_scratch, 0.0);
+        __ cmp(r0, r0, vs);  // NaN -> false.
+        __ b(eq, instr->FalseLabel(chunk_));  // +0, -0 -> false.
+        __ b(instr->TrueLabel(chunk_));
+        __ bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+      }
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  if (!IsNextEmittedBlock(block)) {
+    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = kNoCondition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? lo : lt;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? hi : gt;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? ls : le;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? hs : ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  bool is_unsigned =
+      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+  Condition cond = TokenToCondition(instr->op(), is_unsigned);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Compare left and right operands as doubles and load the
+      // resulting flags into the normal status register.
+      __ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
+      // If a NaN is involved, i.e. the result is unordered (V set),
+      // jump to false block label.
+      __ b(vs, instr->FalseLabel(chunk_));
+    } else {
+      if (right->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(right));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          __ cmp(ToRegister(left), Operand(Smi::FromInt(value)));
+        } else {
+          __ cmp(ToRegister(left), Operand(value));
+        }
+      } else if (left->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(left));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          __ cmp(ToRegister(right), Operand(Smi::FromInt(value)));
+        } else {
+          __ cmp(ToRegister(right), Operand(value));
+        }
+        // We commuted the operands, so commute the condition.
+        cond = CommuteCondition(cond);
+      } else {
+        __ cmp(ToRegister(left), ToRegister(right));
+      }
+    }
+    EmitBranch(instr, cond);
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+
+  __ cmp(left, Operand(right));
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+  if (instr->hydrogen()->representation().IsTagged()) {
+    Register input_reg = ToRegister(instr->object());
+    __ mov(ip, Operand(factory()->the_hole_value()));
+    __ cmp(input_reg, ip);
+    EmitBranch(instr, eq);
+    return;
+  }
+
+  DwVfpRegister input_reg = ToDoubleRegister(instr->object());
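+  // Comparing the value against itself sets the V flag (unordered) iff it is
+  // NaN; the hole is a particular NaN, so an ordered result ('vc') means the
+  // value cannot be the hole.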
+  __ VFPCompareAndSetFlags(input_reg, input_reg);
+  EmitFalseBranch(instr, vc);
+
+  Register scratch = scratch0();
+  __ VmovHigh(scratch, input_reg);
+  __ cmp(scratch, Operand(kHoleNanUpper32));
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+  Representation rep = instr->hydrogen()->value()->representation();
+  DCHECK(!rep.IsInteger32());
+  Register scratch = ToRegister(instr->temp());
+
+  if (rep.IsDouble()) {
+    DwVfpRegister value = ToDoubleRegister(instr->value());
+    __ VFPCompareAndSetFlags(value, 0.0);
+    EmitFalseBranch(instr, ne);
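+    // The value compares equal to 0.0, so it is -0 exactly when its upper
+    // word is 0x80000000 (sign bit set, all other bits zero).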
+    __ VmovHigh(scratch, value);
+    __ cmp(scratch, Operand(0x80000000));
+  } else {
+    Register value = ToRegister(instr->value());
+    __ CheckMap(value,
+                scratch,
+                Heap::kHeapNumberMapRootIndex,
+                instr->FalseLabel(chunk()),
+                DO_SMI_CHECK);
+    __ ldr(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+    __ ldr(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+    __ cmp(scratch, Operand(0x80000000));
+    __ cmp(ip, Operand(0x00000000), eq);
+  }
+  EmitBranch(instr, eq);
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+  return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp1 = ToRegister(instr->temp());
+
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->type().IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+  Condition true_cond =
+      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
+
+  EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Register input_reg = EmitLoadRegister(instr->value(), ip);
+  __ SmiTst(input_reg);
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  __ tst(temp, Operand(1 << Map::kIsUndetectable));
+  EmitBranch(instr, ne);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r1));
+  DCHECK(ToRegister(instr->right()).is(r0));
+
+  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+  __ cmp(r0, Operand::Zero());
+
+  EmitBranch(instr, ComputeCompareCondition(instr->op()));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  DCHECK(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return hs;
+  if (from == FIRST_TYPE) return ls;
+  UNREACHABLE();
+  return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+
+  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+  EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ AssertString(input);
+
+  __ ldr(result, FieldMemOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ ldr(scratch,
+         FieldMemOperand(input, String::kHashFieldOffset));
+  __ tst(scratch, Operand(String::kContainsCachedArrayIndexMask));
+  EmitBranch(instr, eq);
+}
+
+
+// Branches to a label or falls through with the answer in flags.  Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
+
+  __ JumpIfSmi(input, is_false);
+
+  __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+    __ b(eq, is_true);
+  } else {
+    __ b(eq, is_false);
+  }
+
+  // Check if the constructor in the map is a function.
+  Register instance_type = ip;
+  __ GetMapConstructor(temp, temp, temp2, instance_type);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ cmp(instance_type, Operand(JS_FUNCTION_TYPE));
+  if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
+    __ b(ne, is_true);
+  } else {
+    __ b(ne, is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ ldr(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(temp, FieldMemOperand(temp,
+                               SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are internalized it is sufficient to use an
+  // identity comparison.
+  __ cmp(temp, Operand(class_name));
+  // End with the answer in flags.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = scratch0();
+  Register temp2 = ToRegister(instr->temp());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+      class_name, input, temp, temp2);
+
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  __ ldr(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+  __ cmp(temp, Operand(instr->map()));
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+  DCHECK(ToRegister(instr->result()).is(r0));
+  InstanceOfStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+    LHasInPrototypeChainAndBranch* instr) {
+  Register const object = ToRegister(instr->object());
+  Register const object_map = scratch0();
+  Register const object_instance_type = ip;
+  Register const object_prototype = object_map;
+  Register const prototype = ToRegister(instr->prototype());
+
+  // The {object} must be a spec object.  It's sufficient to know that {object}
+  // is not a smi, since all other non-spec objects have {null} prototypes and
+  // will be ruled out below.
+  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+    __ SmiTst(object);
+    EmitFalseBranch(instr, eq);
+  }
+
+  // Loop through the {object}s prototype chain looking for the {prototype}.
+  __ ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+  Label loop;
+  __ bind(&loop);
+
+  // Deoptimize if the object needs to be access checked.
+  __ ldrb(object_instance_type,
+          FieldMemOperand(object_map, Map::kBitFieldOffset));
+  __ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
+  DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
+  // Deoptimize for proxies.
+  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+  DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+
+  __ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+  __ cmp(object_prototype, prototype);
+  EmitTrueBranch(instr, eq);
+  __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+  EmitFalseBranch(instr, eq);
+  __ ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+  __ b(&loop);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Token::Value op = instr->op();
+
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // This instruction also signals that no smi code was inlined.
+  __ cmp(r0, Operand::Zero());
+
+  Condition condition = ComputeCompareCondition(op);
+  __ LoadRoot(ToRegister(instr->result()),
+              Heap::kTrueValueRootIndex,
+              condition);
+  __ LoadRoot(ToRegister(instr->result()),
+              Heap::kFalseValueRootIndex,
+              NegateCondition(condition));
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in r0. Since we're leaving
+    // the code managed by the register allocator and tearing down the frame,
+    // it's safe to write to the context register.
+    __ push(r0);
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntime(Runtime::kTraceExit);
+  }
+  if (info()->saves_caller_doubles()) {
+    RestoreCallerDoubles();
+  }
+  if (NeedsEagerFrame()) {
+    masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+  }
+  { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+    if (instr->has_constant_parameter_count()) {
+      int parameter_count = ToInteger32(instr->constant_parameter_count());
+      int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+      if (sp_delta != 0) {
+        __ add(sp, sp, Operand(sp_delta));
+      }
+    } else {
+      DCHECK(info()->IsStub());  // Functions would need to drop one more value.
+      Register reg = ToRegister(instr->parameter_count());
+      // The argument count parameter is a smi.
+      __ SmiUntag(reg);
+      __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
+    }
+
+    __ Jump(lr);
+  }
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = LoadDescriptor::SlotRegister();
+  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(r0));
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Move(vector_register, vector);
+  // No need to allocate this register.
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ mov(slot_register, Operand(Smi::FromInt(index)));
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = ToRegister(instr->temp_slot());
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Move(vector_register, vector);
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ mov(slot_register, Operand(Smi::FromInt(index)));
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(r0));
+
+  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+                                         SLOPPY, PREMONOMORPHIC).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ ldr(result, ContextMemOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, ip);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+    } else {
+      __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
+  MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ ldr(scratch, target);
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(scratch, ip);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+    } else {
+      __ b(ne, &skip_assignment);
+    }
+  }
+
+  __ str(value, target);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context,
+                              target.offset(),
+                              value,
+                              scratch,
+                              GetLinkRegisterState(),
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
+  }
+
+  __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+  Register object = ToRegister(instr->object());
+
+  if (access.IsExternalMemory()) {
+    Register result = ToRegister(instr->result());
+    MemOperand operand = MemOperand(object, offset);
+    __ Load(result, operand, access.representation());
+    return;
+  }
+
+  if (instr->hydrogen()->representation().IsDouble()) {
+    DwVfpRegister result = ToDoubleRegister(instr->result());
+    __ vldr(result, FieldMemOperand(object, offset));
+    return;
+  }
+
+  Register result = ToRegister(instr->result());
+  if (!access.IsInobject()) {
+    __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    object = result;
+  }
+  MemOperand operand = FieldMemOperand(object, offset);
+  __ Load(result, operand, access.representation());
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(r0));
+
+  // Name is always in r2.
+  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(
+          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
+          instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register scratch = scratch0();
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // Get the prototype or initial map from the function.
+  __ ldr(result,
+         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(result, ip);
+  DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+  __ b(ne, &done);
+
+  // Get the prototype from the initial map.
+  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+  // There are two words between the frame pointer and the last argument.
+  // Subtracting the index from the length accounts for one of them; add one
+  // more for the other.
+  if (instr->length()->IsConstantOperand()) {
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    if (instr->index()->IsConstantOperand()) {
+      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+      int index = (const_length - const_index) + 1;
+      __ ldr(result, MemOperand(arguments, index * kPointerSize));
+    } else {
+      Register index = ToRegister(instr->index());
+      __ rsb(result, index, Operand(const_length + 1));
+      __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
+    }
+  } else if (instr->index()->IsConstantOperand()) {
+    Register length = ToRegister(instr->length());
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int loc = const_index - 1;
+    if (loc != 0) {
+      __ sub(result, length, Operand(loc));
+      __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
+    } else {
+      __ ldr(result, MemOperand(arguments, length, LSL, kPointerSizeLog2));
+    }
+  } else {
+    Register length = ToRegister(instr->length());
+    Register index = ToRegister(instr->index());
+    __ sub(result, length, index);
+    __ add(result, result, Operand(1));
+    __ ldr(result, MemOperand(arguments, result, LSL, kPointerSizeLog2));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+      ? (element_size_shift - kSmiTagSize) : element_size_shift;
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+    DwVfpRegister result = ToDoubleRegister(instr->result());
+    Operand operand = key_is_constant
+        ? Operand(constant_key << element_size_shift)
+        : Operand(key, LSL, shift_size);
+    __ add(scratch0(), external_pointer, operand);
+    if (elements_kind == FLOAT32_ELEMENTS) {
+      __ vldr(double_scratch0().low(), scratch0(), base_offset);
+      __ vcvt_f64_f32(result, double_scratch0().low());
+    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
+      __ vldr(result, scratch0(), base_offset);
+    }
+  } else {
+    Register result = ToRegister(instr->result());
+    MemOperand mem_operand = PrepareKeyedOperand(
+        key, external_pointer, key_is_constant, constant_key,
+        element_size_shift, shift_size, base_offset);
+    switch (elements_kind) {
+      case INT8_ELEMENTS:
+        __ ldrsb(result, mem_operand);
+        break;
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ ldrb(result, mem_operand);
+        break;
+      case INT16_ELEMENTS:
+        __ ldrsh(result, mem_operand);
+        break;
+      case UINT16_ELEMENTS:
+        __ ldrh(result, mem_operand);
+        break;
+      case INT32_ELEMENTS:
+        __ ldr(result, mem_operand);
+        break;
+      case UINT32_ELEMENTS:
+        __ ldr(result, mem_operand);
+        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+          __ cmp(result, Operand(0x80000000));
+          DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+  Register elements = ToRegister(instr->elements());
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  DwVfpRegister result = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+
+  int base_offset = instr->base_offset();
+  if (key_is_constant) {
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    base_offset += constant_key * kDoubleSize;
+  }
+  __ add(scratch, elements, Operand(base_offset));
+
+  if (!key_is_constant) {
+    key = ToRegister(instr->key());
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - kSmiTagSize) : element_size_shift;
+    __ add(scratch, scratch, Operand(key, LSL, shift_size));
+  }
+
+  __ vldr(result, scratch, 0);
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
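+    // The hole is encoded as a NaN with a distinctive upper word, so load
+    // the high 32 bits of the double (offset past the low word) and compare
+    // them against kHoleNanUpper32.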
+    __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+    __ cmp(scratch, Operand(kHoleNanUpper32));
+    DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  if (instr->key()->IsConstantOperand()) {
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    Register key = ToRegister(instr->key());
+    // Even though the HLoadKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bound check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (instr->hydrogen()->key()->representation().IsSmi()) {
+      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
+    } else {
+      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+    }
+  }
+  __ ldr(result, MemOperand(store_base, offset));
+
+  // Check for the hole value.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+      __ SmiTst(result);
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
+    } else {
+      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+      __ cmp(result, scratch);
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+    }
+  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
+    Label done;
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, scratch);
+    __ b(ne, &done);
+    if (info()->IsStub()) {
+      // A stub can safely convert the hole to undefined only if the array
+      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // it needs to bail out.
+      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
+      __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+      __ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+      DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+    }
+    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+  if (instr->is_fixed_typed_array()) {
+    DoLoadKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->representation().IsDouble()) {
+    DoLoadKeyedFixedDoubleArray(instr);
+  } else {
+    DoLoadKeyedFixedArray(instr);
+  }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+                                         Register base,
+                                         bool key_is_constant,
+                                         int constant_key,
+                                         int element_size,
+                                         int shift_size,
+                                         int base_offset) {
+  if (key_is_constant) {
+    return MemOperand(base, (constant_key << element_size) + base_offset);
+  }
+
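+  // A shift_size of -1 means the key is a smi but the elements are
+  // byte-sized, so the smi tag is shifted out with a right shift by one.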
+  if (base_offset == 0) {
+    if (shift_size >= 0) {
+      return MemOperand(base, key, LSL, shift_size);
+    } else {
+      DCHECK_EQ(-1, shift_size);
+      return MemOperand(base, key, LSR, 1);
+    }
+  }
+
+  if (shift_size >= 0) {
+    __ add(scratch0(), base, Operand(key, LSL, shift_size));
+    return MemOperand(scratch0(), base_offset);
+  } else {
+    DCHECK_EQ(-1, shift_size);
+    __ add(scratch0(), base, Operand(key, ASR, 1));
+    return MemOperand(scratch0(), base_offset);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+                        isolate(), instr->hydrogen()->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
+    __ sub(result, sp, Operand(2 * kPointerSize));
+  } else {
+    // Check if the calling frame is an arguments adaptor frame.
+    Label done, adapted;
+    __ ldr(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ ldr(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ cmp(result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+    // Result is the frame pointer for the frame if not adapted, and the
+    // frame pointer of the real frame below the adaptor frame if adapted.
+    __ mov(result, fp, LeaveCC, ne);
+    __ mov(result, scratch, LeaveCC, eq);
+  }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elem = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  __ cmp(fp, elem);
+  __ mov(result, Operand(scope()->num_parameters()));
+  __ b(eq, &done);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ ldr(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ldr(result,
+         MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, result_in_receiver;
+
+  if (!instr->hydrogen()->known_function()) {
+    // Do not transform the receiver to object for strict mode
+    // functions.
+    __ ldr(scratch,
+           FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(scratch,
+           FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+    int mask = 1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+    __ tst(scratch, Operand(mask));
+    __ b(ne, &result_in_receiver);
+
+    // Do not transform the receiver to object for builtins.
+    __ tst(scratch, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
+    __ b(ne, &result_in_receiver);
+  }
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+  __ cmp(receiver, scratch);
+  __ b(eq, &global_object);
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ cmp(receiver, scratch);
+  __ b(eq, &global_object);
+
+  // Deoptimize if the receiver is not a JS object.
+  __ SmiTst(receiver);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+  __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
+  DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+
+  __ b(&result_in_receiver);
+  __ bind(&global_object);
+  __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
+  __ ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+  __ ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+
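+  // If result aliases receiver, the move below would be a no-op, so binding
+  // the label here is all that is needed.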
+  if (result.is(receiver)) {
+    __ bind(&result_in_receiver);
+  } else {
+    Label result_ok;
+    __ b(&result_ok);
+    __ bind(&result_in_receiver);
+    __ mov(result, receiver);
+    __ bind(&result_ok);
+  }
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  DCHECK(receiver.is(r0));  // Used for parameter count.
+  DCHECK(function.is(r1));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(r0));
+
+  // Copy the arguments to this function, possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  __ cmp(length, Operand(kArgumentsLimit));
+  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ push(receiver);
+  __ mov(receiver, length);
+  // The arguments start one pointer size above elements.
+  __ add(elements, elements, Operand(1 * kPointerSize));
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ cmp(length, Operand::Zero());
+  __ b(eq, &invoke);
+  __ bind(&loop);
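+  // elements points one word below the first argument, so indexing with the
+  // 1-based counter in length loads arguments[length - 1].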
+  __ ldr(scratch, MemOperand(elements, length, LSL, 2));
+  __ push(scratch);
+  __ sub(length, length, Operand(1), SetCC);
+  __ b(ne, &loop);
+
+  __ bind(&invoke);
+  DCHECK(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  // The number of arguments is stored in receiver, which is r0, as expected
+  // by InvokeFunction.
+  ParameterCount actual(receiver);
+  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+                    safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->value();
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort(kDoPushArgumentNotImplementedForDoubleType);
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, ip);
+    __ push(argument_reg);
+  }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
+  Register result = ToRegister(instr->result());
+  if (info()->IsOptimizing()) {
+    __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in cp.
+    DCHECK(result.is(cp));
+  }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  __ Move(scratch0(), instr->hydrogen()->pairs());
+  __ push(scratch0());
+  __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
+  __ push(scratch0());
+  CallRuntime(Runtime::kDeclareGlobals, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count, int arity,
+                                 LInstruction* instr) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+
+  Register function_reg = r1;
+
+  LPointerMap* pointers = instr->pointer_map();
+
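+  // A direct call is possible when no arguments adaptation is needed, either
+  // because the arity matches or because the callee opts out of adaptation.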
+  if (can_invoke_directly) {
+    // Change context.
+    __ ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+    // Always initialize new target and number of actual arguments.
+    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+    __ mov(r0, Operand(arity));
+
+    // Invoke function.
+    __ ldr(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+    __ Call(ip);
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+  }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  DCHECK(instr->context() != NULL);
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Deoptimize if not a heap number.
+  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+  __ cmp(scratch, Operand(ip));
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+
+  Label done;
+  Register exponent = scratch0();
+  scratch = no_reg;
+  __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it.
+  __ tst(exponent, Operand(HeapNumber::kSignMask));
+  // Move the input to the result if necessary.
+  __ Move(result, input);
+  __ b(eq, &done);
+
+  // Input is negative. Reverse its sign.
+  // Preserve the value of all registers.
+  {
+    PushSafepointRegistersScope scope(this);
+
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(r1) ? r0 : r1;
+    Register tmp2 = input.is(r2) ? r0 : r2;
+    Register tmp3 = input.is(r3) ? r0 : r3;
+    Register tmp4 = input.is(r4) ? r0 : r4;
+
+    // exponent: floating point exponent value.
+
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ b(&allocated);
+
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
+
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+                            instr->context());
+    // Set the pointer to the new heap number in tmp1.
+    if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
+    // Restore input_reg after call to runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
+    __ bic(exponent, exponent, Operand(HeapNumber::kSignMask));
+    __ str(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ ldr(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ str(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+    __ StoreToSafepointRegisterSlot(tmp1, result);
+  }
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  __ cmp(input, Operand::Zero());
+  __ Move(result, input, pl);
+  // We can make rsb conditional because the previous cmp instruction
+  // will clear the V (overflow) flag and rsb won't set this flag
+  // if input is positive.
+  __ rsb(result, input, Operand::Zero(), SetCC, mi);
+  // Deoptimize on overflow.
+  DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMathAbs* instr_;
+  };
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsDouble()) {
+    DwVfpRegister input = ToDoubleRegister(instr->value());
+    DwVfpRegister result = ToDoubleRegister(instr->result());
+    __ vabs(result, input);
+  } else if (r.IsSmiOrInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else {
+    // Representation is tagged.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input = ToRegister(instr->value());
+    // Smi check.
+    __ JumpIfNotSmi(input, deferred->entry());
+    // If smi, handle it directly.
+    EmitIntegerMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+  DwVfpRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register input_high = scratch0();
+  Label done, exact;
+
+  __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
+  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+  __ bind(&exact);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    __ cmp(result, Operand::Zero());
+    __ b(ne, &done);
+    __ cmp(input_high, Operand::Zero());
+    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+  DwVfpRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
+  DwVfpRegister input_plus_dot_five = double_scratch1;
+  Register input_high = scratch0();
+  DwVfpRegister dot_five = double_scratch0();
+  Label convert, done;
+
+  __ Vmov(dot_five, 0.5, scratch0());
+  __ vabs(double_scratch1, input);
+  __ VFPCompareAndSetFlags(double_scratch1, dot_five);
+  // If input is in [-0.5, -0], the result is -0.
+  // If input is in [+0, +0.5[, the result is +0.
+  // If the input is +0.5, the result is 1.
+  __ b(hi, &convert);  // Out of [-0.5, +0.5].
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ VmovHigh(input_high, input);
+    __ cmp(input_high, Operand::Zero());
+    // [-0.5, -0].
+    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+  }
+  __ VFPCompareAndSetFlags(input, dot_five);
+  __ mov(result, Operand(1), LeaveCC, eq);  // +0.5.
+  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
+  // flag kBailoutOnMinusZero.
+  __ mov(result, Operand::Zero(), LeaveCC, ne);
+  __ b(&done);
+
+  __ bind(&convert);
+  __ vadd(input_plus_dot_five, input, dot_five);
+  // Reuse dot_five (double_scratch0) as we no longer need this value.
+  __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
+                   &done, &done);
+  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+  DwVfpRegister output_reg = ToDoubleRegister(instr->result());
+  LowDwVfpRegister scratch = double_scratch0();
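+  // Round to float32 precision via a double -> single -> double round trip.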
+  __ vcvt_f32_f64(scratch.low(), input_reg);
+  __ vcvt_f64_f32(output_reg, scratch.low());
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  DwVfpRegister input = ToDoubleRegister(instr->value());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
+  __ vsqrt(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  DwVfpRegister input = ToDoubleRegister(instr->value());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
+  DwVfpRegister temp = double_scratch0();
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done;
+  __ vmov(temp, -V8_INFINITY, scratch0());
+  __ VFPCompareAndSetFlags(input, temp);
+  __ vneg(result, temp, eq);
+  __ b(&done, eq);
+
+  // Add +0 to convert -0 to +0.
+  __ vadd(result, input, kDoubleRegZero);
+  __ vsqrt(result, result);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
+         ToDoubleRegister(instr->right()).is(d1));
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(d0));
+  DCHECK(ToDoubleRegister(instr->result()).is(d2));
+
+  if (exponent_type.IsSmi()) {
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    DCHECK(!r6.is(tagged_exponent));
+    __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(r6, Operand(ip));
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+    __ bind(&no_deopt);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(isolate(), MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+    __ CallStub(&stub);
+  }
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  DwVfpRegister input = ToDoubleRegister(instr->value());
+  DwVfpRegister result = ToDoubleRegister(instr->result());
+  DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+  DwVfpRegister double_scratch2 = double_scratch0();
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  MathExpGenerator::EmitMathExp(
+      masm(), input, result, double_scratch1, double_scratch2,
+      temp1, temp2, scratch0());
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+                   0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  __ clz(result, input);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(r1));
+  DCHECK(instr->HasPointerMap());
+
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(r1, no_reg, count, CALL_FUNCTION, generator);
+  } else {
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
+                      instr->arity(), instr);
+  }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  DCHECK(ToRegister(instr->result()).is(r0));
+
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ Jump(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      // Make sure we don't emit any additional entries in the constant pool
+      // before the call to ensure that CallCodeSize() calculated the correct
+      // number of instructions for the constant pool load.
+      {
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      }
+      __ Jump(target);
+    }
+  } else {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      PlatformInterfaceDescriptor* call_descriptor =
+          instr->descriptor().platform_specific_descriptor();
+      if (call_descriptor != NULL) {
+        __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
+                call_descriptor->storage_mode());
+      } else {
+        __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al);
+      }
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      // Make sure we don't emit any additional entries in the constant pool
+      // before the call to ensure that CallCodeSize() calculated the correct
+      // number of instructions for the constant pool load.
+      {
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      }
+      __ Call(target);
+    }
+    generator.AfterCall();
+  }
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+  DCHECK(ToRegister(instr->function()).is(r1));
+  DCHECK(ToRegister(instr->result()).is(r0));
+
+  // Change context.
+  __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+  // Always initialize new target and number of actual arguments.
+  __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+  __ mov(r0, Operand(instr->arity()));
+
+  // Load the code entry address.
+  __ ldr(ip, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
+  __ Call(ip);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(r1));
+  DCHECK(ToRegister(instr->result()).is(r0));
+
+  int arity = instr->arity();
+  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    Register slot_register = ToRegister(instr->temp_slot());
+    Register vector_register = ToRegister(instr->temp_vector());
+    DCHECK(slot_register.is(r3));
+    DCHECK(vector_register.is(r2));
+
+    AllowDeferredHandleDereference vector_structure_check;
+    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+    int index = vector->GetIndex(instr->hydrogen()->slot());
+
+    __ Move(vector_register, vector);
+    __ mov(slot_register, Operand(Smi::FromInt(index)));
+
+    Handle<Code> ic =
+        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ mov(r0, Operand(arity));
+    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(r1));
+  DCHECK(ToRegister(instr->result()).is(r0));
+
+  __ mov(r0, Operand(instr->arity()));
+  if (instr->arity() == 1) {
+    // We only need the allocation site when there is a length argument; that
+    // case may bail out to the runtime, which will determine the correct
+    // elements kind using the site.
+    __ Move(r2, instr->hydrogen()->site());
+  } else {
+    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+  }
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+      // We might need the holey variant of the stub: a nonzero length
+      // argument creates an array with holes, so look at the first argument.
+      __ ldr(r5, MemOperand(sp, 0));
+      __ cmp(r5, Operand::Zero());
+      __ b(eq, &packed_case);
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(),
+                                              holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      __ jmp(&done);
+      __ bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    __ bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  __ add(code_object, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ str(code_object,
+         FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+    __ add(result, base, Operand(ToInteger32(offset)));
+  } else {
+    Register offset = ToRegister(instr->offset());
+    __ add(result, base, offset);
+  }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Representation representation = instr->representation();
+
+  Register object = ToRegister(instr->object());
+  Register scratch = scratch0();
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    Register value = ToRegister(instr->value());
+    MemOperand operand = MemOperand(object, offset);
+    __ Store(value, operand, representation);
+    return;
+  }
+
+  __ AssertNotSmi(object);
+
+  DCHECK(!representation.IsSmi() ||
+         !instr->value()->IsConstantOperand() ||
+         IsSmi(LConstantOperand::cast(instr->value())));
+  if (representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    DwVfpRegister value = ToDoubleRegister(instr->value());
+    __ vstr(value, FieldMemOperand(object, offset));
+    return;
+  }
+
+  if (instr->hydrogen()->has_transition()) {
+    Handle<Map> transition = instr->hydrogen()->transition_map();
+    AddDeprecationDependency(transition);
+    __ mov(scratch, Operand(transition));
+    __ str(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+      Register temp = ToRegister(instr->temp());
+      // Update the write barrier for the map field.
+      __ RecordWriteForMap(object,
+                           scratch,
+                           temp,
+                           GetLinkRegisterState(),
+                           kSaveFPRegs);
+    }
+  }
+
+  // Do the store.
+  Register value = ToRegister(instr->value());
+  if (access.IsInobject()) {
+    MemOperand operand = FieldMemOperand(object, offset);
+    __ Store(value, operand, representation);
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      // Update the write barrier for the object for in-object properties.
+      __ RecordWriteField(object,
+                          offset,
+                          value,
+                          scratch,
+                          GetLinkRegisterState(),
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          instr->hydrogen()->SmiCheckForWriteBarrier(),
+                          instr->hydrogen()->PointersToHereCheckForValue());
+    }
+  } else {
+    __ ldr(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    MemOperand operand = FieldMemOperand(scratch, offset);
+    __ Store(value, operand, representation);
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      // Update the write barrier for the properties array.
+      // object is used as a scratch register.
+      __ RecordWriteField(scratch,
+                          offset,
+                          value,
+                          object,
+                          GetLinkRegisterState(),
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          instr->hydrogen()->SmiCheckForWriteBarrier(),
+                          instr->hydrogen()->PointersToHereCheckForValue());
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+  }
+
+  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
+  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
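+  // Deoptimize when index >= length (hs), or index > length (hi) if equality
+  // is allowed; the condition is commuted below when the operands swap.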
+  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
+  if (instr->index()->IsConstantOperand()) {
+    Operand index = ToOperand(instr->index());
+    Register length = ToRegister(instr->length());
+    __ cmp(length, index);
+    cc = CommuteCondition(cc);
+  } else {
+    Register index = ToRegister(instr->index());
+    Operand length = ToOperand(instr->length());
+    __ cmp(index, length);
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+    Label done;
+    __ b(NegateCondition(cc), &done);
+    __ stop("eliminated bounds check failed");
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
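+    // Keys with any of the top four bits set would overflow the 32-bit
+    // offset once shifted by the element size (at most 3 bits).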
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+      ? (element_size_shift - kSmiTagSize) : element_size_shift;
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+    Register address = scratch0();
+    DwVfpRegister value(ToDoubleRegister(instr->value()));
+    if (key_is_constant) {
+      if (constant_key != 0) {
+        __ add(address, external_pointer,
+               Operand(constant_key << element_size_shift));
+      } else {
+        address = external_pointer;
+      }
+    } else {
+      __ add(address, external_pointer, Operand(key, LSL, shift_size));
+    }
+    if (elements_kind == FLOAT32_ELEMENTS) {
+      __ vcvt_f32_f64(double_scratch0().low(), value);
+      __ vstr(double_scratch0().low(), address, base_offset);
+    } else {  // Storing doubles, not floats.
+      __ vstr(value, address, base_offset);
+    }
+  } else {
+    Register value(ToRegister(instr->value()));
+    MemOperand mem_operand = PrepareKeyedOperand(
+        key, external_pointer, key_is_constant, constant_key,
+        element_size_shift, shift_size,
+        base_offset);
+    switch (elements_kind) {
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+      case INT8_ELEMENTS:
+        __ strb(value, mem_operand);
+        break;
+      case INT16_ELEMENTS:
+      case UINT16_ELEMENTS:
+        __ strh(value, mem_operand);
+        break;
+      case INT32_ELEMENTS:
+      case UINT32_ELEMENTS:
+        __ str(value, mem_operand);
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  DwVfpRegister value = ToDoubleRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  DwVfpRegister double_scratch = double_scratch0();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int base_offset = instr->base_offset();
+
+  // Calculate the effective address of the slot in the array to store the
+  // double value.
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  if (key_is_constant) {
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    __ add(scratch, elements,
+           Operand((constant_key << element_size_shift) + base_offset));
+  } else {
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - kSmiTagSize) : element_size_shift;
+    __ add(scratch, elements, Operand(base_offset));
+    __ add(scratch, scratch,
+           Operand(ToRegister(instr->key()), LSL, shift_size));
+  }
+
+  if (instr->NeedsCanonicalization()) {
+    // Force a canonical NaN.
+    if (masm()->emit_debug_code()) {
+      __ vmrs(ip);
+      __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
+      __ Assert(ne, kDefaultNaNModeNotSet);
+    }
+    __ VFPCanonicalizeNaN(double_scratch, value);
+    __ vstr(double_scratch, scratch, 0);
+  } else {
+    __ vstr(value, scratch, 0);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
+      : no_reg;
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    // Even though the HStoreKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (instr->hydrogen()->key()->representation().IsSmi()) {
+      __ add(scratch, elements, Operand::PointerOffsetFromSmiKey(key));
+    } else {
+      __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+    }
+  }
+  __ str(value, MemOperand(store_base, offset));
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute address of modified element and store it into key register.
+    __ add(key, store_base, Operand(offset));
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   GetLinkRegisterState(),
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed,
+                   instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // By cases: external (typed) array, fast double array, or fast fixed
+  // array.
+  if (instr->is_fixed_typed_array()) {
+    DoStoreKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+    DoStoreKeyedFixedDoubleArray(instr);
+  } else {
+    DoStoreKeyedFixedArray(instr);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
+}
+
+
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+  class DeferredMaybeGrowElements final : public LDeferredCode {
+   public:
+    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMaybeGrowElements* instr_;
+  };
+
+  Register result = r0;
+  DeferredMaybeGrowElements* deferred =
+      new (zone()) DeferredMaybeGrowElements(this, instr);
+  LOperand* key = instr->key();
+  LOperand* current_capacity = instr->current_capacity();
+
+  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+  DCHECK(key->IsConstantOperand() || key->IsRegister());
+  DCHECK(current_capacity->IsConstantOperand() ||
+         current_capacity->IsRegister());
+
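+  // Grow whenever key >= current capacity; each operand may be a constant or
+  // a register, so pick the matching comparison form.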
+  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    if (constant_key >= constant_capacity) {
+      // Deferred case.
+      __ jmp(deferred->entry());
+    }
+  } else if (key->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    __ cmp(ToRegister(current_capacity), Operand(constant_key));
+    __ b(le, deferred->entry());
+  } else if (current_capacity->IsConstantOperand()) {
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    __ cmp(ToRegister(key), Operand(constant_capacity));
+    __ b(ge, deferred->entry());
+  } else {
+    __ cmp(ToRegister(key), ToRegister(current_capacity));
+    __ b(ge, deferred->entry());
+  }
+
+  if (instr->elements()->IsRegister()) {
+    __ Move(result, ToRegister(instr->elements()));
+  } else {
+    __ ldr(result, ToMemOperand(instr->elements()));
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = r0;
+  __ mov(result, Operand::Zero());
+
+  // We have to call a stub.
+  {
+    PushSafepointRegistersScope scope(this);
+    if (instr->object()->IsRegister()) {
+      __ Move(result, ToRegister(instr->object()));
+    } else {
+      __ ldr(result, ToMemOperand(instr->object()));
+    }
+
+    LOperand* key = instr->key();
+    if (key->IsConstantOperand()) {
+      __ Move(r3, Operand(ToSmi(LConstantOperand::cast(key))));
+    } else {
+      __ Move(r3, ToRegister(key));
+      __ SmiTag(r3);
+    }
+
+    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+                               instr->hydrogen()->kind());
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(
+        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    __ StoreToSafepointRegisterSlot(result, result);
+  }
+
+  // Deopt on smi, which means the elements array changed to dictionary mode.
+  __ SmiTst(result);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = instr->from_kind();
+  ElementsKind to_kind = instr->to_kind();
+
+  Label not_applicable;
+  __ ldr(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(from_map));
+  __ b(ne, &not_applicable);
+
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ mov(new_map_reg, Operand(to_map));
+    __ str(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    // Write barrier.
+    __ RecordWriteForMap(object_reg,
+                         new_map_reg,
+                         scratch,
+                         GetLinkRegisterState(),
+                         kDontSaveFPRegs);
+  } else {
+    DCHECK(ToRegister(instr->context()).is(cp));
+    DCHECK(object_reg.is(r0));
+    PushSafepointRegistersScope scope(this);
+    __ Move(r1, to_map);
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+  __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r1));
+  DCHECK(ToRegister(instr->right()).is(r0));
+  StringAddStub stub(isolate(),
+                     instr->hydrogen()->flags(),
+                     instr->hydrogen()->pretenure_flag());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt final : public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new(zone()) DeferredStringCharCodeAt(this, instr);
+
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  if (instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    __ mov(scratch, Operand(Smi::FromInt(const_index)));
+    __ push(scratch);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+                          instr->context());
+  __ AssertSmi(r0);
+  __ SmiUntag(r0);
+  __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode final : public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredStringCharFromCode(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new(zone()) DeferredStringCharFromCode(this, instr);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  DCHECK(!char_code.is(result));
+
+  __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
+  __ b(hi, deferred->entry());
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+  __ add(result, result, Operand(char_code, LSL, kPointerSizeLog2));
+  __ ldr(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(result, ip);
+  __ b(eq, deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  DCHECK(output->IsDoubleRegister());
+  SwVfpRegister single_scratch = double_scratch0().low();
+  if (input->IsStackSlot()) {
+    Register scratch = scratch0();
+    __ ldr(scratch, ToMemOperand(input));
+    __ vmov(single_scratch, scratch);
+  } else {
+    __ vmov(single_scratch, ToRegister(input));
+  }
+  __ vcvt_f64_s32(ToDoubleRegister(output), single_scratch);
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+
+  SwVfpRegister flt_scratch = double_scratch0().low();
+  __ vmov(flt_scratch, ToRegister(input));
+  __ vcvt_f64_u32(ToDoubleRegister(output), flt_scratch);
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI final : public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_,
+                                       instr_->value(),
+                                       instr_->temp1(),
+                                       instr_->temp2(),
+                                       SIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagI* instr_;
+  };
+
+  Register src = ToRegister(instr->value());
+  Register dst = ToRegister(instr->result());
+
+  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
+  __ SmiTag(dst, src, SetCC);
+  __ b(vs, deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+  class DeferredNumberTagU final : public LDeferredCode {
+   public:
+    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_,
+                                       instr_->value(),
+                                       instr_->temp1(),
+                                       instr_->temp2(),
+                                       UNSIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagU* instr_;
+  };
+
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+  __ cmp(input, Operand(Smi::kMaxValue));
+  __ b(hi, deferred->entry());
+  __ SmiTag(result, input);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+                                     LOperand* value,
+                                     LOperand* temp1,
+                                     LOperand* temp2,
+                                     IntegerSignedness signedness) {
+  Label done, slow;
+  Register src = ToRegister(value);
+  Register dst = ToRegister(instr->result());
+  Register tmp1 = scratch0();
+  Register tmp2 = ToRegister(temp1);
+  Register tmp3 = ToRegister(temp2);
+  LowDwVfpRegister dbl_scratch = double_scratch0();
+
+  if (signedness == SIGNED_INT32) {
+    // There was overflow, so bits 30 and 31 of the original integer
+    // disagree. Try to allocate a heap number in new space and store
+    // the value in there. If that fails, call the runtime system.
+    if (dst.is(src)) {
+      __ SmiUntag(src, dst);
+      __ eor(src, src, Operand(0x80000000));
+    }
+    __ vmov(dbl_scratch.low(), src);
+    __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
+  } else {
+    __ vmov(dbl_scratch.low(), src);
+    __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
+  }
+
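+  // Try to allocate the heap number inline in new space first; fall through
+  // to the runtime call below if that fails.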
+  if (FLAG_inline_new) {
+    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
+    __ b(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ mov(dst, Operand::Zero());
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    // NumberTagI and NumberTagD use the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // They only call Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    __ sub(r0, r0, Operand(kHeapObjectTag));
+    __ StoreToSafepointRegisterSlot(r0, dst);
+  }
+
+  // Done. Store the value in dbl_scratch into the allocated heap number.
+  __ bind(&done);
+  __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
+  __ add(dst, dst, Operand(kHeapObjectTag));
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD final : public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+
+  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    // We want the untagged address first for performance.
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+                          DONT_TAG_RESULT);
+  } else {
+    __ jmp(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ vstr(input_reg, reg, HeapNumber::kValueOffset);
+  // Now that we have finished with the object's real address, tag it.
+  __ add(reg, reg, Operand(kHeapObjectTag));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ mov(reg, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+  __ sub(r0, r0, Operand(kHeapObjectTag));
+  __ StoreToSafepointRegisterSlot(r0, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  HChange* hchange = instr->hydrogen();
+  Register input = ToRegister(instr->value());
+  Register output = ToRegister(instr->result());
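+  // A uint32 value with either of its top two bits set does not fit in the
+  // 31-bit smi payload, so it cannot be tagged without overflow.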
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ tst(input, Operand(0xc0000000));
+    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+  }
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      !hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ SmiTag(output, input, SetCC);
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+  } else {
+    __ SmiTag(output, input);
+  }
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  if (instr->needs_check()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    // If the input is a HeapObject, SmiUntag will set the carry flag.
+    __ SmiUntag(result, input, SetCC);
+    DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
+  } else {
+    __ SmiUntag(result, input);
+  }
+}
+
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+                                DwVfpRegister result_reg,
+                                NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+  Register scratch = scratch0();
+  SwVfpRegister flt_scratch = double_scratch0().low();
+  DCHECK(!result_reg.is(double_scratch0()));
+  Label convert, load_smi, done;
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+    // Heap number map check.
+    __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(scratch, Operand(ip));
+    if (can_convert_undefined_to_nan) {
+      __ b(ne, &convert);
+    } else {
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+    }
+    // Load the heap number.
+    __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
+    if (deoptimize_on_minus_zero) {
+      __ VmovLow(scratch, result_reg);
+      __ cmp(scratch, Operand::Zero());
+      __ b(ne, &done);
+      __ VmovHigh(scratch, result_reg);
+      __ cmp(scratch, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+    }
+    __ jmp(&done);
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+      // Convert undefined (and hole) to NaN.
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      __ cmp(input_reg, Operand(ip));
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+      __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
+      __ jmp(&done);
+    }
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+  }
+  // Smi to double register conversion.
+  __ bind(&load_smi);
+  // scratch: untagged value of input_reg
+  __ vmov(flt_scratch, scratch);
+  __ vcvt_f64_s32(result_reg, flt_scratch);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Register input_reg = ToRegister(instr->value());
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->temp());
+  LowDwVfpRegister double_scratch = double_scratch0();
+  DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
+
+  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+  Label done;
+
+  // The input was optimistically untagged; revert it.
+  // The carry flag is set when we reach this deferred code, as we have just
+  // executed SmiUntag(heap_object, SetCC).
+  STATIC_ASSERT(kHeapObjectTag == 1);
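+  // adc computes 2 * input + carry, and the carry is known to be set here,
+  // so this rebuilds the original tagged pointer from the shifted value.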
+  __ adc(scratch2, input_reg, Operand(input_reg));
+
+  // Heap number map check.
+  __ ldr(scratch1, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+  __ cmp(scratch1, Operand(ip));
+
+  if (instr->truncating()) {
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    Label no_heap_number, check_bools, check_false;
+    __ b(ne, &no_heap_number);
+    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ b(&done);
+
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
+    __ bind(&no_heap_number);
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(scratch2, Operand(ip));
+    __ b(ne, &check_bools);
+    __ mov(input_reg, Operand::Zero());
+    __ b(&done);
+
+    __ bind(&check_bools);
+    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+    __ cmp(scratch2, Operand(ip));
+    __ b(ne, &check_false);
+    __ mov(input_reg, Operand(1));
+    __ b(&done);
+
+    __ bind(&check_false);
+    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+    __ cmp(scratch2, Operand(ip));
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+    __ mov(input_reg, Operand::Zero());
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+
+    __ sub(ip, scratch2, Operand(kHeapObjectTag));
+    __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
+    __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ cmp(input_reg, Operand::Zero());
+      __ b(ne, &done);
+      __ VmovHigh(scratch1, double_scratch2);
+      __ tst(scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI final : public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  DCHECK(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+
+    // Optimistically untag the input.
+    // If the input is a HeapObject, SmiUntag will set the carry flag.
+    __ SmiUntag(input_reg, SetCC);
+    // Branch to deferred code if the input was tagged.
+    // The deferred code will take care of restoring the tag.
+    __ b(cs, deferred->entry());
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DwVfpRegister result_reg = ToDoubleRegister(result);
+
+  HValue* value = instr->hydrogen()->value();
+  NumberUntagDMode mode = value->representation().IsSmi()
+      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+  EmitNumberUntagD(instr, input_reg, result_reg, mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  DwVfpRegister double_input = ToDoubleRegister(instr->value());
+  LowDwVfpRegister double_scratch = double_scratch0();
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
+    // Deoptimize if the input wasn't an int32 (inside a double).
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ cmp(result_reg, Operand::Zero());
+      __ b(ne, &done);
+      __ VmovHigh(scratch1, double_input);
+      __ tst(scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  DwVfpRegister double_input = ToDoubleRegister(instr->value());
+  LowDwVfpRegister double_scratch = double_scratch0();
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
+    // Deoptimize if the input wasn't an int32 (inside a double).
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ cmp(result_reg, Operand::Zero());
+      __ b(ne, &done);
+      __ VmovHigh(scratch1, double_input);
+      __ tst(scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+      __ bind(&done);
+    }
+  }
+  __ SmiTag(result_reg, SetCC);
+  DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->value();
+  __ SmiTst(ToRegister(input));
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ SmiTst(ToRegister(input));
+    DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+  }
+}
+
+
+void LCodeGen::DoCheckArrayBufferNotNeutered(
+    LCheckArrayBufferNotNeutered* instr) {
+  Register view = ToRegister(instr->view());
+  Register scratch = scratch0();
+
+  __ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
+  __ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+  __ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
+  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    __ cmp(scratch, Operand(first));
+
+    // If there is only one type in the interval check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+    } else {
+      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmp(scratch, Operand(last));
+        DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
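+      // With a single-bit mask a tst suffices: the tag is either 0 (the
+      // bit must be clear) or equal to the mask (the bit must be set), so
+      // only the branch condition differs between the two cases.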
+      __ tst(scratch, Operand(mask));
+      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
+    } else {
+      __ and_(scratch, scratch, Operand(mask));
+      __ cmp(scratch, Operand(tag));
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+  Register reg = ToRegister(instr->value());
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
+  AllowDeferredHandleDereference smi_check;
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<Cell> cell = isolate()->factory()->NewCell(object);
+    __ mov(ip, Operand(cell));
+    __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset));
+    __ cmp(reg, ip);
+  } else {
+    __ cmp(reg, Operand(object));
+  }
+  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  {
+    PushSafepointRegistersScope scope(this);
+    __ push(object);
+    __ mov(cp, Operand::Zero());
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(r0, scratch0());
+  }
+  __ tst(scratch0(), Operand(kSmiTagMask));
+  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  class DeferredCheckMaps final : public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+        : LDeferredCode(codegen), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    void Generate() override {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->IsStabilityCheck()) {
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }
+
+  Register map_reg = scratch0();
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  Register reg = ToRegister(input);
+
+  __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+    __ bind(deferred->check_maps());
+  }
+
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+  Label success;
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
+    __ CompareMap(map_reg, map, &success);
+    __ b(eq, &success);
+  }
+
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
+  __ CompareMap(map_reg, map, &success);
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    __ b(ne, deferred->entry());
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+  }
+
+  __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
+  Label is_smi, done, heap_number;
+
+  // Both smi and heap number cases are handled.
+  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
+
+  // Check for heap number
+  __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ cmp(scratch, Operand(factory()->heap_number_map()));
+  __ b(eq, &heap_number);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ cmp(input_reg, Operand(factory()->undefined_value()));
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+  __ mov(result_reg, Operand::Zero());
+  __ jmp(&done);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
+  __ jmp(&done);
+
+  // smi
+  __ bind(&is_smi);
+  __ ClampUint8(result_reg, result_reg);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+  DwVfpRegister value_reg = ToDoubleRegister(instr->value());
+  Register result_reg = ToRegister(instr->result());
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ VmovHigh(result_reg, value_reg);
+  } else {
+    __ VmovLow(result_reg, value_reg);
+  }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  DwVfpRegister result_reg = ToDoubleRegister(instr->result());
+  __ VmovHigh(result_reg, hi_reg);
+  __ VmovLow(result_reg, lo_reg);
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate final : public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  // Allocate memory for the object.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ mov(scratch, Operand(size - kHeapObjectTag));
+    } else {
+      __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
+    }
+    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
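+    // Walk backwards from the end of the object, storing a one-word filler
+    // map each iteration. The result pointer is tagged, so the final store
+    // at offset -1 fills the first word before the loop falls through.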
+    Label loop;
+    __ bind(&loop);
+    __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
+    __ str(scratch2, MemOperand(result, scratch));
+    __ b(ge, &loop);
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, Operand(Smi::FromInt(0)));
+
+  PushSafepointRegistersScope scope(this);
+  if (instr->size()->IsRegister()) {
+    Register size = ToRegister(instr->size());
+    DCHECK(!size.is(result));
+    __ SmiTag(size);
+    __ push(size);
+  } else {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    if (size >= 0 && size <= Smi::kMaxValue) {
+      __ Push(Smi::FromInt(size));
+    } else {
+      // We should never get here at runtime => abort
+      __ stop("invalid allocation size");
+      return;
+    }
+  }
+
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ Push(Smi::FromInt(flags));
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  DCHECK(ToRegister(instr->value()).is(r0));
+  __ push(r0);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  DCHECK(ToRegister(instr->value()).is(r3));
+  DCHECK(ToRegister(instr->result()).is(r0));
+  Label end, do_call;
+  Register value_register = ToRegister(instr->value());
+  __ JumpIfNotSmi(value_register, &do_call);
+  __ mov(r0, Operand(isolate()->factory()->number_string()));
+  __ jmp(&end);
+  __ bind(&do_call);
+  TypeofStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&end);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+
+  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+                                                  instr->FalseLabel(chunk_),
+                                                  input,
+                                                  instr->type_literal());
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(instr, final_branch_condition);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name) {
+  Condition final_branch_condition = kNoCondition;
+  Register scratch = scratch0();
+  Factory* factory = isolate()->factory();
+  if (String::Equals(type_name, factory->number_string())) {
+    __ JumpIfSmi(input, true_label);
+    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->string_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
+    final_branch_condition = lt;
+
+  } else if (String::Equals(type_name, factory->symbol_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->boolean_string())) {
+    __ CompareRoot(input, Heap::kTrueValueRootIndex);
+    __ b(eq, true_label);
+    __ CompareRoot(input, Heap::kFalseValueRootIndex);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->undefined_string())) {
+    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
+    __ b(eq, true_label);
+    __ JumpIfSmi(input, false_label);
+    // Check for undetectable objects => true.
+    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ tst(scratch, Operand(1 << Map::kIsUndetectable));
+    final_branch_condition = ne;
+
+  } else if (String::Equals(type_name, factory->function_string())) {
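+    // typeof reports "function" only for callable, non-undetectable maps;
+    // an undetectable callable (e.g. document.all) reports "undefined".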
+    __ JumpIfSmi(input, false_label);
+    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ and_(scratch, scratch,
+            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    __ cmp(scratch, Operand(1 << Map::kIsCallable));
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->object_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    __ b(eq, true_label);
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
+    __ b(lt, false_label);
+    // Check for callable or undetectable objects => false.
+    __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ tst(scratch,
+           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    final_branch_condition = eq;
+
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
+  } else if (String::Equals(type_name, factory->type##_string())) {  \
+    __ JumpIfSmi(input, false_label);                                \
+    __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
+    __ CompareRoot(scratch, Heap::k##Type##MapRootIndex);            \
+    final_branch_condition = eq;
+  SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+    // clang-format on
+
+  } else {
+    __ b(false_label);
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      // Block literal pool emission for duration of padding.
+      Assembler::BlockConstPoolScope block_const_pool(masm());
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= Assembler::kInstrSize;
+      }
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
+  }
+
+  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  LoadContextFromDeferred(instr->context());
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck final : public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStackCheck* instr_;
+  };
+
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmp(sp, Operand(ip));
+    __ b(hs, &done);
+    Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+    PredictableCodeSizeScope predictable(masm());
+    predictable.ExpectSize(CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(cp));
+    CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new(zone()) DeferredStackCheck(this, instr);
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmp(sp, Operand(ip));
+    __ b(lo, deferred_stack_check->entry());
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting call and the safepoint in
+    // the deferred code.
+  }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  DCHECK(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  __ SmiTst(r0);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+
+  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+  __ CompareObjectType(r0, r1, r1, JS_PROXY_TYPE);
+  DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
+
+  Label use_cache, call_runtime;
+  Register null_value = r5;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ CheckEnumCache(null_value, &call_runtime);
+
+  __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ b(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(r0);
+  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
+
+  __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+  __ cmp(r1, ip);
+  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  Label load_cache, done;
+  __ EnumLength(result, map);
+  __ cmp(result, Operand(Smi::FromInt(0)));
+  __ b(ne, &load_cache);
+  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+  __ jmp(&done);
+
+  __ bind(&load_cache);
+  __ LoadInstanceDescriptors(map, result);
+  __ ldr(result,
+         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+  __ ldr(result,
+         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+  __ cmp(result, Operand::Zero());
+  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  Register map = ToRegister(instr->map());
+  __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  __ cmp(map, scratch0());
+  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ Push(object);
+  __ Push(index);
+  __ mov(cp, Operand::Zero());
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(r0, result);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble final : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register result,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {
+    }
+    void Generate() override {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, result, object, index);
+
+  Label out_of_object, done;
+
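+  // The smi index encodes (field_index << 1) | is_double in its payload:
+  // the low payload bit diverts mutable heap numbers to the deferred path,
+  // and the shift below leaves the plain field index as a smi key.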
+  __ tst(index, Operand(Smi::FromInt(1)));
+  __ b(ne, deferred->entry());
+  __ mov(index, Operand(index, ASR, 1));
+
+  __ cmp(index, Operand::Zero());
+  __ b(lt, &out_of_object);
+
+  __ add(scratch, object, Operand::PointerOffsetFromSmiKey(index));
+  __ ldr(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+  __ b(&done);
+
+  __ bind(&out_of_object);
+  __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // Index is equal to the negated out-of-object property index plus 1,
+  // i.e. index == -(property_index + 1).
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sub(scratch, result, Operand::PointerOffsetFromSmiKey(index));
+  __ ldr(result, FieldMemOperand(scratch,
+                                 FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+  Register context = ToRegister(instr->context());
+  __ str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+  Handle<ScopeInfo> scope_info = instr->scope_info();
+  __ Push(scope_info);
+  __ push(ToRegister(instr->function()));
+  CallRuntime(Runtime::kPushBlockContext, instr);
+  RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/arm/lithium-codegen-arm.h b/src/crankshaft/arm/lithium-codegen-arm.h
new file mode 100644
index 0000000..24a083f
--- /dev/null
+++ b/src/crankshaft/arm/lithium-codegen-arm.h
@@ -0,0 +1,388 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
+#define V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
+
+#include "src/ast/scopes.h"
+#include "src/crankshaft/arm/lithium-arm.h"
+#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/deoptimizer.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : LCodeGenBase(chunk, assembler, info),
+        jump_table_(4, info->zone()),
+        scope_(info->scope()),
+        deferred_(8, info->zone()),
+        frame_is_built_(false),
+        safepoints_(info->zone()),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub() ||
+        info()->requires_frame();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
+  LinkRegisterStatus GetLinkRegisterState() const {
+    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+  }
+
+  // Support for converting LOperands to assembler types.
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LOperand must be a double register.
+  DwVfpRegister ToDoubleRegister(LOperand* op) const;
+
+  // LOperand is loaded into dbl_scratch, unless already a double register.
+  DwVfpRegister EmitLoadDoubleRegister(LOperand* op,
+                                       SwVfpRegister flt_scratch,
+                                       DwVfpRegister dbl_scratch);
+  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+  int32_t ToInteger32(LConstantOperand* op) const;
+  Smi* ToSmi(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+  MemOperand ToHighMemOperand(LOperand* op) const;
+
+  bool IsInteger32(LConstantOperand* op) const;
+  bool IsSmi(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  void DoDeferredNumberTagIU(LInstruction* instr,
+                             LOperand* value,
+                             LOperand* temp1,
+                             LOperand* temp2,
+                             IntegerSignedness signedness);
+
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocate(LAllocate* instr);
+  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register result,
+                                   Register object,
+                                   Register index);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  MemOperand PrepareKeyedOperand(Register key,
+                                 Register base,
+                                 bool key_is_constant,
+                                 int constant_key,
+                                 int element_size,
+                                 int shift_size,
+                                 int base_offset);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  LanguageMode language_mode() const { return info()->language_mode(); }
+
+  Scope* scope() const { return scope_; }
+
+  Register scratch0() { return r9; }
+  LowDwVfpRegister double_scratch0() { return kScratchDoubleReg; }
+
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  void GenerateBodyInstructionPre(LInstruction* instr) override;
+  bool GeneratePrologue();
+  bool GenerateDeferredCode();
+  bool GenerateJumpTable();
+  bool GenerateSafepointTable();
+
+  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+  void GenerateOsrPrologue();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  int CallCodeSize(Handle<Code> code, RelocInfo::Mode mode);
+
+  void CallCode(
+      Handle<Code> code,
+      RelocInfo::Mode mode,
+      LInstruction* instr,
+      TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
+
+  void CallCodeGeneric(
+      Handle<Code> code,
+      RelocInfo::Mode mode,
+      LInstruction* instr,
+      SafepointMode safepoint_mode,
+      TargetAddressStorageMode storage_mode = CAN_INLINE_TARGET_ADDRESS);
+
+  void CallRuntime(const Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, function->nargs, instr);
+  }
+
+  void LoadContextFromDeferred(LOperand* context);
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr,
+                               LOperand* context);
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in r1.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count, int arity,
+                         LInstruction* instr);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason,
+                    Deoptimizer::BailoutType bailout_type);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason);
+
+  void AddToTranslation(LEnvironment* environment,
+                        Translation* translation,
+                        LOperand* op,
+                        bool is_tagged,
+                        bool is_uint32,
+                        int* object_index_pointer,
+                        int* dematerialized_index_pointer);
+
+  Register ToRegister(int index) const;
+  DwVfpRegister ToDoubleRegister(int index) const;
+
+  MemOperand BuildSeqStringOperand(Register string,
+                                   LOperand* index,
+                                   String::Encoding encoding);
+
+  void EmitIntegerMathAbs(LMathAbs* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    Safepoint::DeoptMode mode);
+
+  void RecordAndWritePosition(int position) override;
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block);
+
+  // EmitBranch expects to be the last instruction of a block.
+  template<class InstrType>
+  void EmitBranch(InstrType instr, Condition condition);
+  template <class InstrType>
+  void EmitTrueBranch(InstrType instr, Condition condition);
+  template <class InstrType>
+  void EmitFalseBranch(InstrType instr, Condition condition);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+                        DwVfpRegister result, NumberUntagDMode mode);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string,
+                         SmiCheck check_needed);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset,
+                    AllocationSiteMode mode);
+
+  void EnsureSpaceForLazyDeopt(int space_needed) override;
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+  template <class T>
+  void EmitVectorStoreICRegisters(T* instr);
+
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+  Scope* const scope_;
+  ZoneList<LDeferredCode*> deferred_;
+  bool frame_is_built_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope final BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+      codegen_->masm_->PushSafepointRegisters();
+    }
+
+    ~PushSafepointRegistersScope() {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      codegen_->masm_->PopSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode : public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() {}
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  int instruction_index_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_ARM_LITHIUM_CODEGEN_ARM_H_
diff --git a/src/crankshaft/arm/lithium-gap-resolver-arm.cc b/src/crankshaft/arm/lithium-gap-resolver-arm.cc
new file mode 100644
index 0000000..066db7d
--- /dev/null
+++ b/src/crankshaft/arm/lithium-gap-resolver-arm.cc
@@ -0,0 +1,301 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/arm/lithium-codegen-arm.h"
+#include "src/crankshaft/arm/lithium-gap-resolver-arm.h"
+
+namespace v8 {
+namespace internal {
+
+// We use the root register to spill a value while breaking a cycle in parallel
+// moves. We don't need access to roots while resolving the move list and using
+// the root register has two advantages:
+//  - It is not in crankshaft allocatable registers list, so it can't interfere
+//    with any of the moves we are resolving.
+//  - We don't need to push it on the stack, as we can reload it with its value
+//    once we have resolved a cycle.
+#define kSavedValueRegister kRootRegister
+
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
+      saved_destination_(NULL), need_to_restore_root_(false) { }
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  DCHECK(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+      PerformMove(i);
+      if (in_cycle_) {
+        RestoreValue();
+      }
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      DCHECK(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  if (need_to_restore_root_) {
+    DCHECK(kSavedValueRegister.is(kRootRegister));
+    __ InitializeRootRegister();
+    need_to_restore_root_ = false;
+  }
+
+  moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+
+  // We can only find a cycle, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+  // the starting move, we break the cycle.  All moves are then unblocked,
+  // and the starting move is completed by writing the spilled value to
+  // its destination.  All other moves from the spilled source have been
+  // completed prior to breaking the cycle.
+  // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill this spilled value to
+  // the stack, to free up the register.
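+  //
+  // Example: for the cyclic pair {r0 -> r1, r1 -> r0}, performing the first
+  // move recurses into the second, finds the (pending) first move blocking
+  // it, and spills r1 into the scratch register; the saved value is written
+  // to r0 once the first move has completed.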
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack allocated local.  Multiple moves can
+  // be pending because this function is recursive.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move, it must be moves_[root_index_]
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    DCHECK(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+
+void LGapResolver::BreakCycle(int index) {
+  // We save in a register the source of that move and we remember its
+  // destination. Then we mark this move as resolved so the cycle is
+  // broken and we can perform the other moves.
+  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  DCHECK(!in_cycle_);
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+  if (source->IsRegister()) {
+    need_to_restore_root_ = true;
+    __ mov(kSavedValueRegister, cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    need_to_restore_root_ = true;
+    __ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+  // This move will be done by restoring the saved value to the destination.
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+  DCHECK(in_cycle_);
+  DCHECK(saved_destination_ != NULL);
+
+  if (saved_destination_->IsRegister()) {
+    __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+  } else if (saved_destination_->IsStackSlot()) {
+    __ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+  } else if (saved_destination_->IsDoubleRegister()) {
+    __ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
+  } else if (saved_destination_->IsDoubleStackSlot()) {
+    __ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
+  } else {
+    UNREACHABLE();
+  }
+
+  in_cycle_ = false;
+  saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+
+  if (source->IsRegister()) {
+    Register source_register = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(cgen_->ToRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      __ str(source_register, cgen_->ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ ldr(cgen_->ToRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (!destination_operand.OffsetIsUint12Encodable()) {
+        // ip is overwritten while saving the value to the destination.
+        // Therefore we can't use ip.  It is OK if the read from the source
+        // destroys ip, since that happens before the value is read.
+        __ vldr(kScratchDoubleReg.low(), source_operand);
+        __ vstr(kScratchDoubleReg.low(), destination_operand);
+      } else {
+        __ ldr(ip, source_operand);
+        __ str(ip, destination_operand);
+      }
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      Representation r = cgen_->IsSmi(constant_source)
+          ? Representation::Smi() : Representation::Integer32();
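+      // ToRepresentation() returns the raw bits to materialize: the tagged
+      // value for a smi constant, the plain int32 otherwise.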
+      if (cgen_->IsInteger32(constant_source)) {
+        __ mov(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
+      } else {
+        __ Move(dst, cgen_->ToHandle(constant_source));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      DwVfpRegister result = cgen_->ToDoubleRegister(destination);
+      double v = cgen_->ToDouble(constant_source);
+      __ Vmov(result, v, ip);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      need_to_restore_root_ = true;
+      Representation r = cgen_->IsSmi(constant_source)
+          ? Representation::Smi() : Representation::Integer32();
+      if (cgen_->IsInteger32(constant_source)) {
+        __ mov(kSavedValueRegister,
+               Operand(cgen_->ToRepresentation(constant_source, r)));
+      } else {
+        __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
+      }
+      __ str(kSavedValueRegister, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ vmov(cgen_->ToDoubleRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ vstr(source_register, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        // kScratchDoubleReg was used to break the cycle.
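+        // Preserve the saved cycle value on the stack while the register is
+        // reused for this move.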
+        __ vpush(kScratchDoubleReg);
+        __ vldr(kScratchDoubleReg, source_operand);
+        __ vstr(kScratchDoubleReg, destination_operand);
+        __ vpop(kScratchDoubleReg);
+      } else {
+        __ vldr(kScratchDoubleReg, source_operand);
+        __ vstr(kScratchDoubleReg, destination_operand);
+      }
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/arm/lithium-gap-resolver-arm.h b/src/crankshaft/arm/lithium-gap-resolver-arm.h
new file mode 100644
index 0000000..59413c5
--- /dev/null
+++ b/src/crankshaft/arm/lithium-gap-resolver-arm.h
@@ -0,0 +1,63 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+#define V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
+
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver final BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+
+  // We use the root register as a scratch in a few places. When that happens,
+  // this flag is set to indicate that it needs to be restored.
+  bool need_to_restore_root_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_ARM_LITHIUM_GAP_RESOLVER_ARM_H_
diff --git a/src/crankshaft/arm64/OWNERS b/src/crankshaft/arm64/OWNERS
new file mode 100644
index 0000000..906a5ce
--- /dev/null
+++ b/src/crankshaft/arm64/OWNERS
@@ -0,0 +1 @@
+rmcilroy@chromium.org
diff --git a/src/crankshaft/arm64/delayed-masm-arm64-inl.h b/src/crankshaft/arm64/delayed-masm-arm64-inl.h
new file mode 100644
index 0000000..503fd88
--- /dev/null
+++ b/src/crankshaft/arm64/delayed-masm-arm64-inl.h
@@ -0,0 +1,56 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
+#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
+
+#include "src/crankshaft/arm64/delayed-masm-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+void DelayedMasm::EndDelayedUse() {
+  EmitPending();
+  DCHECK(!scratch_register_acquired_);
+  ResetSavedValue();
+}
+
+
+void DelayedMasm::Mov(const Register& rd,
+                      const Operand& operand,
+                      DiscardMoveMode discard_mode) {
+  EmitPending();
+  DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
+  __ Mov(rd, operand, discard_mode);
+}
+
+
+void DelayedMasm::Fmov(FPRegister fd, FPRegister fn) {
+  EmitPending();
+  __ Fmov(fd, fn);
+}
+
+
+void DelayedMasm::Fmov(FPRegister fd, double imm) {
+  EmitPending();
+  __ Fmov(fd, imm);
+}
+
+
+void DelayedMasm::LoadObject(Register result, Handle<Object> object) {
+  EmitPending();
+  DCHECK(!IsScratchRegister(result) || scratch_register_acquired_);
+  __ LoadObject(result, object);
+}
+
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_INL_H_
diff --git a/src/crankshaft/arm64/delayed-masm-arm64.cc b/src/crankshaft/arm64/delayed-masm-arm64.cc
new file mode 100644
index 0000000..6124706
--- /dev/null
+++ b/src/crankshaft/arm64/delayed-masm-arm64.cc
@@ -0,0 +1,197 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/crankshaft/arm64/delayed-masm-arm64.h"
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) {
+  DCHECK((src->IsStackSlot() && dst->IsStackSlot()) ||
+         (src->IsDoubleStackSlot() && dst->IsDoubleStackSlot()));
+  MemOperand src_operand = cgen_->ToMemOperand(src);
+  MemOperand dst_operand = cgen_->ToMemOperand(dst);
+  if (pending_ == kStackSlotMove) {
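+    // Another stack-slot move is pending: try to combine the two loads and
+    // the two stores into ldp/stp pairs when the addresses are contiguous.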
+    DCHECK(pending_pc_ == masm_->pc_offset());
+    UseScratchRegisterScope scope(masm_);
+    DoubleRegister temp1 = scope.AcquireD();
+    DoubleRegister temp2 = scope.AcquireD();
+    switch (MemOperand::AreConsistentForPair(pending_address_src_,
+                                             src_operand)) {
+      case MemOperand::kNotPair:
+        __ Ldr(temp1, pending_address_src_);
+        __ Ldr(temp2, src_operand);
+        break;
+      case MemOperand::kPairAB:
+        __ Ldp(temp1, temp2, pending_address_src_);
+        break;
+      case MemOperand::kPairBA:
+        __ Ldp(temp2, temp1, src_operand);
+        break;
+    }
+    switch (MemOperand::AreConsistentForPair(pending_address_dst_,
+                                             dst_operand)) {
+      case MemOperand::kNotPair:
+        __ Str(temp1, pending_address_dst_);
+        __ Str(temp2, dst_operand);
+        break;
+      case MemOperand::kPairAB:
+        __ Stp(temp1, temp2, pending_address_dst_);
+        break;
+      case MemOperand::kPairBA:
+        __ Stp(temp2, temp1, dst_operand);
+        break;
+    }
+    ResetPending();
+    return;
+  }
+
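+  // No stack-slot move is pending: flush any other pending instruction and
+  // record this move as the new pending instruction.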
+  EmitPending();
+  pending_ = kStackSlotMove;
+  pending_address_src_ = src_operand;
+  pending_address_dst_ = dst_operand;
+#ifdef DEBUG
+  pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::StoreConstant(uint64_t value, const MemOperand& operand) {
+  DCHECK(!scratch_register_acquired_);
+  if ((pending_ == kStoreConstant) && (value == pending_value_)) {
+    MemOperand::PairResult result =
+        MemOperand::AreConsistentForPair(pending_address_dst_, operand);
+    if (result != MemOperand::kNotPair) {
+      const MemOperand& dst =
+          (result == MemOperand::kPairAB) ?
+              pending_address_dst_ :
+              operand;
+      DCHECK(pending_pc_ == masm_->pc_offset());
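+      // Store both constants with a single stp, using xzr for zero and the
+      // scratch register (loaded via SetSavedValue) otherwise.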
+      if (pending_value_ == 0) {
+        __ Stp(xzr, xzr, dst);
+      } else {
+        SetSavedValue(pending_value_);
+        __ Stp(ScratchRegister(), ScratchRegister(), dst);
+      }
+      ResetPending();
+      return;
+    }
+  }
+
+  EmitPending();
+  pending_ = kStoreConstant;
+  pending_address_dst_ = operand;
+  pending_value_ = value;
+#ifdef DEBUG
+  pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::Load(const CPURegister& rd, const MemOperand& operand) {
+  if ((pending_ == kLoad) &&
+      pending_register_.IsSameSizeAndType(rd)) {
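+    // A compatible load is pending: emit both loads as a single ldp if the
+    // two source addresses form a consistent pair.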
+    switch (MemOperand::AreConsistentForPair(pending_address_src_, operand)) {
+      case MemOperand::kNotPair:
+        break;
+      case MemOperand::kPairAB:
+        DCHECK(pending_pc_ == masm_->pc_offset());
+        DCHECK(!IsScratchRegister(pending_register_) ||
+               scratch_register_acquired_);
+        DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
+        __ Ldp(pending_register_, rd, pending_address_src_);
+        ResetPending();
+        return;
+      case MemOperand::kPairBA:
+        DCHECK(pending_pc_ == masm_->pc_offset());
+        DCHECK(!IsScratchRegister(pending_register_) ||
+               scratch_register_acquired_);
+        DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
+        __ Ldp(rd, pending_register_, operand);
+        ResetPending();
+        return;
+    }
+  }
+
+  EmitPending();
+  pending_ = kLoad;
+  pending_register_ = rd;
+  pending_address_src_ = operand;
+#ifdef DEBUG
+  pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::Store(const CPURegister& rd, const MemOperand& operand) {
+  if ((pending_ == kStore) &&
+      pending_register_.IsSameSizeAndType(rd)) {
+    switch (MemOperand::AreConsistentForPair(pending_address_dst_, operand)) {
+      case MemOperand::kNotPair:
+        break;
+      case MemOperand::kPairAB:
+        DCHECK(pending_pc_ == masm_->pc_offset());
+        __ Stp(pending_register_, rd, pending_address_dst_);
+        ResetPending();
+        return;
+      case MemOperand::kPairBA:
+        DCHECK(pending_pc_ == masm_->pc_offset());
+        __ Stp(rd, pending_register_, operand);
+        ResetPending();
+        return;
+    }
+  }
+
+  EmitPending();
+  pending_ = kStore;
+  pending_register_ = rd;
+  pending_address_dst_ = operand;
+#ifdef DEBUG
+  pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::EmitPending() {
+  DCHECK((pending_ == kNone) || (pending_pc_ == masm_->pc_offset()));
+  switch (pending_) {
+    case kNone:
+      return;
+    case kStoreConstant:
+      if (pending_value_ == 0) {
+        __ Str(xzr, pending_address_dst_);
+      } else {
+        SetSavedValue(pending_value_);
+        __ Str(ScratchRegister(), pending_address_dst_);
+      }
+      break;
+    case kLoad:
+      DCHECK(!IsScratchRegister(pending_register_) ||
+              scratch_register_acquired_);
+      __ Ldr(pending_register_, pending_address_src_);
+      break;
+    case kStore:
+      __ Str(pending_register_, pending_address_dst_);
+      break;
+    case kStackSlotMove: {
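+      // An unpaired stack-slot move goes through a scratch double register.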
+      UseScratchRegisterScope scope(masm_);
+      DoubleRegister temp = scope.AcquireD();
+      __ Ldr(temp, pending_address_src_);
+      __ Str(temp, pending_address_dst_);
+      break;
+    }
+  }
+  ResetPending();
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/crankshaft/arm64/delayed-masm-arm64.h b/src/crankshaft/arm64/delayed-masm-arm64.h
new file mode 100644
index 0000000..5da2b72
--- /dev/null
+++ b/src/crankshaft/arm64/delayed-masm-arm64.h
@@ -0,0 +1,165 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
+
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+
+// This class delays the generation of some instructions. This way, we have a
+// chance to merge two instructions into one (using a load/store pair).
+// Each instruction must either:
+//  - merge with the pending instruction and generate just one instruction, or
+//  - emit the pending instruction and then generate itself (possibly becoming
+//    the new pending instruction).
+class DelayedMasm BASE_EMBEDDED {
+ public:
+  DelayedMasm(LCodeGen* owner,
+              MacroAssembler* masm,
+              const Register& scratch_register)
+    : cgen_(owner), masm_(masm), scratch_register_(scratch_register),
+      scratch_register_used_(false), pending_(kNone), saved_value_(0) {
+#ifdef DEBUG
+    pending_register_ = no_reg;
+    pending_value_ = 0;
+    pending_pc_ = 0;
+    scratch_register_acquired_ = false;
+#endif
+  }
+  ~DelayedMasm() {
+    DCHECK(!scratch_register_acquired_);
+    DCHECK(!scratch_register_used_);
+    DCHECK(!pending());
+  }
+  inline void EndDelayedUse();
+
+  const Register& ScratchRegister() {
+    scratch_register_used_ = true;
+    return scratch_register_;
+  }
+  bool IsScratchRegister(const CPURegister& reg) {
+    return reg.Is(scratch_register_);
+  }
+  bool scratch_register_used() const { return scratch_register_used_; }
+  void reset_scratch_register_used() { scratch_register_used_ = false; }
+  // Acquire/Release scratch register for use outside this class.
+  void AcquireScratchRegister() {
+    EmitPending();
+    ResetSavedValue();
+#ifdef DEBUG
+    DCHECK(!scratch_register_acquired_);
+    scratch_register_acquired_ = true;
+#endif
+  }
+  void ReleaseScratchRegister() {
+#ifdef DEBUG
+    DCHECK(scratch_register_acquired_);
+    scratch_register_acquired_ = false;
+#endif
+  }
+  bool pending() { return pending_ != kNone; }
+
+  // Extra layer over the macro-assembler instructions (each of these emits
+  // any pending instruction before generating its own).
+  inline void Mov(const Register& rd,
+                  const Operand& operand,
+                  DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
+  inline void Fmov(FPRegister fd, FPRegister fn);
+  inline void Fmov(FPRegister fd, double imm);
+  inline void LoadObject(Register result, Handle<Object> object);
+  // Instructions which try to merge with the pending instruction.
+  void StackSlotMove(LOperand* src, LOperand* dst);
+  // StoreConstant can only be used if the scratch register is not acquired.
+  void StoreConstant(uint64_t value, const MemOperand& operand);
+  void Load(const CPURegister& rd, const MemOperand& operand);
+  void Store(const CPURegister& rd, const MemOperand& operand);
+  // Emit the potential pending instruction.
+  void EmitPending();
+  // Reset the pending state.
+  void ResetPending() {
+    pending_ = kNone;
+#ifdef DEBUG
+    pending_register_ = no_reg;
+    MemOperand tmp;
+    pending_address_src_ = tmp;
+    pending_address_dst_ = tmp;
+    pending_value_ = 0;
+    pending_pc_ = 0;
+#endif
+  }
+  void InitializeRootRegister() {
+    masm_->InitializeRootRegister();
+  }
+
+ private:
+  // Set the saved value and load the ScratchRegister with it.
+  void SetSavedValue(uint64_t saved_value) {
+    DCHECK(saved_value != 0);
+    if (saved_value_ != saved_value) {
+      masm_->Mov(ScratchRegister(), saved_value);
+      saved_value_ = saved_value;
+    }
+  }
+  // Reset the saved value (i.e. the value of ScratchRegister is no longer
+  // known).
+  void ResetSavedValue() {
+    saved_value_ = 0;
+  }
+
+  LCodeGen* cgen_;
+  MacroAssembler* masm_;
+
+  // Register used to store a constant.
+  Register scratch_register_;
+  bool scratch_register_used_;
+
+  // Sometimes we store or load two values in two contiguous stack slots.
+  // In this case, we try to use the ldp/stp instructions to reduce code size.
+  // To be able to do that, instead of generating an instruction directly, we
+  // record in the following fields that an instruction needs to be generated.
+  // Then, if the next instruction is consistent with the pending one for
+  // ldp/stp, we generate the pair. If they are not consistent, we generate
+  // the pending instruction and record the new instruction (which becomes
+  // pending).
+
+  // Enumeration of instructions which can be pending.
+  enum Pending {
+    kNone,
+    kStoreConstant,
+    kLoad, kStore,
+    kStackSlotMove
+  };
+  // The pending instruction.
+  Pending pending_;
+  // For kLoad, kStore: register which must be loaded/stored.
+  CPURegister pending_register_;
+  // For kLoad, kStackSlotMove: address of the load.
+  MemOperand pending_address_src_;
+  // For kStoreConstant, kStore, kStackSlotMove: address of the store.
+  MemOperand pending_address_dst_;
+  // For kStoreConstant: value to be stored.
+  uint64_t pending_value_;
+  // Value held in the ScratchRegister if saved_value_ is not 0.
+  // For 0, we use xzr.
+  uint64_t saved_value_;
+#ifdef DEBUG
+  // PC offset at which the pending instruction must be generated. It's only
+  // used to check that nothing else has been generated since we set the
+  // pending instruction.
+  int pending_pc_;
+  // If true, the scratch register has been acquired outside this class. The
+  // scratch register can no longer be used for constants.
+  bool scratch_register_acquired_;
+#endif
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_ARM64_DELAYED_MASM_ARM64_H_
diff --git a/src/crankshaft/arm64/lithium-arm64.cc b/src/crankshaft/arm64/lithium-arm64.cc
new file mode 100644
index 0000000..3f43338
--- /dev/null
+++ b/src/crankshaft/arm64/lithium-arm64.cc
@@ -0,0 +1,2733 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/arm64/lithium-arm64.h"
+
+#include <sstream>
+
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
+  DCHECK(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+
+void LCallFunction::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  function()->PrintTo(stream);
+  if (hydrogen()->HasVectorAndSlot()) {
+    stream->Add(" (type-feedback-vector ");
+    temp_vector()->PrintTo(stream);
+    stream->Add(" ");
+    temp_slot()->PrintTo(stream);
+    stream->Add(")");
+  }
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(),
+              true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if ((parallel_moves_[i] != NULL) && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  std::ostringstream os;
+  os << hydrogen()->access();
+  stream->Add(os.str().c_str());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("%p -> %p", *original_map(), *transitioned_map());
+}
+
+
+template<int T>
+void LUnaryMathOperation<T>::PrintDataTo(StringStream* stream) {
+  value()->PrintTo(stream);
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::ROR: return "ror-t";
+    case Token::SHL: return "shl-t";
+    case Token::SAR: return "sar-t";
+    case Token::SHR: return "shr-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new (zone())
+      LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value,
+                                        DoubleRegister fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAndClobber(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant() ? UseConstant(value) : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant() ? UseConstant(value) : UseRegisterAtStart(value);
+}
+
+
+LConstantOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? UseConstant(value)
+      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr, int index) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(
+    LTemplateResultInstruction<1>* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  DCHECK(!instr->HasPointerMap());
+  instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+int LPlatformChunk::GetNextSpillIndex() {
+  return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  int index = GetNextSpillIndex();
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    DCHECK(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+  DCHECK(is_unused());
+  chunk_ = new(zone()) LPlatformChunk(info_, graph_);
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    // TODO(all): GetNextSpillIndex just increments a field. It has no other
+    // side effects, so we should get rid of this loop.
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex();
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    DoBasicBlock(blocks->at(i));
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
+  DCHECK(is_building());
+  current_block_ = block;
+
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    DCHECK(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    DCHECK(last_environment != NULL);
+
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      DCHECK(pred->end()->FirstSuccessor() == block);
+    } else {
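+      // A successor with a higher block id has not been processed yet, so the
+      // environment will be needed again and must be copied.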
+      if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) ||
+          (pred->end()->SecondSuccessor()->block_id() > block->block_id())) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    DCHECK(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+
+  // Translate hydrogen instructions to lithium ones for the current block.
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while ((current != NULL) && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new(zone()) LDummy());
+    } else {
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new(zone())
+          LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    HBasicBlock* successor;
+    if (current->IsControlInstruction() &&
+        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+        successor != NULL) {
+      instr = new(zone()) LGoto(successor);
+    } else {
+      instr = current->CompileToLithium(this);
+    }
+  }
+
+  argument_count_ += current->argument_delta();
+  DCHECK(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    AddInstruction(instr, current);
+  }
+
+  current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+                                   HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+  instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+  // Make sure that the lithium instruction has either no fixed register
+  // constraints in temps or the result OR no uses that are only used at
+  // start. If this invariant doesn't hold, the register allocator can decide
+  // to insert a split of a range immediately before the instruction due to an
+  // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it was just a plain use), so it is free to move the split child into
+  // the same register that is used for the use-at-start.
+  // See https://code.google.com/p/chromium/issues/detail?id=201590
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
+    int fixed = 0;
+    int used_at_start = 0;
+    for (UseIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->IsUsedAtStart()) ++used_at_start;
+    }
+    if (instr->Output() != NULL) {
+      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+    }
+    for (TempIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->HasFixedPolicy()) ++fixed;
+    }
+    DCHECK(fixed == 0 || used_at_start == 0);
+  }
+#endif
+
+  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+    instr = AssignPointerMap(instr);
+  }
+  if (FLAG_stress_environments && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+  chunk_->AddInstruction(instr, current_block_);
+
+  if (instr->IsCall() || instr->IsPrologue()) {
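+    // Calls can trigger lazy deoptimization, so record a bailout point right
+    // after the instruction. If it has observable side effects, bail out to
+    // the environment of the following simulate instead.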
+    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+    if (hydrogen_val->HasObservableSideEffects()) {
+      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+      sim->ReplayEnvironment(current_block_->last_environment());
+      hydrogen_value_for_lazy_bailout = sim;
+    }
+    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+    chunk_->AddInstruction(bailout, current_block_);
+  }
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator,
+                                           &objects_to_materialize));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+  return new (zone()) LPrologue();
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception). There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
+
+  if (op == Token::MOD) {
+    LOperand* left = UseFixedDouble(instr->left(), d0);
+    LOperand* right = UseFixedDouble(instr->right(), d1);
+    LArithmeticD* result = new(zone()) LArithmeticD(Token::MOD, left, right);
+    return MarkAsCall(DefineFixedDouble(result, d0), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return DefineAsRegister(result);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  DCHECK((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
+         (op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) ||
+         (op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) ||
+         (op == Token::BIT_OR) || (op == Token::BIT_AND) ||
+         (op == Token::BIT_XOR));
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+
+  // TODO(jbramley): Once we've implemented smi support for all arithmetic
+  // operations, these assertions should check IsTagged().
+  DCHECK(instr->representation().IsSmiOrTagged());
+  DCHECK(left->representation().IsSmiOrTagged());
+  DCHECK(right->representation().IsSmiOrTagged());
+
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left_operand = UseFixed(left, x1);
+  LOperand* right_operand = UseFixed(right, x0);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+    HBoundsCheckBaseIndexInformation* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* args = NULL;
+  LOperand* length = NULL;
+  LOperand* index = NULL;
+
+  if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+    args = UseRegisterAtStart(instr->arguments());
+    length = UseConstant(instr->length());
+    index = UseConstant(instr->index());
+  } else {
+    args = UseRegister(instr->arguments());
+    length = UseRegisterAtStart(instr->length());
+    index = UseRegisterOrConstantAtStart(instr->index());
+  }
+
+  return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+    LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right =
+        UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+    LInstruction* result = instr->representation().IsSmi() ?
+        DefineAsRegister(new(zone()) LAddS(left, right)) :
+        DefineAsRegister(new(zone()) LAddI(left, right));
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsExternal()) {
+    DCHECK(instr->IsConsistentExternalRepresentation());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return DefineAsRegister(new(zone()) LAddE(left, right));
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    DCHECK(instr->representation().IsTagged());
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = UseRegisterOrConstant(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LOperand* temp3 = instr->MustPrefillWithFiller() ? TempRegister() : NULL;
+  LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2, temp3);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), x1);
+  LOperand* receiver = UseFixed(instr->receiver(), x0);
+  LOperand* length = UseFixed(instr->length(), x2);
+  LOperand* elements = UseFixed(instr->elements(), x3);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
+  return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* temp = instr->from_inlined() ? NULL : TempRegister();
+  return DefineAsRegister(new(zone()) LArgumentsElements(temp));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LArgumentsLength(value));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right =
+        UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+    return instr->representation().IsSmi() ?
+        DefineAsRegister(new(zone()) LBitS(left, right)) :
+        DefineAsRegister(new(zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  // V8 expects a label to be generated for each basic block.
+  // This is used in some places like LAllocator::IsBlockBoundary
+  // in lithium-allocator.cc
+  return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
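+  // With --debug-code, bounds checks that could be skipped are still emitted,
+  // but without deoptimization support.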
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+      ? UseRegisterOrConstantAtStart(instr->length())
+      : UseRegisterAtStart(instr->length());
+  LInstruction* result = new(zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+
+  if (r.IsInteger32() || r.IsSmi() || r.IsDouble()) {
+    // These representations have simple checks that cannot deoptimize.
+    return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+  } else {
+    DCHECK(r.IsTagged());
+    if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() ||
+        type.IsHeapNumber()) {
+      // These types have simple checks that cannot deoptimize.
+      return new(zone()) LBranch(UseRegister(value), NULL, NULL);
+    }
+
+    if (type.IsString()) {
+      // This type cannot deoptimize, but needs a scratch register.
+      return new(zone()) LBranch(UseRegister(value), TempRegister(), NULL);
+    }
+
+    ToBooleanStub::Types expected = instr->expected_input_types();
+    bool needs_temps = expected.NeedsMap() || expected.IsEmpty();
+    LOperand* temp1 = needs_temps ? TempRegister() : NULL;
+    LOperand* temp2 = needs_temps ? TempRegister() : NULL;
+
+    if (expected.IsGeneric() || expected.IsEmpty()) {
+      // The generic case cannot deoptimize because it already supports every
+      // possible input type.
+      DCHECK(needs_temps);
+      return new(zone()) LBranch(UseRegister(value), temp1, temp2);
+    } else {
+      return AssignEnvironment(
+          new(zone()) LBranch(UseRegister(value), temp1, temp2));
+    }
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+    HCallJSFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), x1);
+
+  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+    HCallWithDescriptor* instr) {
+  CallInterfaceDescriptor descriptor = instr->descriptor();
+
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  // Target
+  ops.Add(target, zone());
+  // Context
+  LOperand* op = UseFixed(instr->OperandAt(1), cp);
+  ops.Add(op, zone());
+  // Other register parameters
+  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+       i < instr->OperandCount(); i++) {
+    op =
+        UseFixed(instr->OperandAt(i),
+                 descriptor.GetRegisterParameter(
+                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(descriptor,
+                                                                ops,
+                                                                zone());
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), x1);
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(x3);
+    vector = FixedTemp(x2);
+  }
+
+  LCallFunction* call =
+      new (zone()) LCallFunction(context, function, slot, vector);
+  return MarkAsCall(DefineFixed(call, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  // The call to ArrayConstructCode will expect the constructor to be in x1.
+  LOperand* constructor = UseFixed(instr->constructor(), x1);
+  LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  HValue* val = instr->value();
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
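+      // A smi is already a valid tagged value, so no conversion is needed.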
+      LOperand* value = UseRegister(val);
+      return DefineSameAsFirst(new(zone()) LDummyUse(value));
+    }
+    from = Representation::Tagged();
+  }
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(val);
+      LOperand* temp = TempRegister();
+      LInstruction* result =
+          DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
+        return DefineSameAsFirst(new(zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = instr->CanTruncateToInt32()
+            ? NULL : TempDoubleRegister();
+        LInstruction* result =
+            DefineAsRegister(new(zone()) LTaggedToI(value, temp1, temp2));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
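+      // Boxing a double allocates a HeapNumber, which can trigger a GC; hence
+      // the deferred-calling mark and the pointer map.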
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegister(val);
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+      return AssignPointerMap(DefineAsRegister(result));
+    } else {
+      DCHECK(to.IsSmi() || to.IsInteger32());
+      if (instr->CanTruncateToInt32()) {
+        LOperand* value = UseRegister(val);
+        return DefineAsRegister(new(zone()) LTruncateDoubleToIntOrSmi(value));
+      } else {
+        LOperand* value = UseRegister(val);
+        LDoubleToIntOrSmi* result = new(zone()) LDoubleToIntOrSmi(value);
+        return AssignEnvironment(DefineAsRegister(result));
+      }
+    }
+  } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegister(val);
+        LNumberTagU* result =
+            new(zone()) LNumberTagU(value, TempRegister(), TempRegister());
+        return AssignPointerMap(DefineAsRegister(result));
+      } else {
+        STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
+                      (kMaxInt == Smi::kMaxValue));
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiTag(value));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegisterAtStart(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+      if (instr->CheckFlag(HValue::kCanOverflow)) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(to.IsDouble());
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(
+            new(zone()) LUint32ToDouble(UseRegisterAtStart(val)));
+      } else {
+        return DefineAsRegister(
+            new(zone()) LInteger32ToDouble(UseRegisterAtStart(val)));
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
+    HCheckArrayBufferNotNeutered* instr) {
+  LOperand* view = UseRegisterAtStart(instr->value());
+  LCheckArrayBufferNotNeutered* result =
+      new (zone()) LCheckArrayBufferNotNeutered(view);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LInstruction* result = new(zone()) LCheckInstanceType(value, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value, temp));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckNonSmi(value);
+  if (!instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+  } else {
+    DCHECK(input_rep.IsSmiOrTagged());
+    return AssignEnvironment(
+        DefineAsRegister(new(zone()) LClampTToUint8(reg,
+                                                    TempDoubleRegister())));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LClassOfTestAndBranch(value,
+                                           TempRegister(),
+                                           TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+      LOperand* left = UseConstant(instr->left());
+      LOperand* right = UseConstant(instr->right());
+      return new(zone()) LCompareNumericAndBranch(left, right);
+    }
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), x1);
+  LOperand* right = UseFixed(instr->right(), x0);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  if (instr->representation().IsTagged()) {
+    return new(zone()) LCmpHoleAndBranchT(value);
+  } else {
+    LOperand* temp = TempRegister();
+    return new(zone()) LCmpHoleAndBranchD(value, temp);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new(zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new(zone()) LConstantD);
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new(zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new(zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new(zone()) LContext, cp);
+  }
+
+  return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new(zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+          dividend, divisor));
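+  // Deopt cases, mirroring the checks below: a negative divisor can turn a
+  // zero dividend into -0, kMinInt / -1 overflows, and any divisor other
+  // than +/-1 loses the remainder unless all uses truncate to int32.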
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+       divisor != 1 && divisor != -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+      ? NULL : TempRegister();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+          dividend, divisor, temp));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+      ? NULL : TempRegister();
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
+  if (!instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->inlining_kind());
+  // Only replay the binding of the arguments object if it wasn't removed
+  // from the graph.
+  if ((instr->arguments_var() != NULL) &&
+      instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->BindContext(instr->closure_context());
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedFunction(instr->shared());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(
+    HForceRepresentation* instr) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(
+      new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* left =
+      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+  LOperand* right =
+      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+    HHasInPrototypeChainAndBranch* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* prototype = UseRegister(instr->prototype());
+  LOperand* scratch1 = TempRegister();
+  LOperand* scratch2 = TempRegister();
+  LHasInPrototypeChainAndBranch* result = new (zone())
+      LHasInPrototypeChainAndBranch(object, prototype, scratch1, scratch2);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  // The function is required (by MacroAssembler::InvokeFunction) to be in x1.
+  LOperand* function = UseFixed(instr->function(), x1);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, x0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+    HCompareMinusZeroAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  LOperand* scratch = TempRegister();
+  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer =
+      current_block_->last_environment()->DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+
+  return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  LOperand* function = UseRegister(instr->function());
+  LOperand* temp = TempRegister();
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LLoadFunctionPrototype(function, temp)));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* elements = UseRegister(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+
+  if (!instr->is_fixed_typed_array()) {
+    if (instr->representation().IsDouble()) {
+      LOperand* temp = (!instr->key()->IsConstant() ||
+                        instr->RequiresHoleCheck())
+          ? TempRegister()
+          : NULL;
+      LInstruction* result = DefineAsRegister(
+          new (zone()) LLoadKeyedFixedDouble(elements, key, temp));
+      if (instr->RequiresHoleCheck()) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(instr->representation().IsSmiOrTagged() ||
+             instr->representation().IsInteger32());
+      LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+      LInstruction* result =
+          DefineAsRegister(new (zone()) LLoadKeyedFixed(elements, key, temp));
+      if (instr->RequiresHoleCheck() ||
+          (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED &&
+           info()->IsStub())) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    }
+  } else {
+    DCHECK((instr->representation().IsInteger32() &&
+            !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+           (instr->representation().IsDouble() &&
+            IsDoubleOrFloatElementsKind(instr->elements_kind())));
+
+    LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
+    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+    LInstruction* result = DefineAsRegister(new (zone()) LLoadKeyedExternal(
+        elements, key, backing_store_owner, temp));
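+    // A uint32 element can exceed kMaxInt; unless the result is known to
+    // be used as a uint32, the load must be able to deoptimize.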
+    if (elements_kind == UINT32_ELEMENTS &&
+        !instr->CheckFlag(HInstruction::kUint32)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
+                  x0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  LOperand* object = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new(zone()) LLoadNamedField(object));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), x0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+  LOperand* map = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+          dividend, divisor));
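+  // Deopt when a negative divisor could produce -0 for a zero dividend, or
+  // when kMinInt / -1 would overflow.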
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp =
+      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+      NULL : TempRegister();
+  LInstruction* result = DefineAsRegister(
+      new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* remainder = TempRegister();
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LFlooringDivI(dividend, divisor, remainder));
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
+    return DoFlooringDivByConstI(instr);
+  } else {
+    return DoFlooringDivI(instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    left = UseRegisterAtStart(instr->left());
+    right = UseRegisterAtStart(instr->right());
+  }
+  return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+          dividend, divisor));
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp = TempRegister();
+  LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+          dividend, divisor, temp));
+  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result = DefineAsRegister(new(zone()) LModI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
+  } else {
+    return DoArithmeticT(Token::MOD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+    HValue* least_const = instr->BetterLeftOperand();
+    HValue* most_const = instr->BetterRightOperand();
+
+    // LMulConstI can handle a subset of constants:
+    //  With support for overflow detection:
+    //    -1, 0, 1, 2
+    //    2^n, -(2^n)
+    //  Without support for overflow detection:
+    //    2^n + 1, -(2^n - 1)
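+    // For example (illustrative, not part of this change):
+    //     x * 8            ->  lsl x0, x9, #3
+    //     x * 9 (2^3 + 1)  ->  add x0, x9, x9 LSL #3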
+    if (most_const->IsConstant()) {
+      int32_t constant = HConstant::cast(most_const)->Integer32Value();
+      bool small_constant = (constant >= -1) && (constant <= 2);
+      bool end_range_constant = (constant <= -kMaxInt) || (constant == kMaxInt);
+      int32_t constant_abs = Abs(constant);
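+      // End-of-range constants are excluded so that constant_abs and
+      // constant_abs +/- 1 below stay representable as int32.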
+
+      if (!end_range_constant &&
+          (small_constant || (base::bits::IsPowerOfTwo32(constant_abs)) ||
+           (!can_overflow && (base::bits::IsPowerOfTwo32(constant_abs + 1) ||
+                              base::bits::IsPowerOfTwo32(constant_abs - 1))))) {
+        LConstantOperand* right = UseConstant(most_const);
+        bool need_register =
+            base::bits::IsPowerOfTwo32(constant_abs) && !small_constant;
+        LOperand* left = need_register ? UseRegister(least_const)
+                                       : UseRegisterAtStart(least_const);
+        LInstruction* result =
+            DefineAsRegister(new(zone()) LMulConstIS(left, right));
+        if ((bailout_on_minus_zero && constant <= 0) ||
+            (can_overflow && constant != 1 &&
+             base::bits::IsPowerOfTwo32(constant_abs))) {
+          result = AssignEnvironment(result);
+        }
+        return result;
+      }
+    }
+
+    // LMulI/S can handle all cases, but it requires that a register is
+    // allocated for the second operand.
+    LOperand* left = UseRegisterAtStart(least_const);
+    LOperand* right = UseRegisterAtStart(most_const);
+    LInstruction* result = instr->representation().IsSmi()
+        ? DefineAsRegister(new(zone()) LMulS(left, right))
+        : DefineAsRegister(new(zone()) LMulI(left, right));
+    if ((bailout_on_minus_zero && least_const != most_const) || can_overflow) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  DCHECK(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new(zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
+    int spill_index = chunk_->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor.GetRegisterParameter(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  DCHECK(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), d0);
+  LOperand* right;
+  if (exponent_type.IsInteger32()) {
+    right = UseFixed(instr->right(), MathPowIntegerDescriptor::exponent());
+  } else if (exponent_type.IsDouble()) {
+    right = UseFixedDouble(instr->right(), d1);
+  } else {
+    right = UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+  }
+  LPower* result = new(zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, d0),
+                    instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+  int argc = instr->OperandCount();
+  AddInstruction(new(zone()) LPreparePushArguments(argc), instr);
+
+  LPushArguments* push_args = new(zone()) LPushArguments(zone());
+
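+  // Arguments are pushed in batches: once the current LPushArguments is
+  // full (ShouldSplitPush), it is emitted and a fresh batch is started.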
+  for (int i = 0; i < argc; ++i) {
+    if (push_args->ShouldSplitPush()) {
+      AddInstruction(push_args, instr);
+      push_args = new(zone()) LPushArguments(zone());
+    }
+    push_args->AddArgument(UseRegister(instr->argument(i)));
+  }
+
+  return push_args;
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+  HValue* value = instr->value();
+  DCHECK(value->representation().IsDouble());
+  return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+  LOperand* lo = UseRegisterAndClobber(instr->lo());
+  LOperand* hi = UseRegister(instr->hi());
+  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub()
+      ? UseFixed(instr->context(), cp)
+      : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new(zone()) LReturn(UseFixed(instr->value(), x0), context,
+                             parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* temp = TempRegister();
+  LSeqStringGetChar* result =
+      new(zone()) LSeqStringGetChar(string, index, temp);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegister(instr->string());
+  LOperand* index = FLAG_debug_code
+      ? UseRegister(instr->index())
+      : UseRegisterOrConstant(instr->index());
+  LOperand* value = UseRegister(instr->value());
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+  LOperand* temp = TempRegister();
+  LSeqStringSetChar* result =
+      new(zone()) LSeqStringSetChar(context, string, index, value, temp);
+  return DefineAsRegister(result);
+}
+
+
+HBitwiseBinaryOperation* LChunkBuilder::CanTransformToShiftedOp(HValue* val,
+                                                                HValue** left) {
+  if (!val->representation().IsInteger32()) return NULL;
+  if (!(val->IsBitwise() || val->IsAdd() || val->IsSub())) return NULL;
+
+  HBinaryOperation* hinstr = HBinaryOperation::cast(val);
+  HValue* hleft = hinstr->left();
+  HValue* hright = hinstr->right();
+  DCHECK(hleft->representation().Equals(hinstr->representation()));
+  DCHECK(hright->representation().Equals(hinstr->representation()));
+
+  if (hleft == hright) return NULL;
+
+  if ((hright->IsConstant() &&
+       LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) ||
+      (hinstr->IsCommutative() && hleft->IsConstant() &&
+       LikelyFitsImmField(hinstr, HConstant::cast(hleft)->Integer32Value()))) {
+    // The constant operand will likely fit in the immediate field. We are
+    // better off with
+    //     lsl x8, x9, #imm
+    //     add x0, x8, #imm2
+    // than with
+    //     mov x16, #imm2
+    //     add x0, x16, x9 LSL #imm
+    return NULL;
+  }
+
+  HBitwiseBinaryOperation* shift = NULL;
+  // TODO(aleram): We will miss situations where a shift operation is used by
+  // different instructions as both a left and a right operand.
+  if (hright->IsBitwiseBinaryShift() &&
+      HBitwiseBinaryOperation::cast(hright)->right()->IsConstant()) {
+    shift = HBitwiseBinaryOperation::cast(hright);
+    if (left != NULL) {
+      *left = hleft;
+    }
+  } else if (hinstr->IsCommutative() &&
+             hleft->IsBitwiseBinaryShift() &&
+             HBitwiseBinaryOperation::cast(hleft)->right()->IsConstant()) {
+    shift = HBitwiseBinaryOperation::cast(hleft);
+    if (left != NULL) {
+      *left = hright;
+    }
+  } else {
+    return NULL;
+  }
+
+  if ((JSShiftAmountFromHConstant(shift->right()) == 0) && shift->IsShr()) {
+    // Shifts right by zero can deoptimize.
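+    // (For a negative input, `x >>> 0` yields a uint32 above kMaxInt.)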
+    return NULL;
+  }
+
+  return shift;
+}
+
+
+bool LChunkBuilder::ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift) {
+  if (!shift->representation().IsInteger32()) {
+    return false;
+  }
+  for (HUseIterator it(shift->uses()); !it.Done(); it.Advance()) {
+    if (shift != CanTransformToShiftedOp(it.value())) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+LInstruction* LChunkBuilder::TryDoOpWithShiftedRightOperand(
+    HBinaryOperation* instr) {
+  HValue* left;
+  HBitwiseBinaryOperation* shift = CanTransformToShiftedOp(instr, &left);
+
+  if ((shift != NULL) && ShiftCanBeOptimizedAway(shift)) {
+    return DoShiftedBinaryOp(instr, left, shift);
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoShiftedBinaryOp(
+    HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) {
+  DCHECK(hshift->IsBitwiseBinaryShift());
+  DCHECK(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0));
+
+  LTemplateResultInstruction<1>* res;
+  LOperand* left = UseRegisterAtStart(hleft);
+  LOperand* right = UseRegisterAtStart(hshift->left());
+  LOperand* shift_amount = UseConstant(hshift->right());
+  Shift shift_op;
+  switch (hshift->opcode()) {
+    case HValue::kShl: shift_op = LSL; break;
+    case HValue::kShr: shift_op = LSR; break;
+    case HValue::kSar: shift_op = ASR; break;
+    default: UNREACHABLE(); shift_op = NO_SHIFT;
+  }
+
+  if (hinstr->IsBitwise()) {
+    res = new(zone()) LBitI(left, right, shift_op, shift_amount);
+  } else if (hinstr->IsAdd()) {
+    res = new(zone()) LAddI(left, right, shift_op, shift_amount);
+  } else {
+    DCHECK(hinstr->IsSub());
+    res = new(zone()) LSubI(left, right, shift_op, shift_amount);
+  }
+  if (hinstr->CheckFlag(HValue::kCanOverflow)) {
+    AssignEnvironment(res);
+  }
+  return DefineAsRegister(res);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsTagged()) {
+    return DoArithmeticT(op, instr);
+  }
+
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+  if (ShiftCanBeOptimizedAway(instr)) {
+    return NULL;
+  }
+
+  LOperand* left = instr->representation().IsSmi()
+      ? UseRegister(instr->left())
+      : UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+
+  // The only shift that can deoptimize is `left >>> 0`, where left is negative.
+  // In these cases, the result is a uint32 that is too large for an int32.
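+  // For example, in JavaScript (-1 >>> 0) === 4294967295, which cannot be
+  // represented as an int32.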
+  bool right_can_be_zero = !instr->right()->IsConstant() ||
+                           (JSShiftAmountFromHConstant(instr->right()) == 0);
+  bool can_deopt = false;
+  if ((op == Token::SHR) && right_can_be_zero) {
+    can_deopt = !instr->CheckFlag(HInstruction::kUint32);
+  }
+
+  LInstruction* result;
+  if (instr->representation().IsInteger32()) {
+    result = DefineAsRegister(new (zone()) LShiftI(op, left, right, can_deopt));
+  } else {
+    DCHECK(instr->representation().IsSmi());
+    result = DefineAsRegister(new (zone()) LShiftS(op, left, right, can_deopt));
+  }
+
+  return can_deopt ? AssignEnvironment(result) : result;
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), cp);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
+  } else {
+    DCHECK(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(HStoreCodeEntry* instr) {
+  LOperand* function = UseRegister(instr->function());
+  LOperand* code_object = UseRegisterAtStart(instr->code_object());
+  LOperand* temp = TempRegister();
+  return new(zone()) LStoreCodeEntry(function, code_object, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* temp = TempRegister();
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    // TODO(all): Replace these constraints when RecordWriteStub has been
+    // rewritten.
+    context = UseRegisterAndClobber(instr->context());
+    value = UseRegisterAndClobber(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* temp = NULL;
+  LOperand* elements = NULL;
+  LOperand* val = NULL;
+
+  if (!instr->is_fixed_typed_array() &&
+      instr->value()->representation().IsTagged() &&
+      instr->NeedsWriteBarrier()) {
+    // RecordWrite() will clobber all registers.
+    elements = UseRegisterAndClobber(instr->elements());
+    val = UseRegisterAndClobber(instr->value());
+    temp = TempRegister();
+  } else {
+    elements = UseRegister(instr->elements());
+    val = UseRegister(instr->value());
+    temp = instr->key()->IsConstant() ? NULL : TempRegister();
+  }
+
+  if (instr->is_fixed_typed_array()) {
+    DCHECK((instr->value()->representation().IsInteger32() &&
+            !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+           (instr->value()->representation().IsDouble() &&
+            IsDoubleOrFloatElementsKind(instr->elements_kind())));
+    DCHECK(instr->elements()->representation().IsExternal());
+    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+    return new (zone())
+        LStoreKeyedExternal(elements, key, val, backing_store_owner, temp);
+
+  } else if (instr->value()->representation().IsDouble()) {
+    DCHECK(instr->elements()->representation().IsTagged());
+    return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
+
+  } else {
+    DCHECK(instr->elements()->representation().IsTagged());
+    DCHECK(instr->value()->representation().IsSmiOrTagged() ||
+           instr->value()->representation().IsInteger32());
+    return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
+
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreKeyedGeneric* result = new (zone())
+      LStoreKeyedGeneric(context, object, key, value, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  // TODO(jbramley): It might be beneficial to allow value to be a constant in
+  // some cases. x64 makes use of this with FLAG_track_fields, for example.
+
+  LOperand* object = UseRegister(instr->object());
+  LOperand* value;
+  LOperand* temp0 = NULL;
+  LOperand* temp1 = NULL;
+
+  if (instr->access().IsExternalMemory() ||
+      (!FLAG_unbox_double_fields && instr->field_representation().IsDouble())) {
+    value = UseRegister(instr->value());
+  } else if (instr->NeedsWriteBarrier()) {
+    value = UseRegisterAndClobber(instr->value());
+    temp0 = TempRegister();
+    temp1 = TempRegister();
+  } else if (instr->NeedsWriteBarrierForMap()) {
+    value = UseRegister(instr->value());
+    temp0 = TempRegister();
+    temp1 = TempRegister();
+  } else {
+    value = UseRegister(instr->value());
+    temp0 = TempRegister();
+  }
+
+  return new(zone()) LStoreNamedField(object, value, temp0, temp1);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreNamedGeneric* result =
+      new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), x1);
+  LOperand* right = UseFixed(instr->right(), x0);
+
+  LStringAdd* result = new(zone()) LStringAdd(context, left, right);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseRegisterAndClobber(instr->string());
+  LOperand* index = UseRegisterAndClobber(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), x1);
+  LOperand* right = UseFixed(instr->right(), x0);
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(context, left, right);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+    LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
+    if (shifted_operation != NULL) {
+      return shifted_operation;
+    }
+
+    LOperand* left;
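+    // A constant zero on the left (0 - x, i.e. unary minus) is kept as a
+    // constant so the codegen can emit a negate rather than materialise
+    // zero (an assumption about the LSubI/LSubS codegen).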
+    if (instr->left()->IsConstant() &&
+        (HConstant::cast(instr->left())->Integer32Value() == 0)) {
+      left = UseConstant(instr->left());
+    } else {
+      left = UseRegisterAtStart(instr->left());
+    }
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    LInstruction* result = instr->representation().IsSmi() ?
+        DefineAsRegister(new(zone()) LSubS(left, right)) :
+        DefineAsRegister(new(zone()) LSubI(left, right));
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  if (instr->HasNoUses()) {
+    return NULL;
+  } else {
+    return DefineAsRegister(new(zone()) LThisFunction);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), x0);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, NULL,
+                                            TempRegister(), TempRegister());
+    return result;
+  } else {
+    LOperand* object = UseFixed(instr->object(), x0);
+    LOperand* context = UseFixed(instr->context(), cp);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
+    return MarkAsCall(result, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp1, temp2);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseRegister(instr->object());
+  LOperand* elements = UseRegister(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+  LMaybeGrowElements* result = new (zone())
+      LMaybeGrowElements(context, object, elements, key, current_capacity);
+  DefineFixed(result, x0);
+  return AssignPointerMap(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* value = UseFixed(instr->value(), x3);
+  LTypeof* result = new (zone()) LTypeof(context, value);
+  return MarkAsCall(DefineFixed(result, x0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  // We only need temp registers in some cases, but we can't dereference the
+  // instr->type_literal() handle to test that here.
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+
+  return new(zone()) LTypeofIsAndBranch(
+      UseRegister(instr->value()), temp1, temp2);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathAbs: {
+      Representation r = instr->representation();
+      if (r.IsTagged()) {
+        // The tagged case might need to allocate a HeapNumber for the result,
+        // so it is handled by a separate LInstruction.
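+        // (For example, a HeapNumber input such as -0.5 can require
+        // allocating a new HeapNumber to hold the result.)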
+        LOperand* context = UseFixed(instr->context(), cp);
+        LOperand* input = UseRegister(instr->value());
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LOperand* temp3 = TempRegister();
+        LInstruction* result = DefineAsRegister(
+            new(zone()) LMathAbsTagged(context, input, temp1, temp2, temp3));
+        return AssignEnvironment(AssignPointerMap(result));
+      } else {
+        LOperand* input = UseRegisterAtStart(instr->value());
+        LInstruction* result = DefineAsRegister(new(zone()) LMathAbs(input));
+        if (!r.IsDouble()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+    case kMathExp: {
+      DCHECK(instr->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
+      LOperand* input = UseRegister(instr->value());
+      LOperand* double_temp1 = TempDoubleRegister();
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      LOperand* temp3 = TempRegister();
+      LMathExp* result = new(zone()) LMathExp(input, double_temp1,
+                                              temp1, temp2, temp3);
+      return DefineAsRegister(result);
+    }
+    case kMathFloor: {
+      DCHECK(instr->value()->representation().IsDouble());
+      LOperand* input = UseRegisterAtStart(instr->value());
+      if (instr->representation().IsInteger32()) {
+        LMathFloorI* result = new(zone()) LMathFloorI(input);
+        return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+      } else {
+        DCHECK(instr->representation().IsDouble());
+        LMathFloorD* result = new(zone()) LMathFloorD(input);
+        return DefineAsRegister(result);
+      }
+    }
+    case kMathLog: {
+      DCHECK(instr->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
+      LOperand* input = UseFixedDouble(instr->value(), d0);
+      LMathLog* result = new(zone()) LMathLog(input);
+      return MarkAsCall(DefineFixedDouble(result, d0), instr);
+    }
+    case kMathPowHalf: {
+      DCHECK(instr->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
+      LOperand* input = UseRegister(instr->value());
+      return DefineAsRegister(new(zone()) LMathPowHalf(input));
+    }
+    case kMathRound: {
+      DCHECK(instr->value()->representation().IsDouble());
+      LOperand* input = UseRegister(instr->value());
+      if (instr->representation().IsInteger32()) {
+        LOperand* temp = TempDoubleRegister();
+        LMathRoundI* result = new(zone()) LMathRoundI(input, temp);
+        return AssignEnvironment(DefineAsRegister(result));
+      } else {
+        DCHECK(instr->representation().IsDouble());
+        LMathRoundD* result = new(zone()) LMathRoundD(input);
+        return DefineAsRegister(result);
+      }
+    }
+    case kMathFround: {
+      DCHECK(instr->value()->representation().IsDouble());
+      LOperand* input = UseRegister(instr->value());
+      LMathFround* result = new (zone()) LMathFround(input);
+      return DefineAsRegister(result);
+    }
+    case kMathSqrt: {
+      DCHECK(instr->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
+      LOperand* input = UseRegisterAtStart(instr->value());
+      return DefineAsRegister(new(zone()) LMathSqrt(input));
+    }
+    case kMathClz32: {
+      DCHECK(instr->representation().IsInteger32());
+      DCHECK(instr->value()->representation().IsInteger32());
+      LOperand* input = UseRegisterAtStart(instr->value());
+      return DefineAsRegister(new(zone()) LMathClz32(input));
+    }
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
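+  // Parameters reuse their parameter stack slots; locals are spilled at an
+  // offset from first_local_index, as computed below.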
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk_->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Retry(kTooManySpillSlotsNeededForOSR);
+      spill_index = 0;
+    }
+  }
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  // Assign object to a fixed register different from those already used in
+  // LForInPrepareMap.
+  LOperand* object = UseFixed(instr->enumerable(), x0);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, x0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegister(instr->map());
+  LOperand* temp = TempRegister();
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map, temp));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegisterAtStart(instr->object());
+  LOperand* index = UseRegisterAndClobber(instr->index());
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegister(instr->receiver());
+  LOperand* function = UseRegister(instr->function());
+  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->context());
+  return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+    HAllocateBlockContext* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LAllocateBlockContext* result =
+      new(zone()) LAllocateBlockContext(context, function);
+  return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/arm64/lithium-arm64.h b/src/crankshaft/arm64/lithium-arm64.h
new file mode 100644
index 0000000..1b627d1
--- /dev/null
+++ b/src/crankshaft/arm64/lithium-arm64.h
@@ -0,0 +1,3160 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
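+// X-macro listing every concrete Lithium instruction on this port. Each
+// V(Name) entry is expanded below to declare the kName opcode value
+// (DECLARE_OPCODE) and the IsName() type test (DECLARE_PREDICATE), and the
+// matching LName class implements them via DECLARE_CONCRETE_INSTRUCTION.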
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddE)                                    \
+  V(AddI)                                    \
+  V(AddS)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BitS)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallFunction)                            \
+  V(CallJSFunction)                          \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CallWithDescriptor)                      \
+  V(CheckArrayBufferNotNeutered)             \
+  V(CheckInstanceType)                       \
+  V(CheckMapValue)                           \
+  V(CheckMaps)                               \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CmpHoleAndBranchD)                       \
+  V(CmpHoleAndBranchT)                       \
+  V(CmpMapAndBranch)                         \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpT)                                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToIntOrSmi)                        \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInPrototypeChainAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsSmiAndBranch)                          \
+  V(IsStringAndBranch)                       \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyedExternal)                       \
+  V(LoadKeyedFixed)                          \
+  V(LoadKeyedFixedDouble)                    \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(LoadRoot)                                \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathAbsTagged)                           \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloorD)                              \
+  V(MathFloorI)                              \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRoundD)                              \
+  V(MathRoundI)                              \
+  V(MathSqrt)                                \
+  V(MaybeGrowElements)                       \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulConstIS)                              \
+  V(MulI)                                    \
+  V(MulS)                                    \
+  V(NumberTagD)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(Prologue)                                \
+  V(PreparePushArguments)                    \
+  V(PushArguments)                           \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(ShiftS)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreKeyedExternal)                      \
+  V(StoreKeyedFixed)                         \
+  V(StoreKeyedFixedDouble)                   \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(SubS)                                    \
+  V(TaggedToI)                               \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(TruncateDoubleToIntOrSmi)                \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const final { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) final;              \
+  const char* Mnemonic() const final { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
+  }
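+//
+// For illustration, DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i") expands to:
+//
+//   Opcode opcode() const final { return LInstruction::kAddI; }
+//   void CompileToNative(LCodeGen* generator) final;
+//   const char* Mnemonic() const final { return "add-i"; }
+//   static LAddI* cast(LInstruction* instr) {
+//     DCHECK(instr->IsAddI());
+//     return reinterpret_cast<LAddI*>(instr);
+//   }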
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)           \
+  H##type* hydrogen() const {                     \
+    return H##type::cast(this->hydrogen_value()); \
+  }
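+//
+// For illustration, DECLARE_HYDROGEN_ACCESSOR(Add) expands to:
+//
+//   HAdd* hydrogen() const {
+//     return HAdd::cast(this->hydrogen_value());
+//   }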
+
+
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) { }
+
+  virtual ~LInstruction() { }
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
+  bool IsMarkedAsCall() const { return IsCall(); }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+ private:
+  class IsCallBits : public BitField<bool, 0, 1> {};
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  int32_t bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  bool HasResult() const final { return (R != 0) && (result() != NULL); }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const override { return results_[0]; }
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  int InputCount() final { return I; }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return T; }
+  LOperand* TempAt(int i) final { return temps_[i]; }
+};
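+//
+// For example, LTemplateInstruction<1, 2, 0> describes an instruction that
+// produces one result from two inputs and needs no temporaries (e.g. LAddS
+// below).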
+
+
+class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+  bool IsControl() const final { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  DECLARE_HYDROGEN_ACCESSOR(ControlInstruction);
+
+  Label* false_label_;
+  Label* true_label_;
+};
+
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use the DECLARE-macro here because of sub-classes.
+  bool IsGap() const override { return true; }
+  void PrintDataTo(StringStream* stream) override;
+  static LGap* cast(LInstruction* instr) {
+    DCHECK(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
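+  // Gap moves are inserted by the register allocator to shuffle values
+  // between registers and stack slots; they can occur at four positions
+  // relative to the instruction the gap belongs to.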
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone)  {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new(zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos)  {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LInstructionGap final : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LDrop final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
+class LDummy final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() {}
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) {
+    inputs_[0] = value;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LGoto final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  void PrintDataTo(StringStream* stream) override;
+  bool IsControl() const override { return true; }
+
+  int block_id() const { return block_->block_id(); }
+
+ private:
+  HBasicBlock* block_;
+};
+
+
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
+class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LLabel final : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry() {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments,
+                     LOperand* length,
+                     LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LAddE final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddE(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
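+// The second constructor lets the right operand carry an arm64 operand
+// shift, matching encodings such as "add x0, x1, x2, LSL #2"; NO_SHIFT
+// selects the plain register-register form.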
+class LAddI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LAddI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Shift shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+
+ protected:
+  Shift shift_;
+  LOperand* shift_amount_;
+};
+
+
+class LAddS final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddS(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAllocate final : public LTemplateInstruction<1, 2, 3> {
+ public:
+  LAllocate(LOperand* context,
+            LOperand* size,
+            LOperand* temp1,
+            LOperand* temp2,
+            LOperand* temp3) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* temp3() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LArgumentsElements final : public LTemplateInstruction<1, 0, 1> {
+ public:
+  explicit LArgumentsElements(LOperand* temp) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op,
+               LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op,
+               LOperand* context,
+               LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+  Token::Value op() const { return op_; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  explicit LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LBitI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Shift shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+ protected:
+  Shift shift_;
+  LOperand* shift_amount_;
+};
+
+
+class LBitS final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitS(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitS, "bit-s")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LBranch final : public LControlInstruction<1, 2> {
+ public:
+  explicit LBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallJSFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+                LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LCallStub final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallStub(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LCheckArrayBufferNotNeutered final
+    : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
+
+  LOperand* view() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
+                               "check-array-buffer-not-neutered")
+  DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
+};
+
+
+class LCheckInstanceType final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  explicit LCheckInstanceType(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckValue(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampDToUint8(LOperand* unclamped) {
+    inputs_[0] = unclamped;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* unclamped) {
+    inputs_[0] = unclamped;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* unclamped, LOperand* temp1) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp1;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleBits(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LConstructDouble(LOperand* hi, LOperand* lo) {
+    inputs_[0] = hi;
+    inputs_[1] = lo;
+  }
+
+  LOperand* hi() { return inputs_[0]; }
+  LOperand* lo() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpHoleAndBranchD final : public LControlInstruction<1, 1> {
+ public:
+  explicit LCmpHoleAndBranchD(LOperand* object, LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchD, "cmp-hole-and-branch-d")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCmpHoleAndBranchT final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranchT(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranchT, "cmp-hole-and-branch-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+                               "cmp-minus-zero-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->representation().IsDouble();
+  }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LConstantD final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+
+class LConstantI final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantT final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+
+class LContext final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool IsControl() const override { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivByConstI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LDoubleToIntOrSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToIntOrSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToIntOrSmi, "double-to-int-or-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() const { return inputs_[0]; }
+  LOperand* left() const { return inputs_[1]; }
+  LOperand* right() const { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 2> {
+ public:
+  LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
+                                LOperand* scratch1, LOperand* scratch2) {
+    inputs_[0] = object;
+    inputs_[1] = prototype;
+    temps_[0] = scratch1;
+    temps_[1] = scratch2;
+  }
+
+  LOperand* object() const { return inputs_[0]; }
+  LOperand* prototype() const { return inputs_[1]; }
+  LOperand* scratch1() const { return temps_[0]; }
+  LOperand* scratch2() const { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+                               "has-in-prototype-chain-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
+};
+
+
+class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        inputs_(descriptor.GetRegisterParameterCount() +
+                    kImplicitRegisterParameterCount,
+                zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() +
+               kImplicitRegisterParameterCount ==
+           operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  CallInterfaceDescriptor descriptor() { return descriptor_; }
+
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  // The target and context are passed as implicit parameters that are not
+  // explicitly listed in the descriptor.
+  static const int kImplicitRegisterParameterCount = 2;
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  CallInterfaceDescriptor descriptor_;
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  int InputCount() final { return inputs_.length(); }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return 0; }
+  LOperand* TempAt(int i) final { return NULL; }
+};
+
+
+class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LIsStringAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() const { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+    inputs_[0] = function;
+    temps_[0] = temp;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+template <int T>
+class LLoadKeyed : public LTemplateInstruction<1, 3, T> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
+    this->inputs_[0] = elements;
+    this->inputs_[1] = key;
+    this->inputs_[2] = backing_store_owner;
+  }
+
+  LOperand* elements() { return this->inputs_[0]; }
+  LOperand* key() { return this->inputs_[1]; }
+  LOperand* backing_store_owner() { return this->inputs_[2]; }
+  ElementsKind elements_kind() const {
+    return this->hydrogen()->elements_kind();
+  }
+  bool is_external() const {
+    return this->hydrogen()->is_external();
+  }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  bool is_typed_elements() const {
+    return is_external() || is_fixed_typed_array();
+  }
+  uint32_t base_offset() const {
+    return this->hydrogen()->base_offset();
+  }
+  void PrintDataTo(StringStream* stream) override {
+    this->elements()->PrintTo(stream);
+    stream->Add("[");
+    this->key()->PrintTo(stream);
+    if (this->base_offset() != 0) {
+      stream->Add(" + %d]", this->base_offset());
+    } else {
+      stream->Add("]");
+    }
+  }
+
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+};
+
+
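+// Each arm64 keyed-load flavour reserves one extra temp register on top of
+// the shared template, hence the LLoadKeyed<1> instantiation.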
+class LLoadKeyedExternal : public LLoadKeyed<1> {
+ public:
+  LLoadKeyedExternal(LOperand* elements, LOperand* key,
+                     LOperand* backing_store_owner, LOperand* temp)
+      : LLoadKeyed<1>(elements, key, backing_store_owner) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedExternal, "load-keyed-external")
+};
+
+
+class LLoadKeyedFixed : public LLoadKeyed<1> {
+ public:
+  LLoadKeyedFixed(LOperand* elements, LOperand* key, LOperand* temp)
+      : LLoadKeyed<1>(elements, key, nullptr) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixed, "load-keyed-fixed")
+};
+
+
+class LLoadKeyedFixedDouble : public LLoadKeyed<1> {
+ public:
+  LLoadKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* temp)
+      : LLoadKeyed<1>(elements, key, nullptr) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFixedDouble, "load-keyed-fixed-double")
+};
+
+
+class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                    LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+
+class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMapEnumLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+template<int T>
+class LUnaryMathOperation : public LTemplateInstruction<1, 1, T> {
+ public:
+  explicit LUnaryMathOperation(LOperand* value) {
+    this->inputs_[0] = value;
+  }
+
+  LOperand* value() { return this->inputs_[0]; }
+  BuiltinFunctionId op() const { return this->hydrogen()->op(); }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathAbs final : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+};
+
+
+class LMathAbsTagged : public LTemplateInstruction<1, 2, 3> {
+ public:
+  LMathAbsTagged(LOperand* context, LOperand* value,
+                 LOperand* temp1, LOperand* temp2, LOperand* temp3) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* temp3() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbsTagged, "math-abs-tagged")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathExp final : public LUnaryMathOperation<4> {
+ public:
+  LMathExp(LOperand* value,
+           LOperand* double_temp1,
+           LOperand* temp1,
+           LOperand* temp2,
+           LOperand* temp3)
+      : LUnaryMathOperation<4>(value) {
+    temps_[0] = double_temp1;
+    temps_[1] = temp1;
+    temps_[2] = temp2;
+    temps_[3] = temp3;
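+    // Ensure the lookup tables used by the generated math-exp code exist
+    // before code generation runs.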
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* double_temp1() { return temps_[0]; }
+  LOperand* temp1() { return temps_[1]; }
+  LOperand* temp2() { return temps_[2]; }
+  LOperand* temp3() { return temps_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+// Math.floor with a double result.
+class LMathFloorD final : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathFloorD(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
+};
+
+
+// Math.floor with an integer result.
+class LMathFloorI final : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathFloorI(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
+};
+
+
+class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
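The general case follows the same rounding rule. As a hedged standalone sketch alongside LFlooringDivI (a reference model of the semantics, not the emitted code): flooring division can be built from C++'s truncating division by stepping the quotient once toward negative infinity whenever there is a remainder and the operand signs differ.

  #include <cstdint>

  // floor(n / d) from truncating division.
  int32_t FlooringDiv(int32_t n, int32_t d) {
    int32_t q = n / d;  // truncates toward zero
    if ((n % d) != 0 && ((n < 0) != (d < 0))) --q;
    return q;
  }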
+
+class LMathLog final : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 final : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathClz32(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
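LMathMinMax cannot be a plain compare-and-select because JavaScript's Math.min/Math.max must propagate NaN and order -0 below +0. A standalone sketch of the required double semantics (illustrative only; on this port the lowering can lean on hardware min/max instructions that already implement these rules):

  #include <cmath>
  #include <limits>

  // JS Math.min for doubles: NaN is contagious and -0 is ordered below +0.
  double JsMin(double a, double b) {
    if (std::isnan(a) || std::isnan(b))
      return std::numeric_limits<double>::quiet_NaN();
    if (a == 0.0 && b == 0.0)            // distinguishes -0 from +0
      return std::signbit(a) ? a : b;
    return (a < b) ? a : b;
  }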
+
+class LMathPowHalf final : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
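Math.pow(x, 0.5) gets its own instruction (LMathPowHalf above) because it disagrees with Math.sqrt on edge cases. A hedged standalone sketch of the JavaScript semantics the instruction must preserve:

  #include <cmath>
  #include <limits>

  // pow(-Infinity, 0.5) is +Infinity, but sqrt(-Infinity) is NaN; and
  // pow(-0, 0.5) is +0 while IEEE sqrt(-0) is -0.
  double PowHalf(double x) {
    if (x == -std::numeric_limits<double>::infinity())
      return std::numeric_limits<double>::infinity();
    if (x == 0.0) return 0.0;
    return std::sqrt(x);
  }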
+
+// Math.round with a double result.
+class LMathRoundD final : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathRoundD(LOperand* value)
+      : LUnaryMathOperation<0>(value) {
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRoundD, "math-round-d")
+};
+
+
+// Math.round with an integer result.
+class LMathRoundI final : public LUnaryMathOperation<1> {
+ public:
+  LMathRoundI(LOperand* value, LOperand* temp1)
+      : LUnaryMathOperation<1>(value) {
+    temps_[0] = temp1;
+  }
+
+  LOperand* temp1() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRoundI, "math-round-i")
+};
+
+
+class LMathFround final : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathFround(LOperand* value) : LUnaryMathOperation<0>(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathSqrt final : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
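Unlike C++'s %, a mask alone is not enough for mod-by-power-of-2: JavaScript's % takes the sign of the dividend, so a negative dividend needs a correction after masking. A minimal standalone sketch of the semantics (not the emitted instruction sequence):

  #include <cstdint>

  // n mod d for d a positive power of two, with the result taking the sign
  // of the dividend (JS semantics): -5 % 4 must be -1, not the masked 3.
  int32_t ModByPowerOf2(int32_t n, int32_t d) {
    int32_t r = n & (d - 1);
    if (n < 0 && r != 0) r -= d;
    return r;
  }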
+
+class LModByConstI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LModByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
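ModByConstI is the remainder counterpart of division by a constant: the quotient is computed first (in the emitted code via a precomputed multiplicative "magic number"; see HasMagicNumberForDivision in the chunk builder below), and the remainder falls out by multiply-subtract. As a plain sketch of the identity used:

  #include <cstdint>

  // n mod d as n - (n / d) * d; with truncating division this matches the
  // sign-of-dividend rule that JS % requires.
  int32_t ModByConst(int32_t n, int32_t d) {
    return n - (n / d) * d;
  }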
+
+class LModI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LModI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LMulConstIS final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulConstIS(LOperand* left, LConstantOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LConstantOperand* right() { return LConstantOperand::cast(inputs_[1]); }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulConstIS, "mul-const-i-s")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LMulI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LMulS final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulS(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulS, "mul-s")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  explicit LNumberTagU(LOperand* value,
+                       LOperand* temp1,
+                       LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberUntagD(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LParameter final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LPower final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LPreparePushArguments final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LPreparePushArguments(int argc) : argc_(argc) {}
+
+  inline int argc() const { return argc_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PreparePushArguments, "prepare-push-arguments")
+
+ protected:
+  int argc_;
+};
+
+
+class LPushArguments final : public LTemplateResultInstruction<0> {
+ public:
+  explicit LPushArguments(Zone* zone,
+                          int capacity = kRecommendedMaxPushedArgs)
+      : zone_(zone), inputs_(capacity, zone) {}
+
+  LOperand* argument(int i) { return inputs_[i]; }
+  int ArgumentCount() const { return inputs_.length(); }
+
+  void AddArgument(LOperand* arg) { inputs_.Add(arg, zone_); }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArguments, "push-arguments")
+
+  // It is better to limit the number of arguments pushed simultaneously to
+  // avoid pressure on the register allocator.
+  static const int kRecommendedMaxPushedArgs = 4;
+  bool ShouldSplitPush() const {
+    return inputs_.length() >= kRecommendedMaxPushedArgs;
+  }
+
+ protected:
+  Zone* zone_;
+  ZoneList<LOperand*> inputs_;
+
+ private:
+  // Iterator support.
+  int InputCount() final { return inputs_.length(); }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return 0; }
+  LOperand* TempAt(int i) final { return NULL; }
+};
+
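The kRecommendedMaxPushedArgs limit keeps a single LPushArguments from holding too many simultaneously live operands. A hedged sketch of how a builder might consume ShouldSplitPush() to emit arguments in bounded chunks (hypothetical helper names; the real chunk builder drives this differently):

  #include <vector>

  // Chunk a long argument list so at most kMax operands are live per push.
  template <typename Op, typename EmitPushFn>
  void PushInChunks(const std::vector<Op*>& args, int kMax, EmitPushFn emit) {
    std::vector<Op*> chunk;
    for (Op* arg : args) {
      chunk.push_back(arg);
      if (static_cast<int>(chunk.size()) >= kMax) {  // cf. ShouldSplitPush()
        emit(chunk);
        chunk.clear();
      }
    }
    if (!chunk.empty()) emit(chunk);
  }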
+
+class LReturn final : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    DCHECK(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LSeqStringGetChar(LOperand* string,
+                    LOperand* index,
+                    LOperand* temp) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+    temps_[0] = temp;
+  }
+
+  LOperand* string() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 1> {
+ public:
+  LSeqStringSetChar(LOperand* context,
+                    LOperand* string,
+                    LOperand* index,
+                    LOperand* value,
+                    LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  bool needs_check() const { return needs_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+  bool needs_check_;
+};
+
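For context on SmiTag/SmiUntag: a small integer is stored directly in the tagged word, with a tag bit distinguishing it from a heap pointer. A minimal sketch in the 32-bit style (an assumption for illustration: tag bit 0 clear means Smi, value in the upper bits; on arm64 the payload actually occupies the upper 32 bits of a 64-bit word, but the idea is the same):

  #include <cstdint>

  // Tagging doubles the value, leaving bit 0 clear to mark the word a Smi.
  int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) * 2; }
  int32_t SmiUntag(int64_t smi) { return static_cast<int32_t>(smi / 2); }
  bool IsSmi(int64_t word) { return (word & 1) == 0; }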
+
+class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+template <int T>
+class LStoreKeyed : public LTemplateInstruction<0, 4, T> {
+ public:
+  LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value,
+              LOperand* backing_store_owner) {
+    this->inputs_[0] = elements;
+    this->inputs_[1] = key;
+    this->inputs_[2] = value;
+    this->inputs_[3] = backing_store_owner;
+  }
+
+  bool is_external() const { return this->hydrogen()->is_external(); }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  bool is_typed_elements() const {
+    return is_external() || is_fixed_typed_array();
+  }
+  LOperand* elements() { return this->inputs_[0]; }
+  LOperand* key() { return this->inputs_[1]; }
+  LOperand* value() { return this->inputs_[2]; }
+  LOperand* backing_store_owner() { return this->inputs_[3]; }
+  ElementsKind elements_kind() const {
+    return this->hydrogen()->elements_kind();
+  }
+
+  bool NeedsCanonicalization() {
+    if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+        hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+      return false;
+    }
+    return this->hydrogen()->NeedsCanonicalization();
+  }
+  uint32_t base_offset() const { return this->hydrogen()->base_offset(); }
+
+  void PrintDataTo(StringStream* stream) override {
+    this->elements()->PrintTo(stream);
+    stream->Add("[");
+    this->key()->PrintTo(stream);
+    if (this->base_offset() != 0) {
+      stream->Add(" + %d] <-", this->base_offset());
+    } else {
+      stream->Add("] <- ");
+    }
+
+    if (this->value() == NULL) {
+      DCHECK(hydrogen()->IsConstantHoleStore() &&
+             hydrogen()->value()->representation().IsDouble());
+      stream->Add("<the hole(nan)>");
+    } else {
+      this->value()->PrintTo(stream);
+    }
+  }
+
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+};
+
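NeedsCanonicalization() above concerns NaN payloads: before a double is stored into a double-backed array it must be collapsed to one canonical NaN bit pattern, so arbitrary payloads cannot collide with the hole sentinel (itself a reserved NaN pattern). Fresh results of +, -, * and / are exempt because arm64 already produces them in canonical form. A standalone sketch of the store-side rule (illustrative, not the emitted code):

  #include <cmath>
  #include <limits>

  // Collapse every NaN payload to the single canonical quiet NaN before it
  // is stored into a double array.
  double CanonicalizeForStore(double value) {
    return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN()
                             : value;
  }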
+
+class LStoreKeyedExternal final : public LStoreKeyed<1> {
+ public:
+  LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
+                      LOperand* backing_store_owner, LOperand* temp)
+      : LStoreKeyed<1>(elements, key, value, backing_store_owner) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedExternal, "store-keyed-external")
+};
+
+
+class LStoreKeyedFixed final : public LStoreKeyed<1> {
+ public:
+  LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
+                   LOperand* temp)
+      : LStoreKeyed<1>(elements, key, value, nullptr) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixed, "store-keyed-fixed")
+};
+
+
+class LStoreKeyedFixedDouble final : public LStoreKeyed<1> {
+ public:
+  LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
+                         LOperand* temp)
+      : LStoreKeyed<1>(elements, key, value, nullptr) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFixedDouble,
+                               "store-keyed-fixed-double")
+};
+
+
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
+ public:
+  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                     LOperand* value, LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    inputs_[3] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
+ public:
+  LStoreNamedField(LOperand* object, LOperand* value,
+                   LOperand* temp0, LOperand* temp1) {
+    inputs_[0] = object;
+    inputs_[1] = value;
+    temps_[0] = temp0;
+    temps_[1] = temp1;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp0() { return temps_[0]; }
+  LOperand* temp1() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Representation representation() const {
+    return hydrogen()->field_representation();
+  }
+};
+
+
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+                     LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+                     LOperand* key, LOperand* current_capacity) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = elements;
+    inputs_[3] = key;
+    inputs_[4] = current_capacity;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* elements() { return inputs_[2]; }
+  LOperand* key() { return inputs_[3]; }
+  LOperand* current_capacity() { return inputs_[4]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
+class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  explicit LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
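"Truncating" here means the full ECMAScript ToInt32 conversion: take the number modulo 2^32 and reinterpret the low 32 bits as signed. A hedged standalone sketch of those semantics (the real code path converts inline where possible and falls back to a stub for out-of-range doubles):

  #include <cmath>
  #include <cstdint>

  // ECMAScript ToInt32: NaN and infinities map to 0; everything else is
  // truncated, reduced mod 2^32, and reinterpreted as signed 32-bit.
  int32_t ToInt32(double d) {
    if (!std::isfinite(d)) return 0;
    double m = std::fmod(std::trunc(d), 4294967296.0);  // in (-2^32, 2^32)
    return static_cast<int32_t>(
        static_cast<uint32_t>(static_cast<int64_t>(m)));
  }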
+
+class LShiftI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LShiftS final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftS(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object,
+                  LOperand* temp) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+    temps_[0] = temp;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right)
+      : shift_(NO_SHIFT), shift_amount_(0)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LSubI(LOperand* left, LOperand* right, Shift shift, LOperand* shift_amount)
+      : shift_(shift), shift_amount_(shift_amount)  {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Shift shift() const { return shift_; }
+  LOperand* shift_amount() const { return shift_amount_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+
+ protected:
+  Shift shift_;
+  LOperand* shift_amount_;
+};
+
+
+class LSubS : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubS(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* context,
+                          LOperand* temp1,
+                          LOperand* temp2) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* context() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() const { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() const { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 2> {
+ public:
+  LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = object;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+
+class LTruncateDoubleToIntOrSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LTruncateDoubleToIntOrSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TruncateDoubleToIntOrSmi,
+                               "truncate-double-to-int-or-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool tag_result() { return hydrogen()->representation().IsSmi(); }
+};
+
+
+class LTypeof final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch final : public LControlInstruction<1, 2> {
+ public:
+  LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() const { return hydrogen()->type_literal(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LCheckMapValue final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map, LOperand* temp) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStoreFrameContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAllocateBlockContext(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk final : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph)
+      : LChunk(info, graph) { }
+
+  int GetNextSpillIndex();
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder final : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(info, graph),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        allocator_(allocator) {}
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
+  LInstruction* DoDivI(HBinaryOperation* instr);
+  LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
+  LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+  static bool HasMagicNumberForDivision(int32_t divisor);
+
+ private:
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // The operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that the register allocator will not reuse
+  // its register for any other operand inside the instruction.
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+
+  // The operand created by UseRegisterAndClobber is guaranteed to be live
+  // until the end of the instruction, and it may also be used as a scratch
+  // register by the instruction implementation.
+  //
+  // This behaves identically to ARM's UseTempRegister. However, it is renamed
+  // to discourage its use in ARM64, since in most cases it is better to
+  // allocate a temporary register for the Lithium instruction.
+  MUST_USE_RESULT LOperand* UseRegisterAndClobber(HValue* value);
+
+  // The operand created by UseRegisterAtStart is guaranteed to be live only at
+  // instruction start. The register allocator is free to assign the same
+  // register to some other operand used inside the instruction (i.e. temporary
+  // or output).
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // A constant operand.
+  MUST_USE_RESULT LConstantOperand* UseConstant(HValue* value);
+
+  // An input operand in a register, a stack slot or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value);
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+
+  // Temporary operand that must be in a double register.
+  MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+
+  // Temporary operand that must be in a fixed double register.
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            Register reg);
+  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+                                  DoubleRegister reg);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach an environment to the
+  // instruction.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+  LInstruction* AssignPointerMap(LInstruction* instr);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+
+  void VisitInstruction(HInstruction* current);
+  void AddInstruction(LInstruction* instr, HInstruction* current);
+  void DoBasicBlock(HBasicBlock* block);
+
+  int JSShiftAmountFromHConstant(HValue* constant) {
+    return HConstant::cast(constant)->Integer32Value() & 0x1f;
+  }
+  bool LikelyFitsImmField(HInstruction* instr, int imm) {
+    if (instr->IsAdd() || instr->IsSub()) {
+      return Assembler::IsImmAddSub(imm) || Assembler::IsImmAddSub(-imm);
+    } else {
+      DCHECK(instr->IsBitwise());
+      unsigned unused_n, unused_imm_s, unused_imm_r;
+      return Assembler::IsImmLogical(imm, kWRegSizeInBits,
+                                     &unused_n, &unused_imm_s, &unused_imm_r);
+    }
+  }
+
+  // Indicates if a sequence of the form
+  //   lsl x8, x9, #imm
+  //   add x0, x1, x8
+  // can be replaced with:
+  //   add x0, x1, x9 LSL #imm
+  // If this is not possible, the function returns NULL. Otherwise it returns a
+  // pointer to the shift instruction that would be optimized away.
+  HBitwiseBinaryOperation* CanTransformToShiftedOp(HValue* val,
+                                                   HValue** left = NULL);
+  // Checks if all uses of the shift operation can optimize it away.
+  bool ShiftCanBeOptimizedAway(HBitwiseBinaryOperation* shift);
+  // Attempts to merge the binary operation and an eventual previous shift
+  // operation into a single operation. Returns the merged instruction on
+  // success, and NULL otherwise.
+  LInstruction* TryDoOpWithShiftedRightOperand(HBinaryOperation* op);
+  LInstruction* DoShiftedBinaryOp(HBinaryOperation* instr,
+                                  HValue* left,
+                                  HBitwiseBinaryOperation* shift);
+
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HBinaryOperation* instr);
+
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  LAllocator* allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_ARM64_LITHIUM_ARM64_H_
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.cc b/src/crankshaft/arm64/lithium-codegen-arm64.cc
new file mode 100644
index 0000000..571bc15
--- /dev/null
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.cc
@@ -0,0 +1,5788 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
+
+#include "src/arm64/frames-arm64.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator final : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deopt_mode_(mode) { }
+  virtual ~SafepointGenerator() { }
+
+  virtual void BeforeCall(int call_size) const { }
+
+  virtual void AfterCall() const {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+// Emit code to branch if the given condition holds.
+// The code generated here doesn't modify the flags, so they must have
+// been set by some prior instructions.
+//
+// The EmitInverted function simply inverts the condition.
+class BranchOnCondition : public BranchGenerator {
+ public:
+  BranchOnCondition(LCodeGen* codegen, Condition cond)
+    : BranchGenerator(codegen),
+      cond_(cond) { }
+
+  virtual void Emit(Label* label) const {
+    __ B(cond_, label);
+  }
+
+  virtual void EmitInverted(Label* label) const {
+    if (cond_ != al) {
+      __ B(NegateCondition(cond_), label);
+    }
+  }
+
+ private:
+  Condition cond_;
+};
+
+
+// Emit code to compare lhs and rhs and branch if the condition holds.
+// This uses MacroAssembler's CompareAndBranch function so it will handle
+// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
+//
+// EmitInverted still compares the two operands but inverts the condition.
+class CompareAndBranch : public BranchGenerator {
+ public:
+  CompareAndBranch(LCodeGen* codegen,
+                   Condition cond,
+                   const Register& lhs,
+                   const Operand& rhs)
+    : BranchGenerator(codegen),
+      cond_(cond),
+      lhs_(lhs),
+      rhs_(rhs) { }
+
+  virtual void Emit(Label* label) const {
+    __ CompareAndBranch(lhs_, rhs_, cond_, label);
+  }
+
+  virtual void EmitInverted(Label* label) const {
+    __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
+  }
+
+ private:
+  Condition cond_;
+  const Register& lhs_;
+  const Operand& rhs_;
+};
+
+
+// Test the input with the given mask and branch if the condition holds.
+// If the condition is 'eq' or 'ne' this will use MacroAssembler's
+// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
+// conversion to Tbz/Tbnz when possible.
+class TestAndBranch : public BranchGenerator {
+ public:
+  TestAndBranch(LCodeGen* codegen,
+                Condition cond,
+                const Register& value,
+                uint64_t mask)
+    : BranchGenerator(codegen),
+      cond_(cond),
+      value_(value),
+      mask_(mask) { }
+
+  virtual void Emit(Label* label) const {
+    switch (cond_) {
+      case eq:
+        __ TestAndBranchIfAllClear(value_, mask_, label);
+        break;
+      case ne:
+        __ TestAndBranchIfAnySet(value_, mask_, label);
+        break;
+      default:
+        __ Tst(value_, mask_);
+        __ B(cond_, label);
+    }
+  }
+
+  virtual void EmitInverted(Label* label) const {
+    // The inverse of "all clear" is "any set" and vice versa.
+    switch (cond_) {
+      case eq:
+        __ TestAndBranchIfAnySet(value_, mask_, label);
+        break;
+      case ne:
+        __ TestAndBranchIfAllClear(value_, mask_, label);
+        break;
+      default:
+        __ Tst(value_, mask_);
+        __ B(NegateCondition(cond_), label);
+    }
+  }
+
+ private:
+  Condition cond_;
+  const Register& value_;
+  uint64_t mask_;
+};
+
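The eq/ne mapping above is easiest to read as plain predicates: after Tst(value, mask) sets flags from value & mask, "eq" holds exactly when every mask bit is clear and "ne" when at least one is set, which is what Tbz/Tbnz test directly for single-bit masks. In portable terms:

  #include <cstdint>

  bool AllClear(uint64_t value, uint64_t mask) { return (value & mask) == 0; }
  bool AnySet(uint64_t value, uint64_t mask) { return (value & mask) != 0; }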
+
+// Test the input and branch if it is non-zero and not a NaN.
+class BranchIfNonZeroNumber : public BranchGenerator {
+ public:
+  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
+                        const FPRegister& scratch)
+    : BranchGenerator(codegen), value_(value), scratch_(scratch) { }
+
+  virtual void Emit(Label* label) const {
+    __ Fabs(scratch_, value_);
+    // Compare with 0.0. Because scratch_ is positive, the result can be one of
+    // nZCv (equal), nzCv (greater) or nzCV (unordered).
+    __ Fcmp(scratch_, 0.0);
+    __ B(gt, label);
+  }
+
+  virtual void EmitInverted(Label* label) const {
+    __ Fabs(scratch_, value_);
+    __ Fcmp(scratch_, 0.0);
+    __ B(le, label);
+  }
+
+ private:
+  const FPRegister& value_;
+  const FPRegister& scratch_;
+};
+
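The Fabs/Fcmp pair is a compact classifier: folding both zeros to +0 and then testing greater-than leaves exactly one failing path for +/-0 (compares equal to 0.0) and one for NaN (compares unordered, so "greater" is false). The same predicate in portable C++:

  #include <cmath>

  // True iff value is neither +/-0 nor NaN.
  bool IsNonZeroNumber(double value) { return std::fabs(value) > 0.0; }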
+
+// Test the input and branch if it is a heap number.
+class BranchIfHeapNumber : public BranchGenerator {
+ public:
+  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
+      : BranchGenerator(codegen), value_(value) { }
+
+  virtual void Emit(Label* label) const {
+    __ JumpIfHeapNumber(value_, label);
+  }
+
+  virtual void EmitInverted(Label* label) const {
+    __ JumpIfNotHeapNumber(value_, label);
+  }
+
+ private:
+  const Register& value_;
+};
+
+
+// Test the input and branch if it is the specified root value.
+class BranchIfRoot : public BranchGenerator {
+ public:
+  BranchIfRoot(LCodeGen* codegen, const Register& value,
+               Heap::RootListIndex index)
+      : BranchGenerator(codegen), value_(value), index_(index) { }
+
+  virtual void Emit(Label* label) const {
+    __ JumpIfRoot(value_, index_, label);
+  }
+
+  virtual void EmitInverted(Label* label) const {
+    __ JumpIfNotRoot(value_, index_, label);
+  }
+
+ private:
+  const Register& value_;
+  const Heap::RootListIndex index_;
+};
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+
+  WriteTranslation(environment->outer(), translation);
+  WriteTranslationFrame(environment, translation);
+
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    AddToTranslation(
+        environment, translation, value, environment->HasTaggedValueAt(i),
+        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+  }
+}
+
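WriteTranslation recurses on environment->outer() before emitting its own frame, so the outermost (oldest) frame's values land in the translation stream first and the innermost frame's last, matching the order in which the deoptimizer rebuilds the stack. A toy sketch of that ordering (hypothetical types, illustration only):

  #include <vector>

  struct Env {
    const Env* outer;         // enclosing frame, or nullptr
    std::vector<int> values;  // stand-ins for the frame's recorded operands
  };

  // Outer-first encoding: caller frames precede callee frames in the stream.
  void Encode(const Env* e, std::vector<int>* out) {
    if (e == nullptr) return;
    Encode(e->outer, out);
    out->insert(out->end(), e->values.begin(), e->values.end());
  }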
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation,
+                                LOperand* op,
+                                bool is_tagged,
+                                bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment,
+                       translation,
+                       value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer,
+                       dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    if (is_tagged) {
+      translation->StoreStackSlot(index);
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(index);
+    } else {
+      translation->StoreInt32StackSlot(index);
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    translation->StoreDoubleStackSlot(index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
+  if (!environment->HasBeenRegistered()) {
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, zone());
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  DCHECK(instr != NULL);
+
+  Assembler::BlockPoolsScope scope(masm_);
+  __ Call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+  if ((code->kind() == Code::BINARY_OP_IC) ||
+      (code->kind() == Code::COMPARE_IC)) {
+    // Signal that we don't inline smi code before these stubs in the
+    // optimizing code generator.
+    InlineSmiCheckInfo::EmitNotInlined(masm());
+  }
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).Is(x1));
+  DCHECK(ToRegister(instr->result()).Is(x0));
+
+  int arity = instr->arity();
+  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    Register slot_register = ToRegister(instr->temp_slot());
+    Register vector_register = ToRegister(instr->temp_vector());
+    DCHECK(slot_register.is(x3));
+    DCHECK(vector_register.is(x2));
+
+    AllowDeferredHandleDereference vector_structure_check;
+    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+    int index = vector->GetIndex(instr->hydrogen()->slot());
+
+    __ Mov(vector_register, vector);
+    __ Mov(slot_register, Operand(Smi::FromInt(index)));
+
+    Handle<Code> ic =
+        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ Mov(x0, arity);
+    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+  }
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(x1));
+
+  __ Mov(x0, Operand(instr->arity()));
+  if (instr->arity() == 1) {
+    // We only need the allocation site when there is a length argument; that
+    // case may bail out to the runtime, which will determine the correct
+    // elements kind from the site.
+    __ Mov(x2, instr->hydrogen()->site());
+  } else {
+    __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
+  }
+
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+
+      // We might need to create a holey array; look at the first argument.
+      __ Peek(x10, 0);
+      __ Cbz(x10, &packed_case);
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(),
+                                              holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      __ B(&done);
+      __ Bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    __ Bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
+
+  DCHECK(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr,
+                           SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+
+  __ CallRuntime(function, num_arguments, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    __ Mov(cp, ToRegister(context));
+  } else if (context->IsStackSlot()) {
+    __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ LoadHeapObject(cp,
+                      Handle<HeapObject>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                            SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::Kind kind,
+                               int arguments,
+                               Safepoint::DeoptMode deopt_mode) {
+  DCHECK(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(
+      masm(), kind, arguments, deopt_mode);
+
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+}
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  DCHECK(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+         GenerateJumpTable() && GenerateSafepointTable();
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator iterator(doubles);
+  int count = 0;
+  while (!iterator.Done()) {
+    // TODO(all): Is this supposed to save just the callee-saved doubles? It
+    // looks like it's saving all of them.
+    FPRegister value = FPRegister::from_code(iterator.Current());
+    __ Poke(value, count * kDoubleSize);
+    iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator iterator(doubles);
+  int count = 0;
+  while (!iterator.Done()) {
+    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
+    // looks like it's restoring all of them.
+    FPRegister value = FPRegister::from_code(iterator.Current());
+    __ Peek(value, count * kDoubleSize);
+    iterator.Advance();
+    count++;
+  }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  DCHECK(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+    if (strlen(FLAG_stop_at) > 0 &&
+        info()->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+      __ Debug("stop-at", __LINE__, BREAK);
+    }
+#endif
+  }
+
+  DCHECK(__ StackPointer().Is(jssp));
+  info()->set_prologue_offset(masm_->pc_offset());
+  if (NeedsEagerFrame()) {
+    if (info()->IsStub()) {
+      __ StubPrologue();
+    } else {
+      __ Prologue(info()->GeneratePreagedPrologue());
+    }
+    frame_is_built_ = true;
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    __ Claim(slots, kPointerSize);
+  }
+
+  if (info()->saves_caller_doubles()) {
+    SaveCallerDoubles();
+  }
+  return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+  Comment(";;; Prologue begin");
+
+  // Allocate a local context if needed.
+  if (info()->num_heap_slots() > 0) {
+    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
+    // Argument to NewContext is the function, which is in x1.
+    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+    if (info()->scope()->is_script_scope()) {
+      __ Mov(x10, Operand(info()->scope()->GetScopeInfo(info()->isolate())));
+      __ Push(x1, x10);
+      __ CallRuntime(Runtime::kNewScriptContext);
+      deopt_mode = Safepoint::kLazyDeopt;
+    } else if (slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ Push(x1);
+      __ CallRuntime(Runtime::kNewFunctionContext);
+    }
+    RecordSafepoint(deopt_mode);
+    // The context is returned in x0. It replaces the context passed to us.
+    // It's saved on the stack and kept live in cp.
+    __ Mov(cp, x0);
+    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        Register value = x0;
+        Register scratch = x3;
+
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ Ldr(value, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextMemOperand(cp, var->index());
+        __ Str(value, target);
+        // Update the write barrier. This clobbers value and scratch.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
+                                    value, scratch, GetLinkRegisterState(),
+                                    kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  Comment(";;; Prologue end");
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or, if
+  // there are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  DCHECK(slots >= 0);
+  __ Claim(slots);
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (instr->IsCall()) {
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  }
+  if (!instr->IsLazyBailout() && !instr->IsGap()) {
+    safepoints_.BumpLastLazySafepointIndex();
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  DCHECK(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
+      LDeferredCode* code = deferred_[i];
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(
+          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
+
+      __ Bind(code->entry());
+
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
+        frame_is_built_ = true;
+        __ Push(lr, fp, cp);
+        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
+        __ Push(fp);
+        __ Add(fp, __ StackPointer(),
+               StandardFrameConstants::kFixedFrameSizeFromFp);
+        Comment(";;; Deferred code");
+      }
+
+      code->Generate();
+
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Destroy frame");
+        DCHECK(frame_is_built_);
+        __ Pop(xzr, cp, fp, lr);
+        frame_is_built_ = false;
+      }
+
+      __ B(code->exit());
+    }
+  }
+
+  // Force constant pool emission at the end of the deferred code to make
+  // sure that no constant pools are emitted after deferred code, because
+  // deferred code generation is the last step that generates code. The two
+  // following steps only output data used by Crankshaft.
+  masm()->CheckConstPool(true, false);
+
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+  Label needs_frame, call_deopt_entry;
+
+  if (jump_table_.length() > 0) {
+    Comment(";;; -------------------- Jump table --------------------");
+    Address base = jump_table_[0]->address;
+
+    UseScratchRegisterScope temps(masm());
+    Register entry_offset = temps.AcquireX();
+
+    int length = jump_table_.length();
+    for (int i = 0; i < length; i++) {
+      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
+      __ Bind(&table_entry->label);
+
+      Address entry = table_entry->address;
+      DeoptComment(table_entry->deopt_info);
+
+      // Second-level deopt table entries are contiguous and small, so instead
+      // of loading the full, absolute address of each one, load the base
+      // address and add an immediate offset.
+      __ Mov(entry_offset, entry - base);
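+      // The absolute base address itself is materialized only once, in the
+      // common call_deopt_entry tail below, where entry_offset is added back
+      // to form the full entry address.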
+
+      if (table_entry->needs_frame) {
+        DCHECK(!info()->saves_caller_doubles());
+        Comment(";;; call deopt with frame");
+        // Save lr before the Bl; fp will be adjusted in the needs_frame code.
+        __ Push(lr, fp);
+        // Reuse the existing needs_frame code.
+        __ Bl(&needs_frame);
+      } else {
+        // There is nothing special to do, so just continue to the second-level
+        // table.
+        __ Bl(&call_deopt_entry);
+      }
+      info()->LogDeoptCallPosition(masm()->pc_offset(),
+                                   table_entry->deopt_info.inlining_id);
+
+      masm()->CheckConstPool(false, false);
+    }
+
+    if (needs_frame.is_linked()) {
+      // This variant of deopt can only be used with stubs. Since we don't
+      // have a function pointer to install in the stack frame that we're
+      // building, install a special marker there instead.
+      DCHECK(info()->IsStub());
+
+      Comment(";;; needs_frame common code");
+      UseScratchRegisterScope temps(masm());
+      Register stub_marker = temps.AcquireX();
+      __ Bind(&needs_frame);
+      __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
+      __ Push(cp, stub_marker);
+      __ Add(fp, __ StackPointer(), 2 * kPointerSize);
+    }
+
+    // Generate common code for calling the second-level deopt table.
+    __ Bind(&call_deopt_entry);
+
+    if (info()->saves_caller_doubles()) {
+      DCHECK(info()->IsStub());
+      RestoreCallerDoubles();
+    }
+
+    Register deopt_entry = temps.AcquireX();
+    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
+                                RelocInfo::RUNTIME_ENTRY));
+    __ Add(deopt_entry, deopt_entry, entry_offset);
+    __ Br(deopt_entry);
+  }
+
+  // Force constant pool emission at the end of the deopt jump table to make
+  // sure that no constant pools are emitted after.
+  masm()->CheckConstPool(true, false);
+
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  DCHECK(is_done());
+  // We do not know how much data will be emitted for the safepoint table, so
+  // force emission of the veneer pool.
+  masm()->CheckVeneerPool(true, true);
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  DCHECK(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::DeoptimizeBranch(
+    LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
+    BranchType branch_type, Register reg, int bit,
+    Deoptimizer::BailoutType* override_bailout_type) {
+  LEnvironment* environment = instr->environment();
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  Deoptimizer::BailoutType bailout_type =
+    info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+
+  if (override_bailout_type != NULL) {
+    bailout_type = *override_bailout_type;
+  }
+
+  DCHECK(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+  }
+
+  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
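+    // Stress mode: decrement a global counter and, when it reaches zero,
+    // reset it to FLAG_deopt_every_n_times and deoptimize unconditionally.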
+    Label not_zero;
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+
+    __ Push(x0, x1, x2);
+    __ Mrs(x2, NZCV);
+    __ Mov(x0, count);
+    __ Ldr(w1, MemOperand(x0));
+    __ Subs(x1, x1, 1);
+    __ B(gt, &not_zero);
+    __ Mov(w1, FLAG_deopt_every_n_times);
+    __ Str(w1, MemOperand(x0));
+    __ Pop(x2, x1, x0);
+    DCHECK(frame_is_built_);
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    __ Unreachable();
+
+    __ Bind(&not_zero);
+    __ Str(w1, MemOperand(x0));
+    __ Msr(NZCV, x2);
+    __ Pop(x2, x1, x0);
+  }
+
+  if (info()->ShouldTrapOnDeopt()) {
+    Label dont_trap;
+    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
+    __ Debug("trap_on_deopt", __LINE__, BREAK);
+    __ Bind(&dont_trap);
+  }
+
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
+  DCHECK(info()->IsStub() || frame_is_built_);
+  // Go through the jump table if we need to build a frame, or to restore
+  // caller doubles.
+  if (branch_type == always &&
+      frame_is_built_ && !info()->saves_caller_doubles()) {
+    DeoptComment(deopt_info);
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
+  } else {
+    Deoptimizer::JumpTableEntry* table_entry =
+        new (zone()) Deoptimizer::JumpTableEntry(
+            entry, deopt_info, bailout_type, !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+        jump_table_.is_empty() ||
+        !table_entry->IsEquivalentTo(*jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
+    }
+    __ B(&jump_table_.last()->label, branch_type, reg, bit);
+  }
+}
+
+
+void LCodeGen::Deoptimize(LInstruction* instr,
+                          Deoptimizer::DeoptReason deopt_reason,
+                          Deoptimizer::BailoutType* override_bailout_type) {
+  DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
+                   override_bailout_type);
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
+}
+
+
+void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
+                                Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
+}
+
+
+void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
+                                   Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
+}
+
+
+void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
+                                    Deoptimizer::DeoptReason deopt_reason) {
+  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
+  DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
+}
+
+
+void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
+                               Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
+}
+
+
+void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
+                                  Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
+}
+
+
+void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
+                                LInstruction* instr,
+                                Deoptimizer::DeoptReason deopt_reason) {
+  __ CompareRoot(rt, index);
+  DeoptimizeIf(eq, instr, deopt_reason);
+}
+
+
+void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
+                                   LInstruction* instr,
+                                   Deoptimizer::DeoptReason deopt_reason) {
+  __ CompareRoot(rt, index);
+  DeoptimizeIf(ne, instr, deopt_reason);
+}
+
+
+void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
+                                     Deoptimizer::DeoptReason deopt_reason) {
+  __ TestForMinusZero(input);
+  DeoptimizeIf(vs, instr, deopt_reason);
+}
+
+
+void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
+  __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+}
+
+
+void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
+                                  Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
+}
+
+
+void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
+                                    Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    intptr_t current_pc = masm()->pc_offset();
+
+    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
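+      // For example, with last_lazy_deopt_pc_ == 100, space_needed == 16 and
+      // current_pc == 108, two nops (kInstructionSize == 4 on ARM64) are
+      // emitted so that the next instruction starts at offset 116.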
+      DCHECK((padding_size % kInstructionSize) == 0);
+      InstructionAccurateScope instruction_accurate(
+          masm(), padding_size / kInstructionSize);
+
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= kInstructionSize;
+      }
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  // TODO(all): support zero register results, as ToRegister32.
+  DCHECK((op != NULL) && op->IsRegister());
+  return Register::from_code(op->index());
+}
+
+
+Register LCodeGen::ToRegister32(LOperand* op) const {
+  DCHECK(op != NULL);
+  if (op->IsConstantOperand()) {
+    // If this is a constant operand, the result must be the zero register.
+    DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
+    return wzr;
+  } else {
+    return ToRegister(op).W();
+  }
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return Smi::FromInt(constant->Integer32Value());
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  DCHECK((op != NULL) && op->IsDoubleRegister());
+  return DoubleRegister::from_code(op->index());
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  DCHECK(op != NULL);
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk()->LookupConstant(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsSmi()) {
+      DCHECK(constant->HasSmiValue());
+      return Operand(Smi::FromInt(constant->Integer32Value()));
+    } else if (r.IsInteger32()) {
+      DCHECK(constant->HasInteger32Value());
+      return Operand(constant->Integer32Value());
+    } else if (r.IsDouble()) {
+      Abort(kToOperandUnsupportedDoubleImmediate);
+    }
+    DCHECK(r.IsTagged());
+    return Operand(constant->handle(isolate()));
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort(kToOperandIsDoubleRegisterUnimplemented);
+    return Operand(0);
+  }
+  // Stack slots are not implemented; use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand(0);
+}
+
+
+Operand LCodeGen::ToOperand32(LOperand* op) {
+  DCHECK(op != NULL);
+  if (op->IsRegister()) {
+    return Operand(ToRegister32(op));
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk()->LookupConstant(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      return Operand(constant->Integer32Value());
+    } else {
+      // Other constants not implemented.
+      Abort(kToOperand32UnsupportedImmediate);
+    }
+  }
+  // Other cases are not implemented.
+  UNREACHABLE();
+  return Operand(0);
+}
+
+
+static int64_t ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
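+  // An argument index of -1 maps to offset 0, -2 to kPointerSize, and so on,
+  // walking up the stack from the stack pointer.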
+  return -(index + 1) * kPointerSize;
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
+  DCHECK(op != NULL);
+  DCHECK(!op->IsRegister());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    int fp_offset = StackSlotOffset(op->index());
+    // Loads and stores have a larger reach with positive offsets than with
+    // negative ones. We try to access using jssp (positive offset) first,
+    // then fall back to fp (negative offset) if that fails.
+    //
+    // We can reference a stack slot from jssp only if we know how much we've
+    // put on the stack. We don't know this in the following cases:
+    // - stack_mode != kCanUseStackPointer: this is the case when deferred
+    //   code has saved the registers.
+    // - saves_caller_doubles(): some double registers have been pushed;
+    //   jssp references the end of the double registers, not the end of the
+    //   stack slots.
+    // In both of the cases above, we _could_ add the tracking information
+    // required so that we can use jssp here, but in practice it isn't worth it.
+    if ((stack_mode == kCanUseStackPointer) &&
+        !info()->saves_caller_doubles()) {
+      int jssp_offset_to_fp =
+          StandardFrameConstants::kFixedFrameSizeFromFp +
+          (pushed_arguments_ + GetStackSlotCount()) * kPointerSize;
+      int jssp_offset = fp_offset + jssp_offset_to_fp;
+      if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
+        return MemOperand(masm()->StackPointer(), jssp_offset);
+      }
+    }
+    return MemOperand(fp, fp_offset);
+  } else {
+    // Without an eager frame, retrieve the parameter relative to the stack
+    // pointer.
+    return MemOperand(masm()->StackPointer(),
+                      ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+
+template <class LI>
+Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
+  if (shift_info->shift() == NO_SHIFT) {
+    return ToOperand32(right);
+  } else {
+    return Operand(
+        ToRegister32(right),
+        shift_info->shift(),
+        JSShiftAmountFromLConstant(shift_info->shift_amount()));
+  }
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return constant->Integer32Value();
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = nv;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? lo : lt;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? hi : gt;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? ls : le;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? hs : ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchGeneric(InstrType instr,
+                                 const BranchGenerator& branch) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
+  } else {
+    branch.Emit(chunk_->GetAssemblyLabel(left_block));
+    if (right_block != next_block) {
+      __ B(chunk_->GetAssemblyLabel(right_block));
+    }
+  }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
+  DCHECK((condition != al) && (condition != nv));
+  BranchOnCondition branch(this, condition);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitCompareAndBranch(InstrType instr,
+                                    Condition condition,
+                                    const Register& lhs,
+                                    const Operand& rhs) {
+  DCHECK((condition != al) && (condition != nv));
+  CompareAndBranch branch(this, condition, lhs, rhs);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitTestAndBranch(InstrType instr,
+                                 Condition condition,
+                                 const Register& value,
+                                 uint64_t mask) {
+  DCHECK((condition != al) && (condition != nv));
+  TestAndBranch branch(this, condition, value, mask);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
+                                         const FPRegister& value,
+                                         const FPRegister& scratch) {
+  BranchIfNonZeroNumber branch(this, value, scratch);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
+                                      const Register& value) {
+  BranchIfHeapNumber branch(this, value);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchIfRoot(InstrType instr,
+                                const Register& value,
+                                Heap::RootListIndex index) {
+  BranchIfRoot branch(this, value, index);
+  EmitBranchGeneric(instr, branch);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) {
+      resolver_.Resolve(move);
+    }
+  }
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+
+  // The pointer to the arguments array comes from DoArgumentsElements.
+  // It does not point directly to the arguments and there is an offset of
+  // two words that we must take into account when accessing an argument.
+  // Subtracting the index from length accounts for one, so we add one more.
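+  // For example, with length == 3 and index == 2 (the last argument), the
+  // load is at offset (3 - 2 + 1) * kPointerSize from the arguments pointer.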
+
+  if (instr->length()->IsConstantOperand() &&
+      instr->index()->IsConstantOperand()) {
+    int index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int length = ToInteger32(LConstantOperand::cast(instr->length()));
+    int offset = ((length - index) + 1) * kPointerSize;
+    __ Ldr(result, MemOperand(arguments, offset));
+  } else if (instr->index()->IsConstantOperand()) {
+    Register length = ToRegister32(instr->length());
+    int index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int loc = index - 1;
+    if (loc != 0) {
+      __ Sub(result.W(), length, loc);
+      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
+    } else {
+      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
+    }
+  } else {
+    Register length = ToRegister32(instr->length());
+    Operand index = ToOperand32(instr->index());
+    __ Sub(result.W(), length, index);
+    __ Add(result.W(), result.W(), 1);
+    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
+  }
+}
+
+
+void LCodeGen::DoAddE(LAddE* instr) {
+  Register result = ToRegister(instr->result());
+  Register left = ToRegister(instr->left());
+  Operand right = Operand(x0);  // Dummy initialization.
+  if (instr->hydrogen()->external_add_type() == AddOfExternalAndTagged) {
+    right = Operand(ToRegister(instr->right()));
+  } else if (instr->right()->IsConstantOperand()) {
+    right = ToInteger32(LConstantOperand::cast(instr->right()));
+  } else {
+    right = Operand(ToRegister32(instr->right()), SXTW);
+  }
+
+  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
+  __ Add(result, left, right);
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  Register result = ToRegister32(instr->result());
+  Register left = ToRegister32(instr->left());
+  Operand right = ToShiftedRightOperand32(instr->right(), instr);
+
+  if (can_overflow) {
+    __ Adds(result, left, right);
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+  } else {
+    __ Add(result, left, right);
+  }
+}
+
+
+void LCodeGen::DoAddS(LAddS* instr) {
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  Register result = ToRegister(instr->result());
+  Register left = ToRegister(instr->left());
+  Operand right = ToOperand(instr->right());
+  if (can_overflow) {
+    __ Adds(result, left, right);
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+  } else {
+    __ Add(result, left, right);
+  }
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate: public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  // Allocate memory for the object.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
+  } else {
+    Register size = ToRegister32(instr->size());
+    __ Sxtw(size.X(), size);
+    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
+  }
+
+  __ Bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    Register start = temp1;
+    Register end = temp2;
+    Register filler = ToRegister(instr->temp3());
+
+    __ Sub(start, result, kHeapObjectTag);
+
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ Add(end, start, size);
+    } else {
+      __ Add(end, start, ToRegister(instr->size()));
+    }
+    __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
+    __ InitializeFieldsWithFiller(start, end, filler);
+  } else {
+    DCHECK(instr->temp3() == NULL);
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
+
+  PushSafepointRegistersScope scope(this);
+  // We're in a SafepointRegistersScope so we can use any scratch registers.
+  Register size = x0;
+  if (instr->size()->IsConstantOperand()) {
+    __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
+  } else {
+    __ SmiTag(size, ToRegister32(instr->size()).X());
+  }
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ Mov(x10, Smi::FromInt(flags));
+  __ Push(size, x10);
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister32(instr->length());
+
+  Register elements = ToRegister(instr->elements());
+  Register scratch = x5;
+  DCHECK(receiver.Is(x0));  // Used for parameter count.
+  DCHECK(function.Is(x1));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).Is(x0));
+  DCHECK(instr->IsMarkedAsCall());
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  __ Cmp(length, kArgumentsLimit);
+  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ Push(receiver);
+  Register argc = receiver;
+  receiver = NoReg;
+  __ Sxtw(argc, length);
+  // The arguments start one pointer size past elements.
+  __ Add(elements, elements, 1 * kPointerSize);
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ Cbz(length, &invoke);
+  __ Bind(&loop);
+  __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
+  __ Push(scratch);
+  __ Subs(length, length, 1);
+  __ B(ne, &loop);
+
+  __ Bind(&invoke);
+  DCHECK(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+  // The number of arguments is stored in argc (the former receiver register),
+  // which is x0, as expected by InvokeFunction.
+  ParameterCount actual(argc);
+  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+                    safepoint_generator);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
+    // When we are inside an inlined function, the arguments are the last
+    // things that have been pushed on the stack, so the arguments array can
+    // be accessed directly from jssp.
+    // In the normal case, however, it is accessed via fp, and there are two
+    // words on the stack between fp and the arguments (the saved lr and fp);
+    // the LAccessArgumentsAt implementation takes that into account.
+    // In the inlined case we need to subtract the size of two words from jssp
+    // to get a pointer that works with LAccessArgumentsAt.
+    DCHECK(masm()->StackPointer().Is(jssp));
+    __ Sub(result, jssp, 2 * kPointerSize);
+  } else {
+    DCHECK(instr->temp() != NULL);
+    Register previous_fp = ToRegister(instr->temp());
+
+    __ Ldr(previous_fp,
+           MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ Ldr(result,
+           MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
+    __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
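+    // Select the caller's frame pointer if the caller is an arguments adaptor
+    // frame, and the current frame pointer otherwise.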
+    __ Csel(result, fp, previous_fp, ne);
+  }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register result = ToRegister32(instr->result());
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  __ Cmp(fp, elements);
+  __ Mov(result, scope()->num_parameters());
+  __ B(eq, &done);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ Ldr(result,
+         UntagSmiMemOperand(result.X(),
+                            ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  // Argument length is in result register.
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->left());
+  DoubleRegister right = ToDoubleRegister(instr->right());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+
+  switch (instr->op()) {
+    case Token::ADD: __ Fadd(result, left, right); break;
+    case Token::SUB: __ Fsub(result, left, right); break;
+    case Token::MUL: __ Fmul(result, left, right); break;
+    case Token::DIV: __ Fdiv(result, left, right); break;
+    case Token::MOD: {
+      // The ECMA-262 remainder operator is the remainder from a truncating
+      // (round-towards-zero) division. Note that this differs from IEEE-754.
+      //
+      // TODO(jbramley): See if it's possible to do this inline, rather than by
+      // calling a helper function. With frintz (to produce the intermediate
+      // quotient) and fmsub (to calculate the remainder without loss of
+      // precision), it should be possible. However, we would need support for
+      // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
+      // support that yet.
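+      // For example, 5 % 3 evaluates to 2 under the truncating definition,
+      // whereas the IEEE-754 remainder of 5 and 3 is -1 (the quotient is
+      // rounded to the nearest integer, here 2).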
+      DCHECK(left.Is(d0));
+      DCHECK(right.Is(d1));
+      __ CallCFunction(
+          ExternalReference::mod_two_doubles_operation(isolate()),
+          0, 2);
+      DCHECK(result.Is(d0));
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(x1));
+  DCHECK(ToRegister(instr->right()).is(x0));
+  DCHECK(ToRegister(instr->result()).is(x0));
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  Register result = ToRegister32(instr->result());
+  Register left = ToRegister32(instr->left());
+  Operand right = ToShiftedRightOperand32(instr->right(), instr);
+
+  switch (instr->op()) {
+    case Token::BIT_AND: __ And(result, left, right); break;
+    case Token::BIT_OR:  __ Orr(result, left, right); break;
+    case Token::BIT_XOR: __ Eor(result, left, right); break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoBitS(LBitS* instr) {
+  Register result = ToRegister(instr->result());
+  Register left = ToRegister(instr->left());
+  Operand right = ToOperand(instr->right());
+
+  switch (instr->op()) {
+    case Token::BIT_AND: __ And(result, left, right); break;
+    case Token::BIT_OR:  __ Orr(result, left, right); break;
+    case Token::BIT_XOR: __ Eor(result, left, right); break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
+  Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
+  DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
+  if (instr->index()->IsConstantOperand()) {
+    Operand index = ToOperand32(instr->index());
+    Register length = ToRegister32(instr->length());
+    __ Cmp(length, index);
+    cond = CommuteCondition(cond);
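+    // The operands were compared in reverse order (length against index), so
+    // the condition must be commuted: hi becomes lo and hs becomes ls.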
+  } else {
+    Register index = ToRegister32(instr->index());
+    Operand length = ToOperand32(instr->length());
+    __ Cmp(index, length);
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+    __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
+  } else {
+    DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
+  }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  Label* true_label = instr->TrueLabel(chunk_);
+  Label* false_label = instr->FalseLabel(chunk_);
+
+  if (r.IsInteger32()) {
+    DCHECK(!info()->IsStub());
+    EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
+  } else if (r.IsSmi()) {
+    DCHECK(!info()->IsStub());
+    STATIC_ASSERT(kSmiTag == 0);
+    EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
+  } else if (r.IsDouble()) {
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    // Test the double value. Zero and NaN are false.
+    EmitBranchIfNonZeroNumber(instr, value, double_scratch());
+  } else {
+    DCHECK(r.IsTagged());
+    Register value = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+
+    if (type.IsBoolean()) {
+      DCHECK(!info()->IsStub());
+      __ CompareRoot(value, Heap::kTrueValueRootIndex);
+      EmitBranch(instr, eq);
+    } else if (type.IsSmi()) {
+      DCHECK(!info()->IsStub());
+      EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
+    } else if (type.IsJSArray()) {
+      DCHECK(!info()->IsStub());
+      EmitGoto(instr->TrueDestination(chunk()));
+    } else if (type.IsHeapNumber()) {
+      DCHECK(!info()->IsStub());
+      __ Ldr(double_scratch(), FieldMemOperand(value,
+                                               HeapNumber::kValueOffset));
+      // Test the double value. Zero and NaN are false.
+      EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
+    } else if (type.IsString()) {
+      DCHECK(!info()->IsStub());
+      Register temp = ToRegister(instr->temp1());
+      __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
+      EmitCompareAndBranch(instr, ne, temp, 0);
+    } else {
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ JumpIfRoot(
+            value, Heap::kUndefinedValueRootIndex, false_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ JumpIfRoot(
+            value, Heap::kTrueValueRootIndex, true_label);
+        __ JumpIfRoot(
+            value, Heap::kFalseValueRootIndex, false_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ JumpIfRoot(
+            value, Heap::kNullValueRootIndex, false_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        DCHECK(Smi::FromInt(0) == 0);
+        __ Cbz(value, false_label);
+        __ JumpIfSmi(value, true_label);
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a smi, deopt.
+        DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
+      }
+
+      Register map = NoReg;
+      Register scratch = NoReg;
+
+      if (expected.NeedsMap()) {
+        DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
+        map = ToRegister(instr->temp1());
+        scratch = ToRegister(instr->temp2());
+
+        __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ TestAndBranchIfAnySet(
+              scratch, 1 << Map::kIsUndetectable, false_label);
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE);
+        __ B(ge, true_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
+        __ B(ge, &not_string);
+        __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
+        __ Cbz(scratch, false_label);
+        __ B(true_label);
+        __ Bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
+        __ B(eq, true_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+        // SIMD value -> true.
+        __ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
+        __ B(eq, true_label);
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        Label not_heap_number;
+        __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
+
+        __ Ldr(double_scratch(),
+               FieldMemOperand(value, HeapNumber::kValueOffset));
+        __ Fcmp(double_scratch(), 0.0);
+        // If we got a NaN (overflow bit is set), jump to the false branch.
+        __ B(vs, false_label);
+        __ B(eq, false_label);
+        __ B(true_label);
+        __ Bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        Deoptimize(instr, Deoptimizer::kUnexpectedObject);
+      }
+    }
+  }
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count, int arity,
+                                 LInstruction* instr) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+
+  // The function interface relies on the following register assignments.
+  Register function_reg = x1;
+  Register arity_reg = x0;
+
+  LPointerMap* pointers = instr->pointer_map();
+
+  if (FLAG_debug_code) {
+    Label is_not_smi;
+    // Try to confirm that function_reg (x1) is a tagged pointer.
+    __ JumpIfNotSmi(function_reg, &is_not_smi);
+    __ Abort(kExpectedFunctionObject);
+    __ Bind(&is_not_smi);
+  }
+
+  if (can_invoke_directly) {
+    // Change context.
+    __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+    // Always initialize new target and number of actual arguments.
+    __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
+    __ Mov(arity_reg, arity);
+
+    // Invoke function.
+    __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+    __ Call(x10);
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+  }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToRegister(instr->result()).Is(x0));
+
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      // TODO(all): on ARM we use a call descriptor to specify a storage mode
+      // but on ARM64 we only have one storage mode so it isn't necessary. Check
+      // this understanding is correct.
+      __ Jump(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+      __ Br(target);
+    }
+  } else {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      // TODO(all): on ARM we use a call descriptor to specify a storage mode
+      // but on ARM64 we only have one storage mode so it isn't necessary. Check
+      // this understanding is correct.
+      __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+      __ Call(target);
+    }
+    generator.AfterCall();
+  }
+
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToRegister(instr->function()).is(x1));
+
+  // Change context.
+  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+  // Always initialize new target and number of actual arguments.
+  __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
+  __ Mov(x0, instr->arity());
+
+  // Load the code entry address.
+  __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
+  __ Call(x10);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->result()).is(x0));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  Register temp = ToRegister(instr->temp());
+  {
+    PushSafepointRegistersScope scope(this);
+    __ Push(object);
+    __ Mov(cp, 0);
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(x0, temp);
+  }
+  DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  class DeferredCheckMaps: public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+        : LDeferredCode(codegen), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    virtual void Generate() {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->IsStabilityCheck()) {
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }
+
+  Register object = ToRegister(instr->value());
+  Register map_reg = ToRegister(instr->temp());
+
+  __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    deferred = new(zone()) DeferredCheckMaps(this, instr, object);
+    __ Bind(deferred->check_maps());
+  }
+
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+  Label success;
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
+    __ CompareMap(map_reg, map);
+    __ B(eq, &success);
+  }
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
+  __ CompareMap(map_reg, map);
+
+  // We didn't match a map.
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    __ B(ne, deferred->entry());
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+  }
+
+  __ Bind(&success);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
+  }
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  Register value = ToRegister(instr->value());
+  DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
+  DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
+}
+
+
+void LCodeGen::DoCheckArrayBufferNotNeutered(
+    LCheckArrayBufferNotNeutered* instr) {
+  UseScratchRegisterScope temps(masm());
+  Register view = ToRegister(instr->view());
+  Register scratch = temps.AcquireX();
+
+  __ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
+  __ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+  __ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
+  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = ToRegister(instr->temp());
+
+  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first, last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    __ Cmp(scratch, first);
+    if (first == last) {
+      // If there is only one type in the interval check for equality.
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+    } else if (last == LAST_TYPE) {
+      // We don't need to compare with the upper bound of the interval.
+      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
+    } else {
+      // If we are below the lower bound, set the C flag and clear the Z flag
+      // to force a deopt.
+      __ Ccmp(scratch, last, CFlag, hs);
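+      // Ccmp performs the comparison with 'last' only if 'hs' held for the
+      // lower bound; otherwise it sets the flags to CFlag (C set, Z clear),
+      // which also satisfies 'hi' and therefore triggers the deopt below.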
+      DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK((tag == 0) || (tag == mask));
+      if (tag == 0) {
+        DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
+                           Deoptimizer::kWrongInstanceType);
+      } else {
+        DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
+                             Deoptimizer::kWrongInstanceType);
+      }
+    } else {
+      if (tag == 0) {
+        __ Tst(scratch, mask);
+      } else {
+        __ And(scratch, scratch, mask);
+        __ Cmp(scratch, tag);
+      }
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+    }
+  }
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->unclamped());
+  Register result = ToRegister32(instr->result());
+  __ ClampDoubleToUint8(result, input, double_scratch());
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register input = ToRegister32(instr->unclamped());
+  Register result = ToRegister32(instr->result());
+  __ ClampInt32ToUint8(result, input);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register input = ToRegister(instr->unclamped());
+  Register result = ToRegister32(instr->result());
+  Label done;
+
+  // Both smi and heap number cases are handled.
+  Label is_not_smi;
+  __ JumpIfNotSmi(input, &is_not_smi);
+  __ SmiUntag(result.X(), input);
+  __ ClampInt32ToUint8(result);
+  __ B(&done);
+
+  __ Bind(&is_not_smi);
+
+  // Check for heap number.
+  Label is_heap_number;
+  __ JumpIfHeapNumber(input, &is_heap_number);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+                      Deoptimizer::kNotAHeapNumberUndefined);
+  __ Mov(result, 0);
+  __ B(&done);
+
+  // Heap number case.
+  __ Bind(&is_heap_number);
+  DoubleRegister dbl_scratch = double_scratch();
+  DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
+  __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
+
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->value());
+  Register result_reg = ToRegister(instr->result());
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ Fmov(result_reg, value_reg);
+    __ Lsr(result_reg, result_reg, 32);
+  } else {
+    __ Fmov(result_reg.W(), value_reg.S());
+  }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
+
+  // Insert the least significant 32 bits of hi_reg into the most significant
+  // 32 bits of lo_reg, and move to a floating point register.
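+  // For example, hi_reg == 0x00000000aabbccdd and lo_reg == 0x0000000011223344
+  // yield lo_reg == 0xaabbccdd11223344.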
+  __ Bfi(lo_reg, hi_reg, 32, 32);
+  __ Fmov(result_reg, lo_reg);
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Handle<String> class_name = instr->hydrogen()->class_name();
+  Label* true_label = instr->TrueLabel(chunk_);
+  Label* false_label = instr->FalseLabel(chunk_);
+  Register input = ToRegister(instr->value());
+  Register scratch1 = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  __ JumpIfSmi(input, false_label);
+
+  Register map = scratch2;
+  __ CompareObjectType(input, map, scratch1, JS_FUNCTION_TYPE);
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+    __ B(eq, true_label);
+  } else {
+    __ B(eq, false_label);
+  }
+
+  // Check if the constructor in the map is a function.
+  {
+    UseScratchRegisterScope temps(masm());
+    Register instance_type = temps.AcquireX();
+    __ GetMapConstructor(scratch1, map, scratch2, instance_type);
+    __ Cmp(instance_type, JS_FUNCTION_TYPE);
+  }
+  // Objects with a non-function constructor have class 'Object'.
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
+    __ B(ne, true_label);
+  } else {
+    __ B(ne, false_label);
+  }
+
+  // The constructor function is in scratch1. Get its instance class name.
+  __ Ldr(scratch1,
+         FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(scratch1,
+         FieldMemOperand(scratch1,
+                         SharedFunctionInfo::kInstanceClassNameOffset));
+
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted. This routine isn't expected to work for random API-created
+  // classes, and it doesn't have to, because such classes can't be accessed
+  // with natives syntax. Since both sides are internalized it is sufficient
+  // to use an identity comparison.
+  EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
+  DCHECK(instr->hydrogen()->representation().IsDouble());
+  FPRegister object = ToDoubleRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+
+  // If we don't have a NaN, we don't have the hole, so branch now to avoid the
+  // (relatively expensive) hole-NaN check.
+  __ Fcmp(object, object);
+  __ B(vc, instr->FalseLabel(chunk_));
+
+  // We have a NaN, but is it the hole?
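+  // The hole is stored as a NaN with a fixed bit pattern (kHoleNanInt64), so
+  // comparing the raw bits distinguishes it from any other NaN.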
+  __ Fmov(temp, object);
+  EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
+  DCHECK(instr->hydrogen()->representation().IsTagged());
+  Register object = ToRegister(instr->object());
+
+  EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register value = ToRegister(instr->value());
+  Register map = ToRegister(instr->temp());
+
+  __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
+  EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+  Representation rep = instr->hydrogen()->value()->representation();
+  DCHECK(!rep.IsInteger32());
+  Register scratch = ToRegister(instr->temp());
+
+  if (rep.IsDouble()) {
+    __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
+                       instr->TrueLabel(chunk()));
+  } else {
+    Register value = ToRegister(instr->value());
+    __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
+    __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
+    __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
+  }
+  EmitGoto(instr->FalseDestination(chunk()));
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  bool is_unsigned =
+      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+  Condition cond = TokenToCondition(instr->op(), is_unsigned);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
+
+      // If a NaN is involved, i.e. the result is unordered (V set), jump to
+      // the false block label.
+      __ B(vs, instr->FalseLabel(chunk_));
+      EmitBranch(instr, cond);
+    } else {
+      if (instr->hydrogen_value()->representation().IsInteger32()) {
+        if (right->IsConstantOperand()) {
+          EmitCompareAndBranch(instr, cond, ToRegister32(left),
+                               ToOperand32(right));
+        } else {
+          // Commute the operands and the condition.
+          EmitCompareAndBranch(instr, CommuteCondition(cond),
+                               ToRegister32(right), ToOperand32(left));
+        }
+      } else {
+        DCHECK(instr->hydrogen_value()->representation().IsSmi());
+        if (right->IsConstantOperand()) {
+          int32_t value = ToInteger32(LConstantOperand::cast(right));
+          EmitCompareAndBranch(instr,
+                               cond,
+                               ToRegister(left),
+                               Operand(Smi::FromInt(value)));
+        } else if (left->IsConstantOperand()) {
+          // Commute the operands and the condition.
+          int32_t value = ToInteger32(LConstantOperand::cast(left));
+          EmitCompareAndBranch(instr,
+                               CommuteCondition(cond),
+                               ToRegister(right),
+                               Operand(Smi::FromInt(value)));
+        } else {
+          EmitCompareAndBranch(instr,
+                               cond,
+                               ToRegister(left),
+                               ToRegister(right));
+        }
+      }
+    }
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+  EmitCompareAndBranch(instr, eq, left, right);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Token::Value op = instr->op();
+  Condition cond = TokenToCondition(op, false);
+
+  DCHECK(ToRegister(instr->left()).Is(x1));
+  DCHECK(ToRegister(instr->right()).Is(x0));
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // Signal that we don't inline smi code before this stub.
+  InlineSmiCheckInfo::EmitNotInlined(masm());
+
+  // Return true or false depending on CompareIC result.
+  // This instruction is marked as call. We can clobber any register.
+  DCHECK(instr->IsMarkedAsCall());
+  __ LoadTrueFalseRoots(x1, x2);
+  __ Cmp(x0, 0);
+  __ Csel(ToRegister(instr->result()), x1, x2, cond);
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  DCHECK(instr->result()->IsDoubleRegister());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  if (instr->value() == 0) {
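+    // +0.0 and -0.0 compare equal, so use copysign to tell them apart.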
+    if (copysign(1.0, instr->value()) == 1.0) {
+      __ Fmov(result, fp_zero);
+    } else {
+      __ Fneg(result, fp_zero);
+    }
+  } else {
+    __ Fmov(result, instr->value());
+  }
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  DCHECK(is_int32(instr->value()));
+  // Cast the value here to ensure that the value isn't sign extended by the
+  // implicit Operand constructor.
+  __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+  __ Mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Handle<Object> object = instr->value(isolate());
+  AllowDeferredHandleDereference smi_check;
+  __ LoadObject(ToRegister(instr->result()), object);
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
+  Register result = ToRegister(instr->result());
+  if (info()->IsOptimizing()) {
+    __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in cp.
+    DCHECK(result.is(cp));
+  }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+  Register reg = ToRegister(instr->value());
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
+  AllowDeferredHandleDereference smi_check;
+  if (isolate()->heap()->InNewSpace(*object)) {
+    UseScratchRegisterScope temps(masm());
+    Register temp = temps.AcquireX();
+    Handle<Cell> cell = isolate()->factory()->NewCell(object);
+    __ Mov(temp, Operand(cell));
+    __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
+    __ Cmp(reg, temp);
+  } else {
+    __ Cmp(reg, Operand(object));
+  }
+  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
+    type = Deoptimizer::LAZY;
+  }
+
+  Deoptimize(instr, instr->hydrogen()->reason(), &type);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister32(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister32(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
+  }
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    // Test dividend for kMinInt by subtracting one (cmp) and checking for
+    // overflow.
+    __ Cmp(dividend, 1);
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+  }
+  // Deoptimize if remainder will not be 0.
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+      divisor != 1 && divisor != -1) {
+    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+    __ Tst(dividend, mask);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+  }
+
+  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
+    __ Neg(result, dividend);
+    return;
+  }
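+  // An arithmetic right shift rounds towards -infinity; for negative
+  // dividends, first add (2^shift - 1), extracted from the sign bits, so the
+  // division truncates towards zero. E.g. -7 / 4: (-7 + 3) >> 2 = -1.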
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (shift == 0) {
+    __ Mov(result, dividend);
+  } else if (shift == 1) {
+    __ Add(result, dividend, Operand(dividend, LSR, 31));
+  } else {
+    __ Mov(result, Operand(dividend, ASR, 31));
+    __ Add(result, dividend, Operand(result, LSR, 32 - shift));
+  }
+  if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
+  if (divisor < 0) __ Neg(result, result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister32(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister32(instr->result());
+  DCHECK(!AreAliased(dividend, result));
+
+  if (divisor == 0) {
+    Deoptimize(instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
+  }
+
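+  // TruncatingDiv computes dividend / Abs(divisor) without an Sdiv, by
+  // multiplying with a precomputed magic constant and shifting (see the
+  // macro assembler for details).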
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ Neg(result, result);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    Register temp = ToRegister32(instr->temp());
+    DCHECK(!AreAliased(dividend, result, temp));
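+    // Compute temp = dividend - result * divisor and deopt if the division
+    // was not exact, i.e. if the remainder is non-zero.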
+    __ Sxtw(dividend.X(), dividend);
+    __ Mov(temp, divisor);
+    __ Smsubl(temp.X(), result, temp, dividend.X());
+    DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister32(instr->dividend());
+  Register divisor = ToRegister32(instr->divisor());
+  Register result = ToRegister32(instr->result());
+
+  // Issue the division first, and then check for any deopt cases whilst the
+  // result is computed.
+  __ Sdiv(result, dividend, divisor);
+
+  if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    DCHECK(!instr->temp());
+    return;
+  }
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) as that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ Cmp(divisor, 0);
+
+    // If the divisor < 0 (mi), compare the dividend, and deopt if it is
+    // zero, i.e. a zero dividend with a negative divisor deopts.
+    // If the divisor >= 0 (pl, the opposite of mi), set the flags to
+    // condition ne so we don't deopt, i.e. a positive divisor doesn't deopt.
+    __ Ccmp(dividend, 0, NoFlag, mi);
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    // Test dividend for kMinInt by subtracting one (cmp) and checking for
+    // overflow.
+    __ Cmp(dividend, 1);
+    // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
+    // -1. If overflow is clear, set the flags for condition ne, as the
+    // dividend isn't kMinInt, and thus we shouldn't deopt.
+    __ Ccmp(divisor, -1, NoFlag, vs);
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+  }
+
+  // Compute remainder and deopt if it's not zero.
+  Register remainder = ToRegister32(instr->temp());
+  __ Msub(remainder, result, divisor, dividend);
+  DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
+}
+
+
+void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister32(instr->result());
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
+  }
+
+  __ TryRepresentDoubleAsInt32(result, input, double_scratch());
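+  // The Z flag is set iff the double was exactly representable as an int32,
+  // so ne here means precision was lost or the input was NaN.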
+  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+  if (instr->tag_result()) {
+    __ SmiTag(result.X());
+  }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+
+  RecordPushedArgumentsDelta(instr->hydrogen_value()->argument_delta());
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  Label load_cache, done;
+
+  __ EnumLengthUntagged(result, map);
+  __ Cbnz(result, &load_cache);
+
+  __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+  __ B(&done);
+
+  __ Bind(&load_cache);
+  __ LoadInstanceDescriptors(map, result);
+  __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+  __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+  DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
+
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  Register object = ToRegister(instr->object());
+  Register null_value = x5;
+
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(object.Is(x0));
+
+  DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
+
+  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+  __ CompareObjectType(object, x1, x1, JS_PROXY_TYPE);
+  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
+
+  Label use_cache, call_runtime;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
+
+  __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ B(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ Bind(&call_runtime);
+  __ Push(object);
+  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
+
+  __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
+  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
+                      Deoptimizer::kWrongMap);
+
+  __ Bind(&use_cache);
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ AssertString(input);
+
+  // Assert that we can use a W register load to get the hash.
+  DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
+  __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  // Do not emit jump if we are emitting a goto to the next block.
+  if (!IsNextEmittedBlock(block)) {
+    __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister32(instr->temp());
+
+  // Assert that the cache status bits fit in a W register.
+  DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
+  __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
+  __ Tst(temp, String::kContainsCachedArrayIndexMask);
+  EmitBranch(instr, eq);
+}
+
+
+// HHasInstanceTypeAndBranch instruction is built with an interval of types
+// to test but is only used in very restricted ways. The only possible kinds
+// of intervals are:
+//  - [ FIRST_TYPE, instr->to() ]
+//  - [ instr->from(), LAST_TYPE ]
+//  - instr->from() == instr->to()
+//
+// These kinds of intervals can be checked with only one compare instruction,
+// provided the correct value and test condition are used.
+//
+// TestType() will return the value to use in the compare instruction and
+// BranchCondition() will return the condition to use depending on the kind
+// of interval actually specified in the instruction.
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  DCHECK((from == to) || (to == LAST_TYPE));
+  return from;
+}
+
+
+// See comment above TestType function for what this function does.
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return hs;
+  if (from == FIRST_TYPE) return ls;
+  UNREACHABLE();
+  return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+  EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    __ Add(result, base, ToOperand32(instr->offset()));
+  } else {
+    __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
+  }
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+  DCHECK(ToRegister(instr->result()).is(x0));
+  InstanceOfStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+    LHasInPrototypeChainAndBranch* instr) {
+  Register const object = ToRegister(instr->object());
+  Register const object_map = ToRegister(instr->scratch1());
+  Register const object_instance_type = ToRegister(instr->scratch2());
+  Register const object_prototype = object_map;
+  Register const prototype = ToRegister(instr->prototype());
+
+  // The {object} must be a spec object.  It's sufficient to know that {object}
+  // is not a smi, since all other non-spec objects have {null} prototypes and
+  // will be ruled out below.
+  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+    __ JumpIfSmi(object, instr->FalseLabel(chunk_));
+  }
+
+  // Loop through the {object}'s prototype chain looking for the {prototype}.
+  __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+  Label loop;
+  __ Bind(&loop);
+
+  // Deoptimize if the object needs to be access checked.
+  __ Ldrb(object_instance_type,
+          FieldMemOperand(object_map, Map::kBitFieldOffset));
+  __ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
+  DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
+  // Deoptimize for proxies.
+  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+  DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+
+  __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+  __ Cmp(object_prototype, prototype);
+  __ B(eq, instr->TrueLabel(chunk_));
+  __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+  __ B(eq, instr->FalseLabel(chunk_));
+  __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+  __ B(&loop);
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  Register value = ToRegister32(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ Scvtf(result, value);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  // The function is required to be in x1.
+  DCHECK(ToRegister(instr->function()).is(x1));
+  DCHECK(instr->HasPointerMap());
+
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(x1, no_reg, count, CALL_FUNCTION, generator);
+  } else {
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
+                      instr->arity(), instr);
+  }
+  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+  return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register val = ToRegister(instr->value());
+  Register scratch = ToRegister(instr->temp());
+
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->type().IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+  Condition true_cond =
+      EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
+
+  EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Register value = ToRegister(instr->value());
+  STATIC_ASSERT(kSmiTag == 0);
+  EmitTestAndBranch(instr, eq, value, kSmiTagMask);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+
+  EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
+}
+
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
+          label->block_id(),
+          LabelType(label));
+
+  // Inherit pushed_arguments_ from the predecessor's argument count.
+  if (label->block()->HasPredecessor()) {
+    pushed_arguments_ = label->block()->predecessors()->at(0)->argument_count();
+#ifdef DEBUG
+    for (auto p : *label->block()->predecessors()) {
+      DCHECK_EQ(p->argument_count(), pushed_arguments_);
+    }
+#endif
+  }
+
+  __ Bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+                       Deoptimizer::kHole);
+    } else {
+      Label not_the_hole;
+      __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
+      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+      __ Bind(&not_the_hole);
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+
+  // Get the prototype or initial map from the function.
+  __ Ldr(result, FieldMemOperand(function,
+                                 JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+                   Deoptimizer::kHole);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ CompareObjectType(result, temp, temp, MAP_TYPE);
+  __ B(ne, &done);
+
+  // Get the prototype from the initial map.
+  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+  // All done.
+  __ Bind(&done);
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(x0));
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Mov(vector_register, vector);
+  // No need to allocate this register.
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ Mov(slot_register, Smi::FromInt(index));
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = ToRegister(instr->temp_slot());
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Mov(vector_register, vector);
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ Mov(slot_register, Smi::FromInt(index));
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).Is(x0));
+  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+                                         SLOPPY, PREMONOMORPHIC).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
+    Register key,
+    Register base,
+    Register scratch,
+    bool key_is_smi,
+    bool key_is_constant,
+    int constant_key,
+    ElementsKind elements_kind,
+    int base_offset) {
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+
+  if (key_is_constant) {
+    int key_offset = constant_key << element_size_shift;
+    return MemOperand(base, key_offset + base_offset);
+  }
+
+  if (key_is_smi) {
+    __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
+    return MemOperand(scratch, base_offset);
+  }
+
+  if (base_offset == 0) {
+    return MemOperand(base, key, SXTW, element_size_shift);
+  }
+
+  DCHECK(!AreAliased(scratch, key));
+  __ Add(scratch, base, base_offset);
+  return MemOperand(scratch, key, SXTW, element_size_shift);
+}
+
+
+void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
+  Register ext_ptr = ToRegister(instr->elements());
+  Register scratch;
+  ElementsKind elements_kind = instr->elements_kind();
+
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  int constant_key = 0;
+  if (key_is_constant) {
+    DCHECK(instr->temp() == NULL);
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xf0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    scratch = ToRegister(instr->temp());
+    key = ToRegister(instr->key());
+  }
+
+  MemOperand mem_op =
+      PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+                                       key_is_constant, constant_key,
+                                       elements_kind,
+                                       instr->base_offset());
+
+  if (elements_kind == FLOAT32_ELEMENTS) {
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ Ldr(result.S(), mem_op);
+    __ Fcvt(result, result.S());
+  } else if (elements_kind == FLOAT64_ELEMENTS) {
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ Ldr(result, mem_op);
+  } else {
+    Register result = ToRegister(instr->result());
+
+    switch (elements_kind) {
+      case INT8_ELEMENTS:
+        __ Ldrsb(result, mem_op);
+        break;
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ Ldrb(result, mem_op);
+        break;
+      case INT16_ELEMENTS:
+        __ Ldrsh(result, mem_op);
+        break;
+      case UINT16_ELEMENTS:
+        __ Ldrh(result, mem_op);
+        break;
+      case INT32_ELEMENTS:
+        __ Ldrsw(result, mem_op);
+        break;
+      case UINT32_ELEMENTS:
+        __ Ldr(result.W(), mem_op);
+        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+          // Deopt if value >= 0x80000000, i.e. it doesn't fit in an int32.
+          __ Tst(result, 0xFFFFFFFF80000000);
+          DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
+                                              Register elements,
+                                              Register key,
+                                              bool key_is_tagged,
+                                              ElementsKind elements_kind,
+                                              Representation representation,
+                                              int base_offset) {
+  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+  STATIC_ASSERT(kSmiTag == 0);
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+
+  // Even though the HLoad/StoreKeyed instructions force the input
+  // representation for the key to be an integer, the input gets replaced during
+  // bounds check elimination with the index argument to the bounds check, which
+  // can be tagged, so that case must be handled here, too.
+  if (key_is_tagged) {
+    __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
+    if (representation.IsInteger32()) {
+      DCHECK(elements_kind == FAST_SMI_ELEMENTS);
+      // Read or write only the smi payload in the case of fast smi arrays.
+      return UntagSmiMemOperand(base, base_offset);
+    } else {
+      return MemOperand(base, base_offset);
+    }
+  } else {
+    // Sign extend key because it could be a 32-bit negative value or contain
+    // garbage in the top 32-bits. The address computation happens in 64-bit.
+    DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
+    if (representation.IsInteger32()) {
+      DCHECK(elements_kind == FAST_SMI_ELEMENTS);
+      // Read or write only the smi payload in the case of fast smi arrays.
+      __ Add(base, elements, Operand(key, SXTW, element_size_shift));
+      return UntagSmiMemOperand(base, base_offset);
+    } else {
+      __ Add(base, elements, base_offset);
+      return MemOperand(base, key, SXTW, element_size_shift);
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
+  Register elements = ToRegister(instr->elements());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  MemOperand mem_op;
+
+  if (instr->key()->IsConstantOperand()) {
+    DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
+           (instr->temp() == NULL));
+
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xf0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    int offset = instr->base_offset() + constant_key * kDoubleSize;
+    mem_op = MemOperand(elements, offset);
+  } else {
+    Register load_base = ToRegister(instr->temp());
+    Register key = ToRegister(instr->key());
+    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
+                                      instr->hydrogen()->elements_kind(),
+                                      instr->hydrogen()->representation(),
+                                      instr->base_offset());
+  }
+
+  __ Ldr(result, mem_op);
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    Register scratch = ToRegister(instr->temp());
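+    // The hole is a NaN with the fixed bit pattern kHoleNanInt64; XOR-ing
+    // the raw bits gives zero only for the hole.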
+    __ Fmov(scratch, result);
+    __ Eor(scratch, scratch, kHoleNanInt64);
+    DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+  MemOperand mem_op;
+
+  Representation representation = instr->hydrogen()->representation();
+  if (instr->key()->IsConstantOperand()) {
+    DCHECK(instr->temp() == NULL);
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset = instr->base_offset() +
+        ToInteger32(const_operand) * kPointerSize;
+    if (representation.IsInteger32()) {
+      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+      STATIC_ASSERT(kSmiTag == 0);
+      mem_op = UntagSmiMemOperand(elements, offset);
+    } else {
+      mem_op = MemOperand(elements, offset);
+    }
+  } else {
+    Register load_base = ToRegister(instr->temp());
+    Register key = ToRegister(instr->key());
+    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+
+    mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
+                                      instr->hydrogen()->elements_kind(),
+                                      representation, instr->base_offset());
+  }
+
+  __ Load(result, mem_op, representation);
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+      DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
+    } else {
+      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+                       Deoptimizer::kHole);
+    }
+  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
+    Label done;
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    __ B(ne, &done);
+    if (info()->IsStub()) {
+      // A stub can safely convert the hole to undefined only if the array
+      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // it needs to bail out.
+      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
+      __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+      __ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+      DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+    }
+    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+    __ Bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+                        isolate(), instr->hydrogen()->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  DCHECK(ToRegister(instr->result()).Is(x0));
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+  Register object = ToRegister(instr->object());
+
+  if (access.IsExternalMemory()) {
+    Register result = ToRegister(instr->result());
+    __ Load(result, MemOperand(object, offset), access.representation());
+    return;
+  }
+
+  if (instr->hydrogen()->representation().IsDouble()) {
+    DCHECK(access.IsInobject());
+    FPRegister result = ToDoubleRegister(instr->result());
+    __ Ldr(result, FieldMemOperand(object, offset));
+    return;
+  }
+
+  Register result = ToRegister(instr->result());
+  Register source;
+  if (access.IsInobject()) {
+    source = object;
+  } else {
+    // Load the properties array, using result as a scratch register.
+    __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    source = result;
+  }
+
+  if (access.representation().IsSmi() &&
+      instr->hydrogen()->representation().IsInteger32()) {
+    // Read int value directly from upper half of the smi.
+    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+    STATIC_ASSERT(kSmiTag == 0);
+    __ Load(result, UntagSmiFieldMemOperand(source, offset),
+            Representation::Integer32());
+  } else {
+    __ Load(result, FieldMemOperand(source, offset), access.representation());
+  }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  // LoadIC expects name and receiver in registers.
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(
+          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
+          instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  DCHECK(ToRegister(instr->result()).is(x0));
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->value());
+  __ EnumLengthSmi(result, map);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsDouble()) {
+    DoubleRegister input = ToDoubleRegister(instr->value());
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ Fabs(result, input);
+  } else if (r.IsSmi() || r.IsInteger32()) {
+    Register input = r.IsSmi() ? ToRegister(instr->value())
+                               : ToRegister32(instr->value());
+    Register result = r.IsSmi() ? ToRegister(instr->result())
+                                : ToRegister32(instr->result());
+    __ Abs(result, input);
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+  }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+                                       Label* exit,
+                                       Label* allocation_entry) {
+  // Handle the tricky cases of MathAbsTagged:
+  //  - HeapNumber inputs.
+  //    - Negative inputs produce a positive result, so a new HeapNumber is
+  //      allocated to hold it.
+  //    - Positive inputs are returned as-is, since there is no need to allocate
+  //      a new HeapNumber for the result.
+  //  - The (smi) input -0x80000000 produces +0x80000000, which does not fit
+  //    in a smi. In this case, the inline code sets the result and jumps
+  //    directly to the allocation_entry label.
+  DCHECK(instr->context() != NULL);
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Register input = ToRegister(instr->value());
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+  Register result_bits = ToRegister(instr->temp3());
+  Register result = ToRegister(instr->result());
+
+  Label runtime_allocation;
+
+  // Deoptimize if the input is not a HeapNumber.
+  DeoptimizeIfNotHeapNumber(input, instr);
+
+  // If the argument is positive, we can return it as-is, without any need to
+  // allocate a new HeapNumber for the result. We have to do this in integer
+  // registers (rather than with fabs) because we need to be able to distinguish
+  // the two zeroes.
+  __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
+  __ Mov(result, input);
+  __ Tbz(result_bits, kXSignBit, exit);
+
+  // Calculate abs(input) by clearing the sign bit.
+  __ Bic(result_bits, result_bits, kXSignMask);
+
+  // Allocate a new HeapNumber to hold the result.
+  //  result_bits   The bit representation of the (double) result.
+  __ Bind(allocation_entry);
+  __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
+  // The inline (non-deferred) code will store result_bits into result.
+  __ B(exit);
+
+  __ Bind(&runtime_allocation);
+  if (FLAG_debug_code) {
+    // Because result is in the pointer map, we need to make sure it has a valid
+    // tagged value before we call the runtime. We speculatively set it to the
+    // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
+    // be valid.
+    Label result_ok;
+    Register input = ToRegister(instr->value());
+    __ JumpIfSmi(result, &result_ok);
+    __ Cmp(input, result);
+    __ Assert(eq, kUnexpectedValue);
+    __ Bind(&result_ok);
+  }
+
+  { PushSafepointRegistersScope scope(this);
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+                            instr->context());
+    __ StoreToSafepointRegisterSlot(x0, result);
+  }
+  // The inline (non-deferred) code will store result_bits into result.
+}
+
+
+void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTagged: public LDeferredCode {
+   public:
+    DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredMathAbsTagged(instr_, exit(),
+                                         allocation_entry());
+    }
+    virtual LInstruction* instr() { return instr_; }
+    Label* allocation_entry() { return &allocation; }
+   private:
+    LMathAbsTagged* instr_;
+    Label allocation;
+  };
+
+  // TODO(jbramley): The early-exit mechanism would skip the new frame handling
+  // in GenerateDeferredCode. Tidy this up.
+  DCHECK(!NeedsDeferredFrame());
+
+  DeferredMathAbsTagged* deferred =
+      new(zone()) DeferredMathAbsTagged(this, instr);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
+         instr->hydrogen()->value()->representation().IsSmi());
+  Register input = ToRegister(instr->value());
+  Register result_bits = ToRegister(instr->temp3());
+  Register result = ToRegister(instr->result());
+  Label done;
+
+  // Handle smis inline.
+  // We can treat smis as 64-bit integers, since the (low-order) tag bits will
+  // never get set by the negation. This is therefore the same as the Integer32
+  // case in DoMathAbs, except that it operates on 64-bit values.
+  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
+
+  __ JumpIfNotSmi(input, deferred->entry());
+
+  __ Abs(result, input, NULL, &done);
+
+  // The result is the magnitude (abs) of the smallest value a smi can
+  // represent, encoded as a double.
+  __ Mov(result_bits, double_to_rawbits(0x80000000));
+  __ B(deferred->allocation_entry());
+
+  __ Bind(deferred->exit());
+  __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
+
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
+  DoubleRegister double_temp2 = double_scratch();
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+  Register temp3 = ToRegister(instr->temp3());
+
+  MathExpGenerator::EmitMathExp(masm(), input, result,
+                                double_temp1, double_temp2,
+                                temp1, temp2, temp3);
+}
+
+
+void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+
+  __ Frintm(result, input);
+}
+
+
+void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
+  }
+
+  __ Fcvtms(result, input);
+
+  // Check that the result fits into a 32-bit integer.
+  //  - The result did not overflow.
+  __ Cmp(result, Operand(result, SXTW));
+  //  - The input was not NaN.
+  __ Fccmp(input, input, NoFlag, eq);
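+  // If the result fits (eq), Fccmp(input, input) is unordered only for NaN
+  // and leaves ne set in that case; if it does not fit, NoFlag forces ne.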
+  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  Register dividend = ToRegister32(instr->dividend());
+  Register result = ToRegister32(instr->result());
+  int32_t divisor = instr->divisor();
+
+  // If the divisor is 1, return the dividend.
+  if (divisor == 1) {
+    __ Mov(result, dividend, kDiscardForSameWReg);
+    return;
+  }
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 1) {
+    __ Mov(result, Operand(dividend, ASR, shift));
+    return;
+  }
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+  __ Negs(result, dividend);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+      DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+    }
+    return;
+  }
+
+  // If the negation could not overflow, simply shifting is OK.
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ Mov(result, Operand(dividend, ASR, shift));
+    return;
+  }
+
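+  // If the negation above overflowed (dividend was kMinInt, vs), the shifted
+  // result is invalid; select the statically known value kMinInt / divisor.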
+  __ Asr(result, result, shift);
+  __ Csel(result, result, kMinInt / divisor, vc);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister32(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister32(instr->result());
+  DCHECK(!AreAliased(dividend, result));
+
+  if (divisor == 0) {
+    Deoptimize(instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(result, dividend, Abs(divisor));
+    if (divisor < 0) __ Neg(result, result);
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
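+  // E.g. dividend = -7, divisor = 3: temp = -7 + 1 = -6, trunc(-6 / 3) = -2,
+  // and -2 - 1 = -3 = floor(-7 / 3).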
+  Register temp = ToRegister32(instr->temp());
+  DCHECK(!AreAliased(temp, dividend, result));
+  Label needs_adjustment, done;
+  __ Cmp(dividend, 0);
+  __ B(divisor > 0 ? lt : gt, &needs_adjustment);
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ Neg(result, result);
+  __ B(&done);
+  __ Bind(&needs_adjustment);
+  __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(result, temp, Abs(divisor));
+  if (divisor < 0) __ Neg(result, result);
+  __ Sub(result, result, Operand(1));
+  __ Bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  Register dividend = ToRegister32(instr->dividend());
+  Register divisor = ToRegister32(instr->divisor());
+  Register remainder = ToRegister32(instr->temp());
+  Register result = ToRegister32(instr->result());
+
+  // This can't cause an exception on ARM, so we can speculatively
+  // execute it right away.
+  __ Sdiv(result, dividend, divisor);
+
+  // Check for x / 0.
+  DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+
+  // Check for (kMinInt / -1).
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    // The V flag will be set iff dividend == kMinInt.
+    __ Cmp(dividend, 1);
+    __ Ccmp(divisor, -1, NoFlag, vs);
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ Cmp(divisor, 0);
+    __ Ccmp(dividend, 0, ZFlag, mi);
+    // "divisor" can't be null because the code would have already been
+    // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
+    // In this case we need to deoptimize to produce a -0.
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+
+  Label done;
+  // If both operands have the same sign then we are done.
+  __ Eor(remainder, dividend, divisor);
+  __ Tbz(remainder, kWSignBit, &done);
+
+  // Check if the result needs to be corrected.
+  __ Msub(remainder, result, divisor, dividend);
+  __ Cbz(remainder, &done);
+  __ Sub(result, result, 1);
+
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToDoubleRegister(instr->value()).is(d0));
+  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+                   0, 1);
+  DCHECK(ToDoubleRegister(instr->result()).Is(d0));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister32(instr->value());
+  Register result = ToRegister32(instr->result());
+  __ Clz(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  Label done;
+
+  // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
+  //  Math.pow(-Infinity, 0.5) == +Infinity
+  //  Math.pow(-0.0, 0.5) == +0.0
+
+  // Catch -infinity inputs first.
+  // TODO(jbramley): A constant infinity register would be helpful here.
+  __ Fmov(double_scratch(), kFP64NegativeInfinity);
+  __ Fcmp(double_scratch(), input);
+  __ Fabs(result, input);
+  __ B(&done, eq);
+
+  // Add +0.0 to convert -0.0 to +0.0.
+  __ Fadd(double_scratch(), input, fp_zero);
+  __ Fsqrt(result, double_scratch());
+
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  Register integer_exponent = MathPowIntegerDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
+         ToDoubleRegister(instr->right()).is(d1));
+  DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(!exponent_type.IsInteger32() ||
+         ToRegister(instr->right()).is(integer_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(d0));
+  DCHECK(ToDoubleRegister(instr->result()).is(d0));
+
+  if (exponent_type.IsSmi()) {
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
+    __ Bind(&no_deopt);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
+    // supports large integer exponents.
+    __ Sxtw(integer_exponent, integer_exponent);
+    MathPowStub stub(isolate(), MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+    __ CallStub(&stub);
+  }
+}
+
+
+void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister scratch_d = double_scratch();
+
+  DCHECK(!AreAliased(input, result, scratch_d));
+
+  Label done;
+
+  __ Frinta(result, input);
+  __ Fcmp(input, 0.0);
+  __ Fccmp(result, input, ZFlag, lt);
+  // The result is correct if the input was in [-0, +infinity], or was a
+  // negative integral value.
+  __ B(eq, &done);
+
+  // Here the input is negative and non-integral, with an exponent lower than
+  // 52. We do not have to worry about the 0.49999999999999994
+  // (0x3fdfffffffffffff) case, so we can safely add 0.5.
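+  // (Illustrative: input = -2.5. Frinta gave -3.0, which was rejected above
+  // because -3.0 != -2.5. Adding 0.5 gives -2.0, and Frintm(-2.0) == -2.0,
+  // matching Math.round(-2.5) == -2.)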
+  __ Fmov(scratch_d, 0.5);
+  __ Fadd(result, input, scratch_d);
+  __ Frintm(result, result);
+  // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
+  __ Fabs(result, result);
+  __ Fneg(result, result);
+
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister temp = ToDoubleRegister(instr->temp1());
+  DoubleRegister dot_five = double_scratch();
+  Register result = ToRegister(instr->result());
+  Label done;
+
+  // Math.round() rounds to the nearest integer, with ties going towards
+  // +infinity. This does not match any IEEE-754 rounding mode.
+  //  - Infinities and NaNs are propagated unchanged, but cause deopts because
+  //    they can't be represented as integers.
+  //  - The sign of the result is the same as the sign of the input. This means
+  //    that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
+  //    result of -0.0.
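+  // (Illustrative: input = 2.5 gives Fcvtms(3.0) == 3, while input = -2.5
+  // gives Fcvtms(-2.0) == -2, as the tie-breaking rule above requires.)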
+
+  // Add 0.5 and round towards -infinity.
+  __ Fmov(dot_five, 0.5);
+  __ Fadd(temp, input, dot_five);
+  __ Fcvtms(result, temp);
+
+  // The result is correct if:
+  //  result is not 0, as the input could be NaN or [-0.5, -0.0].
+  //  result is not 1, as 0.499...94 will wrongly map to 1.
+  //  result fits in 32 bits.
+  __ Cmp(result, Operand(result.W(), SXTW));
+  __ Ccmp(result, 1, ZFlag, eq);
+  __ B(hi, &done);
+
+  // At this point, we have to handle possible inputs of NaN or numbers in the
+  // range [-0.5, 1.5[, or numbers larger than 32 bits.
+
+  // Deoptimize if the result > 1, as it must be larger than 32 bits.
+  __ Cmp(result, 1);
+  DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
+
+  // Deoptimize for negative inputs, which at this point can only be numbers
+  // in the range [-0.5, -0.0].
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ Fmov(result, input);
+    DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Deoptimize if the input was NaN.
+  __ Fcmp(input, dot_five);
+  DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
+
+  // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
+  // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
+  // else 0; we avoid dealing with 0.499...94 directly.
+  __ Cset(result, ge);
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ Fcvt(result.S(), input);
+  __ Fcvt(result, result.S());
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ Fsqrt(result, input);
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  HMathMinMax::Operation op = instr->hydrogen()->operation();
+  if (instr->hydrogen()->representation().IsInteger32()) {
+    Register result = ToRegister32(instr->result());
+    Register left = ToRegister32(instr->left());
+    Operand right = ToOperand32(instr->right());
+
+    __ Cmp(left, right);
+    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+  } else if (instr->hydrogen()->representation().IsSmi()) {
+    Register result = ToRegister(instr->result());
+    Register left = ToRegister(instr->left());
+    Operand right = ToOperand(instr->right());
+
+    __ Cmp(left, right);
+    __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
+  } else {
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    DoubleRegister left = ToDoubleRegister(instr->left());
+    DoubleRegister right = ToDoubleRegister(instr->right());
+
+    if (op == HMathMinMax::kMathMax) {
+      __ Fmax(result, left, right);
+    } else {
+      DCHECK(op == HMathMinMax::kMathMin);
+      __ Fmin(result, left, right);
+    }
+  }
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister32(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister32(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
+  HMod* hmod = instr->hydrogen();
+  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
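+  // (Illustrative: dividend = -10, divisor = 8 gives mask = 7; the negative
+  // path below computes Neg: 10, And: 2, Negs: -2, matching -10 % 8 == -2
+  // in JS.)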
+  Label dividend_is_not_negative, done;
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
+    // Note that this is correct even for kMinInt operands.
+    __ Neg(dividend, dividend);
+    __ And(dividend, dividend, mask);
+    __ Negs(dividend, dividend);
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+    }
+    __ B(&done);
+  }
+
+  __ Bind(&dividend_is_not_negative);
+  __ And(dividend, dividend, mask);
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister32(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister32(instr->result());
+  Register temp = ToRegister32(instr->temp());
+  DCHECK(!AreAliased(dividend, result, temp));
+
+  if (divisor == 0) {
+    Deoptimize(instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  __ Sxtw(dividend.X(), dividend);
+  __ Mov(temp, Abs(divisor));
+  __ Smsubl(result.X(), result, temp, dividend.X());
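+  // (Illustrative: dividend = -7, divisor = 3. TruncatingDiv yields -2 and
+  // Smsubl yields -7 - (-2 * 3) = -1, matching -7 % 3 == -1 in JS. Using
+  // Abs(divisor) is fine since the result has the sign of the dividend.)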
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ Cbnz(result, &remainder_not_zero);
+    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
+    __ Bind(&remainder_not_zero);
+  }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  Register dividend = ToRegister32(instr->left());
+  Register divisor = ToRegister32(instr->right());
+  Register result = ToRegister32(instr->result());
+
+  Label done;
+  // modulo = dividend - quotient * divisor
+  __ Sdiv(result, dividend, divisor);
+  if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+  }
+  __ Msub(result, result, divisor, dividend);
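+  // (Illustrative: dividend = 7, divisor = -3. Sdiv truncates to -2 and
+  // Msub yields 7 - (-2 * -3) = 1, matching 7 % -3 == 1 in JS.)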
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ Cbnz(result, &done);
+    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
+  }
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
+  DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
+  bool is_smi = instr->hydrogen()->representation().IsSmi();
+  Register result =
+      is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
+  Register left =
+      is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
+  int32_t right = ToInteger32(instr->right());
+  DCHECK((right > -kMaxInt) && (right < kMaxInt));
+
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  bool bailout_on_minus_zero =
+    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+  if (bailout_on_minus_zero) {
+    if (right < 0) {
+      // The result is -0 if right is negative and left is zero.
+      DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
+    } else if (right == 0) {
+      // The result is -0 if right is zero and left is negative.
+      DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
+    }
+  }
+
+  switch (right) {
+    // Cases that can detect overflow.
+    case -1:
+      if (can_overflow) {
+        // Only 0x80000000 can overflow here.
+        __ Negs(result, left);
+        DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+      } else {
+        __ Neg(result, left);
+      }
+      break;
+    case 0:
+      // This case can never overflow.
+      __ Mov(result, 0);
+      break;
+    case 1:
+      // This case can never overflow.
+      __ Mov(result, left, kDiscardForSameWReg);
+      break;
+    case 2:
+      if (can_overflow) {
+        __ Adds(result, left, left);
+        DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+      } else {
+        __ Add(result, left, left);
+      }
+      break;
+
+    default:
+      // Multiplication by constant powers of two (and some related values)
+      // can be done efficiently with shifted operands.
+      int32_t right_abs = Abs(right);
+
+      if (base::bits::IsPowerOfTwo32(right_abs)) {
+        int right_log2 = WhichPowerOf2(right_abs);
+
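+        // Cls counts the redundant sign bits, i.e. how far the value can be
+        // shifted left without overflowing. (Illustrative: left of
+        // 0x40000000 has Cls == 0, so any shift taken here deopts.)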
+        if (can_overflow) {
+          Register scratch = result;
+          DCHECK(!AreAliased(scratch, left));
+          __ Cls(scratch, left);
+          __ Cmp(scratch, right_log2);
+          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
+        }
+
+        if (right >= 0) {
+          // result = left << log2(right)
+          __ Lsl(result, left, right_log2);
+        } else {
+          // result = -(left << log2(-right))
+          if (can_overflow) {
+            __ Negs(result, Operand(left, LSL, right_log2));
+            DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+          } else {
+            __ Neg(result, Operand(left, LSL, right_log2));
+          }
+        }
+        return;
+      }
+
+      // For the following cases, we could perform a conservative overflow
+      // check with CLS as above. However, the few cycles saved are likely not
+      // worth the risk of deoptimizing more often than required.
+      DCHECK(!can_overflow);
+
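+      // (Illustrative: right == 5 takes the IsPowerOfTwo32(right - 1) branch
+      // below, since 5 * left == left + (left << 2).)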
+      if (right >= 0) {
+        if (base::bits::IsPowerOfTwo32(right - 1)) {
+          // result = left + (left << log2(right - 1))
+          __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
+        } else if (base::bits::IsPowerOfTwo32(right + 1)) {
+          // result = -left + (left << log2(right + 1))
+          __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
+          __ Neg(result, result);
+        } else {
+          UNREACHABLE();
+        }
+      } else {
+        if (base::bits::IsPowerOfTwo32(-right + 1)) {
+          // result = left - (left << log2(-right + 1))
+          __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
+        } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
+          // result = -left - (left << log2(-right - 1))
+          __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
+          __ Neg(result, result);
+        } else {
+          UNREACHABLE();
+        }
+      }
+  }
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register result = ToRegister32(instr->result());
+  Register left = ToRegister32(instr->left());
+  Register right = ToRegister32(instr->right());
+
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  bool bailout_on_minus_zero =
+    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+  if (bailout_on_minus_zero && !left.Is(right)) {
+    // If one operand is zero and the other is negative, the result is -0.
+    //  - Set Z (eq) if either left or right, or both, are 0.
+    __ Cmp(left, 0);
+    __ Ccmp(right, 0, ZFlag, ne);
+    //  - If so (eq), set N (mi) if left + right is negative.
+    //  - Otherwise, clear N.
+    __ Ccmn(left, right, NoFlag, eq);
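+    // (Illustrative walk-through: left == 0, right == -5. Cmp sets eq, the
+    // Ccmp fails its ne condition and forces Z, and the Ccmn then sets flags
+    // from 0 + (-5), i.e. mi, so the deopt below fires.)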
+    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+  }
+
+  if (can_overflow) {
+    __ Smull(result.X(), left, right);
+    __ Cmp(result.X(), Operand(result, SXTW));
+    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+  } else {
+    __ Mul(result, left, right);
+  }
+}
+
+
+void LCodeGen::DoMulS(LMulS* instr) {
+  Register result = ToRegister(instr->result());
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  bool bailout_on_minus_zero =
+    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+
+  if (bailout_on_minus_zero && !left.Is(right)) {
+    // If one operand is zero and the other is negative, the result is -0.
+    //  - Set Z (eq) if either left or right, or both, are 0.
+    __ Cmp(left, 0);
+    __ Ccmp(right, 0, ZFlag, ne);
+    //  - If so (eq), set N (mi) if left + right is negative.
+    //  - Otherwise, clear N.
+    __ Ccmn(left, right, NoFlag, eq);
+    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+  }
+
+  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+  if (can_overflow) {
+    __ Smulh(result, left, right);
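+    // (Smulh keeps the high 64 bits of the 128-bit product; with both inputs
+    // tagged as value << 32, that is exactly the untagged product. The
+    // compare below checks that it fits in 32 bits before it is retagged.)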
+    __ Cmp(result, Operand(result.W(), SXTW));
+    __ SmiTag(result);
+    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+  } else {
+    if (AreAliased(result, left, right)) {
+      // All three registers are the same: half untag the input and then
+      // multiply, giving a tagged result.
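+      // (Illustrative: for a smi value a, left == a << 32. Asr by 16 yields
+      // a << 16, and (a << 16) * (a << 16) == (a * a) << 32 modulo 2^64,
+      // which is the tagged square.)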
+      STATIC_ASSERT((kSmiShift % 2) == 0);
+      __ Asr(result, left, kSmiShift / 2);
+      __ Mul(result, result, result);
+    } else if (result.Is(left) && !left.Is(right)) {
+      // Registers result and left alias, right is distinct: untag left into
+      // result, and then multiply by right, giving a tagged result.
+      __ SmiUntag(result, left);
+      __ Mul(result, result, right);
+    } else {
+      DCHECK(!left.Is(result));
+      // Registers result and right alias, left is distinct, or all registers
+      // are distinct: untag right into result, and then multiply by left,
+      // giving a tagged result.
+      __ SmiUntag(result, right);
+      __ Mul(result, left, result);
+    }
+  }
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = ToRegister(instr->result());
+  __ Mov(result, 0);
+
+  PushSafepointRegistersScope scope(this);
+  // NumberTagU and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD: public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
+  } else {
+    __ B(deferred->entry());
+  }
+
+  __ Bind(deferred->exit());
+  __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
+                                    LOperand* value,
+                                    LOperand* temp1,
+                                    LOperand* temp2) {
+  Label slow, convert_and_store;
+  Register src = ToRegister32(value);
+  Register dst = ToRegister(instr->result());
+  Register scratch1 = ToRegister(temp1);
+
+  if (FLAG_inline_new) {
+    Register scratch2 = ToRegister(temp2);
+    __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
+    __ B(&convert_and_store);
+  }
+
+  // Slow case: call the runtime system to do the number allocation.
+  __ Bind(&slow);
+  // TODO(3095996): Put a valid pointer value in the stack slot where the
+  // result register is stored, as this register is in the pointer map but
+  // contains an integer value.
+  __ Mov(dst, 0);
+  {
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    // NumberTagU and NumberTagD use the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // They only call Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(x0, dst);
+  }
+
+  // Convert number to floating point and store in the newly allocated heap
+  // number.
+  __ Bind(&convert_and_store);
+  DoubleRegister dbl_scratch = double_scratch();
+  __ Ucvtf(dbl_scratch, src);
+  __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+  class DeferredNumberTagU: public LDeferredCode {
+   public:
+    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredNumberTagU(instr_,
+                                      instr_->value(),
+                                      instr_->temp1(),
+                                      instr_->temp2());
+    }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LNumberTagU* instr_;
+  };
+
+  Register value = ToRegister32(instr->value());
+  Register result = ToRegister(instr->result());
+
+  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+  __ Cmp(value, Smi::kMaxValue);
+  __ B(hi, deferred->entry());
+  __ SmiTag(result, value.X());
+  __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = ToRegister(instr->temp());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+
+  Label done, load_smi;
+
+  // Work out what untag mode we're working with.
+  HValue* value = instr->hydrogen()->value();
+  NumberUntagDMode mode = value->representation().IsSmi()
+      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    __ JumpIfSmi(input, &load_smi);
+
+    Label convert_undefined;
+
+    // Heap number map check.
+    if (can_convert_undefined_to_nan) {
+      __ JumpIfNotHeapNumber(input, &convert_undefined);
+    } else {
+      DeoptimizeIfNotHeapNumber(input, instr);
+    }
+
+    // Load heap number.
+    __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
+    if (instr->hydrogen()->deoptimize_on_minus_zero()) {
+      DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
+    }
+    __ B(&done);
+
+    if (can_convert_undefined_to_nan) {
+      __ Bind(&convert_undefined);
+      DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+                          Deoptimizer::kNotAHeapNumberUndefined);
+
+      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+      __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+      __ B(&done);
+    }
+
+  } else {
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+    // Fall through to load_smi.
+  }
+
+  // Smi to double register conversion.
+  __ Bind(&load_smi);
+  __ SmiUntagToDouble(result, input);
+
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  DCHECK(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
+  __ PushPreamble(instr->argc(), kPointerSize);
+}
+
+
+void LCodeGen::DoPushArguments(LPushArguments* instr) {
+  MacroAssembler::PushPopQueue args(masm());
+
+  for (int i = 0; i < instr->ArgumentCount(); ++i) {
+    LOperand* arg = instr->argument(i);
+    if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
+      Abort(kDoPushArgumentNotImplementedForDoubleType);
+      return;
+    }
+    args.Queue(ToRegister(arg));
+  }
+
+  // The preamble was done by LPreparePushArguments.
+  args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
+
+  RecordPushedArgumentsDelta(instr->ArgumentCount());
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in x0. Since we're leaving
+    // the code managed by the register allocator and tearing down the frame,
+    // it's safe to write to the context register.
+    __ Push(x0);
+    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntime(Runtime::kTraceExit);
+  }
+
+  if (info()->saves_caller_doubles()) {
+    RestoreCallerDoubles();
+  }
+
+  if (NeedsEagerFrame()) {
+    Register stack_pointer = masm()->StackPointer();
+    __ Mov(stack_pointer, fp);
+    __ Pop(fp, lr);
+  }
+
+  if (instr->has_constant_parameter_count()) {
+    int parameter_count = ToInteger32(instr->constant_parameter_count());
+    __ Drop(parameter_count + 1);
+  } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
+    Register parameter_count = ToRegister(instr->parameter_count());
+    __ DropBySMI(parameter_count);
+  }
+  __ Ret();
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+                                           Register temp,
+                                           LOperand* index,
+                                           String::Encoding encoding) {
+  if (index->IsConstantOperand()) {
+    int offset = ToInteger32(LConstantOperand::cast(index));
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset *= kUC16Size;
+    }
+    STATIC_ASSERT(kCharSize == 1);
+    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+  }
+
+  __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    return MemOperand(temp, ToRegister32(index), SXTW);
+  } else {
+    STATIC_ASSERT(kUC16Size == 2);
+    return MemOperand(temp, ToRegister32(index), SXTW, 1);
+  }
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+
+  if (FLAG_debug_code) {
+    // Even though this lithium instruction comes with a temp register, we
+    // can't use it here because we want to use "AtStart" constraints on the
+    // inputs and the debug code here needs a scratch register.
+    UseScratchRegisterScope temps(masm());
+    Register dbg_temp = temps.AcquireX();
+
+    __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
+
+    __ And(dbg_temp, dbg_temp,
+           Operand(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
+                             ? one_byte_seq_type : two_byte_seq_type));
+    __ Check(eq, kUnexpectedStringType);
+  }
+
+  MemOperand operand =
+      BuildSeqStringOperand(string, temp, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ Ldrb(result, operand);
+  } else {
+    __ Ldrh(result, operand);
+  }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register value = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (FLAG_debug_code) {
+    DCHECK(ToRegister(instr->context()).is(cp));
+    Register index = ToRegister(instr->index());
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    int encoding_mask =
+        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+        ? one_byte_seq_type : two_byte_seq_type;
+    __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
+                                 encoding_mask);
+  }
+  MemOperand operand =
+      BuildSeqStringOperand(string, temp, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ Strb(value, operand);
+  } else {
+    __ Strh(value, operand);
+  }
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  HChange* hchange = instr->hydrogen();
+  Register input = ToRegister(instr->value());
+  Register output = ToRegister(instr->result());
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      hchange->value()->CheckFlag(HValue::kUint32)) {
+    DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
+  }
+  __ SmiTag(output, input);
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  if (instr->needs_check()) {
+    DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
+  }
+
+  __ SmiUntag(result, input);
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  LOperand* right_op = instr->right();
+  Register left = ToRegister32(instr->left());
+  Register result = ToRegister32(instr->result());
+
+  if (right_op->IsRegister()) {
+    Register right = ToRegister32(instr->right());
+    switch (instr->op()) {
+      case Token::ROR: __ Ror(result, left, right); break;
+      case Token::SAR: __ Asr(result, left, right); break;
+      case Token::SHL: __ Lsl(result, left, right); break;
+      case Token::SHR:
+        __ Lsr(result, left, right);
+        if (instr->can_deopt()) {
+          // If `left >>> right` >= 0x80000000, the result is not representable
+          // in a signed 32-bit smi.
+          DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      default: UNREACHABLE();
+    }
+  } else {
+    DCHECK(right_op->IsConstantOperand());
+    int shift_count = JSShiftAmountFromLConstant(right_op);
+    if (shift_count == 0) {
+      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+        DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
+      }
+      __ Mov(result, left, kDiscardForSameWReg);
+    } else {
+      switch (instr->op()) {
+        case Token::ROR: __ Ror(result, left, shift_count); break;
+        case Token::SAR: __ Asr(result, left, shift_count); break;
+        case Token::SHL: __ Lsl(result, left, shift_count); break;
+        case Token::SHR: __ Lsr(result, left, shift_count); break;
+        default: UNREACHABLE();
+      }
+    }
+  }
+}
+
+
+void LCodeGen::DoShiftS(LShiftS* instr) {
+  LOperand* right_op = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+
+  if (right_op->IsRegister()) {
+    Register right = ToRegister(instr->right());
+
+    // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
+    // Since we're handling smis in X registers, we have to extract these bits
+    // explicitly.
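+    // (Illustrative: a tagged amount s << 32 keeps its low five payload bits
+    // at bit 32, so the Ubfx below extracts s & 0x1f directly from the
+    // tagged value.)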
+    __ Ubfx(result, right, kSmiShift, 5);
+
+    switch (instr->op()) {
+      case Token::ROR: {
+        // This is the only case that needs a scratch register. To keep things
+        // simple for the other cases, borrow a MacroAssembler scratch register.
+        UseScratchRegisterScope temps(masm());
+        Register temp = temps.AcquireW();
+        __ SmiUntag(temp, left);
+        __ Ror(result.W(), temp.W(), result.W());
+        __ SmiTag(result);
+        break;
+      }
+      case Token::SAR:
+        __ Asr(result, left, result);
+        __ Bic(result, result, kSmiShiftMask);
+        break;
+      case Token::SHL:
+        __ Lsl(result, left, result);
+        break;
+      case Token::SHR:
+        __ Lsr(result, left, result);
+        __ Bic(result, result, kSmiShiftMask);
+        if (instr->can_deopt()) {
+          // If `left >>> right` >= 0x80000000, the result is not representable
+          // in a signed 32-bit smi.
+          DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      default: UNREACHABLE();
+    }
+  } else {
+    DCHECK(right_op->IsConstantOperand());
+    int shift_count = JSShiftAmountFromLConstant(right_op);
+    if (shift_count == 0) {
+      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
+        DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
+      }
+      __ Mov(result, left);
+    } else {
+      switch (instr->op()) {
+        case Token::ROR:
+          __ SmiUntag(result, left);
+          __ Ror(result.W(), result.W(), shift_count);
+          __ SmiTag(result);
+          break;
+        case Token::SAR:
+          __ Asr(result, left, shift_count);
+          __ Bic(result, result, kSmiShiftMask);
+          break;
+        case Token::SHL:
+          __ Lsl(result, left, shift_count);
+          break;
+        case Token::SHR:
+          __ Lsr(result, left, shift_count);
+          __ Bic(result, result, kSmiShiftMask);
+          break;
+        default: UNREACHABLE();
+      }
+    }
+  }
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+  __ Debug("LDebugBreak", 0, BREAK);
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Register scratch1 = x5;
+  Register scratch2 = x6;
+  DCHECK(instr->IsMarkedAsCall());
+
+  // TODO(all): if Mov could handle an object in new space then it could be
+  // used here.
+  __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
+  __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
+  __ Push(scratch1, scratch2);
+  CallRuntime(Runtime::kDeclareGlobals, instr);
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  LoadContextFromDeferred(instr->context());
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck: public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStackCheck* instr_;
+  };
+
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+    __ B(hs, &done);
+
+    PredictableCodeSizeScope predictable(masm_,
+                                         Assembler::kCallSizeWithRelocation);
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(cp));
+    CallCode(isolate()->builtins()->StackCheck(),
+             RelocInfo::CODE_TARGET,
+             instr);
+    __ Bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new(zone()) DeferredStackCheck(this, instr);
+    __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
+    __ B(lo, deferred_stack_check->entry());
+
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    __ Bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting the call and the safepoint
+    // in the deferred code.
+  }
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  Register temp = ToRegister(instr->temp());
+  __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
+  __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  Register scratch = ToRegister(instr->temp());
+  MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ Ldr(scratch, target);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
+                       Deoptimizer::kHole);
+    } else {
+      __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
+    }
+  }
+
+  __ Str(value, target);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context, static_cast<int>(target.offset()), value,
+                              scratch, GetLinkRegisterState(), kSaveFPRegs,
+                              EMIT_REMEMBERED_SET, check_needed);
+  }
+  __ Bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
+  Register ext_ptr = ToRegister(instr->elements());
+  Register key = no_reg;
+  Register scratch = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    DCHECK(instr->temp() == NULL);
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xf0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+    scratch = ToRegister(instr->temp());
+  }
+
+  MemOperand dst =
+    PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
+                                     key_is_constant, constant_key,
+                                     elements_kind,
+                                     instr->base_offset());
+
+  if (elements_kind == FLOAT32_ELEMENTS) {
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    DoubleRegister dbl_scratch = double_scratch();
+    __ Fcvt(dbl_scratch.S(), value);
+    __ Str(dbl_scratch.S(), dst);
+  } else if (elements_kind == FLOAT64_ELEMENTS) {
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    __ Str(value, dst);
+  } else {
+    Register value = ToRegister(instr->value());
+
+    switch (elements_kind) {
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+      case INT8_ELEMENTS:
+        __ Strb(value, dst);
+        break;
+      case INT16_ELEMENTS:
+      case UINT16_ELEMENTS:
+        __ Strh(value, dst);
+        break;
+      case INT32_ELEMENTS:
+      case UINT32_ELEMENTS:
+        __ Str(value.W(), dst);
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
+  Register elements = ToRegister(instr->elements());
+  DoubleRegister value = ToDoubleRegister(instr->value());
+  MemOperand mem_op;
+
+  if (instr->key()->IsConstantOperand()) {
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xf0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    int offset = instr->base_offset() + constant_key * kDoubleSize;
+    mem_op = MemOperand(elements, offset);
+  } else {
+    Register store_base = ToRegister(instr->temp());
+    Register key = ToRegister(instr->key());
+    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
+                                      instr->hydrogen()->elements_kind(),
+                                      instr->hydrogen()->representation(),
+                                      instr->base_offset());
+  }
+
+  if (instr->NeedsCanonicalization()) {
+    __ CanonicalizeNaN(double_scratch(), value);
+    __ Str(double_scratch(), mem_op);
+  } else {
+    __ Str(value, mem_op);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = no_reg;
+  Register store_base = no_reg;
+  Register key = no_reg;
+  MemOperand mem_op;
+
+  if (!instr->key()->IsConstantOperand() ||
+      instr->hydrogen()->NeedsWriteBarrier()) {
+    scratch = ToRegister(instr->temp());
+  }
+
+  Representation representation = instr->hydrogen()->value()->representation();
+  if (instr->key()->IsConstantOperand()) {
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    int offset = instr->base_offset() +
+        ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+    if (representation.IsInteger32()) {
+      DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+      STATIC_ASSERT(kSmiTag == 0);
+      mem_op = UntagSmiMemOperand(store_base, offset);
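+      // (Assuming the usual little-endian arm64 smi layout, with the 32-bit
+      // payload in the upper word, UntagSmiMemOperand addresses that word so
+      // the 32-bit Store below writes the payload directly.)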
+    } else {
+      mem_op = MemOperand(store_base, offset);
+    }
+  } else {
+    store_base = scratch;
+    key = ToRegister(instr->key());
+    bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
+
+    mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
+                                      instr->hydrogen()->elements_kind(),
+                                      representation, instr->base_offset());
+  }
+
+  __ Store(value, mem_op, representation);
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    DCHECK(representation.IsTagged());
+    // This assignment may cause element_addr to alias store_base.
+    Register element_addr = scratch;
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute the address of the modified element and store it in the
+    // scratch register.
+    __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
+    __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
+                   kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
+                   instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+  class DeferredMaybeGrowElements final : public LDeferredCode {
+   public:
+    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMaybeGrowElements* instr_;
+  };
+
+  Register result = x0;
+  DeferredMaybeGrowElements* deferred =
+      new (zone()) DeferredMaybeGrowElements(this, instr);
+  LOperand* key = instr->key();
+  LOperand* current_capacity = instr->current_capacity();
+
+  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+  DCHECK(key->IsConstantOperand() || key->IsRegister());
+  DCHECK(current_capacity->IsConstantOperand() ||
+         current_capacity->IsRegister());
+
+  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    if (constant_key >= constant_capacity) {
+      // Deferred case.
+      __ B(deferred->entry());
+    }
+  } else if (key->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    __ Cmp(ToRegister(current_capacity), Operand(constant_key));
+    __ B(le, deferred->entry());
+  } else if (current_capacity->IsConstantOperand()) {
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    __ Cmp(ToRegister(key), Operand(constant_capacity));
+    __ B(ge, deferred->entry());
+  } else {
+    __ Cmp(ToRegister(key), ToRegister(current_capacity));
+    __ B(ge, deferred->entry());
+  }
+
+  __ Mov(result, ToRegister(instr->elements()));
+
+  __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = x0;
+  __ Mov(result, 0);
+
+  // We have to call a stub.
+  {
+    PushSafepointRegistersScope scope(this);
+    __ Move(result, ToRegister(instr->object()));
+
+    LOperand* key = instr->key();
+    if (key->IsConstantOperand()) {
+      __ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key))));
+    } else {
+      __ Mov(x3, ToRegister(key));
+      __ SmiTag(x3);
+    }
+
+    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+                               instr->hydrogen()->kind());
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(
+        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    __ StoreToSafepointRegisterSlot(result, result);
+  }
+
+  // Deopt on smi, which means the elements array changed to dictionary mode.
+  DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Representation representation = instr->representation();
+
+  Register object = ToRegister(instr->object());
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    Register value = ToRegister(instr->value());
+    __ Store(value, MemOperand(object, offset), representation);
+    return;
+  }
+
+  __ AssertNotSmi(object);
+
+  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    FPRegister value = ToDoubleRegister(instr->value());
+    __ Str(value, FieldMemOperand(object, offset));
+    return;
+  }
+
+  DCHECK(!representation.IsSmi() ||
+         !instr->value()->IsConstantOperand() ||
+         IsInteger32Constant(LConstantOperand::cast(instr->value())));
+
+  if (instr->hydrogen()->has_transition()) {
+    Handle<Map> transition = instr->hydrogen()->transition_map();
+    AddDeprecationDependency(transition);
+    // Store the new map value.
+    Register new_map_value = ToRegister(instr->temp0());
+    __ Mov(new_map_value, Operand(transition));
+    __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
+    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+      // Update the write barrier for the map field.
+      __ RecordWriteForMap(object,
+                           new_map_value,
+                           ToRegister(instr->temp1()),
+                           GetLinkRegisterState(),
+                           kSaveFPRegs);
+    }
+  }
+
+  // Do the store.
+  Register destination;
+  if (access.IsInobject()) {
+    destination = object;
+  } else {
+    Register temp0 = ToRegister(instr->temp0());
+    __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    destination = temp0;
+  }
+
+  if (FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    FPRegister value = ToDoubleRegister(instr->value());
+    __ Str(value, FieldMemOperand(object, offset));
+  } else if (representation.IsSmi() &&
+             instr->hydrogen()->value()->representation().IsInteger32()) {
+    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+#ifdef DEBUG
+    Register temp0 = ToRegister(instr->temp0());
+    __ Ldr(temp0, FieldMemOperand(destination, offset));
+    __ AssertSmi(temp0);
+    // If destination aliased temp0, restore it to the address calculated
+    // earlier.
+    if (destination.Is(temp0)) {
+      DCHECK(!access.IsInobject());
+      __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    }
+#endif
+    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+    STATIC_ASSERT(kSmiTag == 0);
+    Register value = ToRegister(instr->value());
+    __ Store(value, UntagSmiFieldMemOperand(destination, offset),
+             Representation::Integer32());
+  } else {
+    Register value = ToRegister(instr->value());
+    __ Store(value, FieldMemOperand(destination, offset), representation);
+  }
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    Register value = ToRegister(instr->value());
+    __ RecordWriteField(destination,
+                        offset,
+                        value,                        // Clobbered.
+                        ToRegister(instr->temp1()),   // Clobbered.
+                        GetLinkRegisterState(),
+                        kSaveFPRegs,
+                        EMIT_REMEMBERED_SET,
+                        instr->hydrogen()->SmiCheckForWriteBarrier(),
+                        instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+  }
+
+  __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
+  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).Is(x1));
+  DCHECK(ToRegister(instr->right()).Is(x0));
+  StringAddStub stub(isolate(),
+                     instr->hydrogen()->flags(),
+                     instr->hydrogen()->pretenure_flag());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt: public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new(zone()) DeferredStringCharCodeAt(this, instr);
+
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister32(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
+  __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Mov(result, 0);
+
+  PushSafepointRegistersScope scope(this);
+  __ Push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  Register index = ToRegister(instr->index());
+  __ SmiTagAndPush(index);
+
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+                          instr->context());
+  __ AssertSmi(x0);
+  __ SmiUntag(x0);
+  __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode: public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new(zone()) DeferredStringCharFromCode(this, instr);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister32(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  __ Cmp(char_code, String::kMaxOneByteCharCode);
+  __ B(hi, deferred->entry());
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+  __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
+  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+  __ B(eq, deferred->entry());
+  __ Bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Mov(result, 0);
+
+  PushSafepointRegistersScope scope(this);
+  __ SmiTagAndPush(char_code);
+  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(x1));
+  DCHECK(ToRegister(instr->right()).is(x0));
+
+  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+
+  EmitCompareAndBranch(instr, TokenToCondition(instr->op(), false), x0, 0);
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  Register result = ToRegister32(instr->result());
+  Register left = ToRegister32(instr->left());
+  Operand right = ToShiftedRightOperand32(instr->right(), instr);
+
+  if (can_overflow) {
+    __ Subs(result, left, right);
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+  } else {
+    __ Sub(result, left, right);
+  }
+}
+
+
+void LCodeGen::DoSubS(LSubS* instr) {
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  Register result = ToRegister(instr->result());
+  Register left = ToRegister(instr->left());
+  Operand right = ToOperand(instr->right());
+  if (can_overflow) {
+    __ Subs(result, left, right);
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+  } else {
+    __ Sub(result, left, right);
+  }
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
+                                   LOperand* value,
+                                   LOperand* temp1,
+                                   LOperand* temp2) {
+  Register input = ToRegister(value);
+  Register scratch1 = ToRegister(temp1);
+  DoubleRegister dbl_scratch1 = double_scratch();
+
+  Label done;
+
+  if (instr->truncating()) {
+    Register output = ToRegister(instr->result());
+    Label check_bools;
+
+    // If it's not a heap number, jump to the boolean and undefined checks.
+    __ JumpIfNotHeapNumber(input, &check_bools);
+
+    // A heap number: load the value and convert to int32 using a truncating
+    // conversion.
+    __ TruncateHeapNumberToI(output, input);
+    __ B(&done);
+
+    __ Bind(&check_bools);
+
+    Register true_root = output;
+    Register false_root = scratch1;
+    __ LoadTrueFalseRoots(true_root, false_root);
+    __ Cmp(input, true_root);
+    __ Cset(output, eq);
+    __ Ccmp(input, false_root, ZFlag, ne);
+    __ B(eq, &done);
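+    // (Illustrative: input == true takes the forced-Z path and returns 1;
+    // input == false compares equal and returns 0; anything else falls
+    // through to the undefined check below.)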
+
+    // Output already contains zero; undefined is converted to zero for
+    // truncating conversions.
+    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
+                        Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+  } else {
+    Register output = ToRegister32(instr->result());
+    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
+
+    DeoptimizeIfNotHeapNumber(input, instr);
+
+    // A heap number: load value and convert to int32 using non-truncating
+    // function. If the result is out of range, branch to deoptimize.
+    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
+    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ Cmp(output, 0);
+      __ B(ne, &done);
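+      // A zero result may stem from -0.0, so move the raw double bits into a
+      // core register and deoptimize if the sign bit is set.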
+      __ Fmov(scratch1, dbl_scratch1);
+      DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
+    }
+  }
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI: public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() {
+      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
+                                     instr_->temp2());
+    }
+
+    virtual LInstruction* instr() { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
+  Register input = ToRegister(instr->value());
+  Register output = ToRegister(instr->result());
+
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(output, input);
+  } else {
+    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+
+    __ JumpIfNotSmi(input, deferred->entry());
+    __ SmiUntag(output, input);
+    __ Bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  DCHECK(ToRegister(instr->value()).Is(x0));
+  DCHECK(ToRegister(instr->result()).Is(x0));
+  __ Push(x0);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object = ToRegister(instr->object());
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = instr->from_kind();
+  ElementsKind to_kind = instr->to_kind();
+
+  Label not_applicable;
+
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register temp1 = ToRegister(instr->temp1());
+    Register new_map = ToRegister(instr->temp2());
+    __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
+    __ Mov(new_map, Operand(to_map));
+    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
+    // Write barrier.
+    __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
+                         kDontSaveFPRegs);
+  } else {
+    {
+      UseScratchRegisterScope temps(masm());
+      // Use the temp register only in a restricted scope - the codegen checks
+      // that we do not use any register across a call.
+      __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
+                  DONT_DO_SMI_CHECK);
+    }
+    DCHECK(object.is(x0));
+    DCHECK(ToRegister(instr->context()).is(cp));
+    PushSafepointRegistersScope scope(this);
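+    // The stub expects the object in x0 (checked above) and the target map
+    // in x1.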
+    __ Mov(x1, Operand(to_map));
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+  __ Bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
+  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+  __ Bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  __ TruncateDoubleToI(result, input);
+  if (instr->tag_result()) {
+    __ SmiTag(result, result);
+  }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  DCHECK(ToRegister(instr->value()).is(x3));
+  DCHECK(ToRegister(instr->result()).is(x0));
+  Label end, do_call;
+  Register value_register = ToRegister(instr->value());
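+  // Smis are always numbers, so answer "number" directly and skip the stub
+  // call.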
+  __ JumpIfNotSmi(value_register, &do_call);
+  __ Mov(x0, Immediate(isolate()->factory()->number_string()));
+  __ B(&end);
+  __ Bind(&do_call);
+  TypeofStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ Bind(&end);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Handle<String> type_name = instr->type_literal();
+  Label* true_label = instr->TrueLabel(chunk_);
+  Label* false_label = instr->FalseLabel(chunk_);
+  Register value = ToRegister(instr->value());
+
+  Factory* factory = isolate()->factory();
+  if (String::Equals(type_name, factory->number_string())) {
+    __ JumpIfSmi(value, true_label);
+
+    int true_block = instr->TrueDestination(chunk_);
+    int false_block = instr->FalseDestination(chunk_);
+    int next_block = GetNextEmittedBlock();
+
+    if (true_block == false_block) {
+      EmitGoto(true_block);
+    } else if (true_block == next_block) {
+      __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
+    } else {
+      __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
+      if (false_block != next_block) {
+        __ B(chunk_->GetAssemblyLabel(false_block));
+      }
+    }
+
+  } else if (String::Equals(type_name, factory->string_string())) {
+    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
+    Register map = ToRegister(instr->temp1());
+    Register scratch = ToRegister(instr->temp2());
+
+    __ JumpIfSmi(value, false_label);
+    __ CompareObjectType(value, map, scratch, FIRST_NONSTRING_TYPE);
+    EmitBranch(instr, lt);
+
+  } else if (String::Equals(type_name, factory->symbol_string())) {
+    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
+    Register map = ToRegister(instr->temp1());
+    Register scratch = ToRegister(instr->temp2());
+
+    __ JumpIfSmi(value, false_label);
+    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
+    EmitBranch(instr, eq);
+
+  } else if (String::Equals(type_name, factory->boolean_string())) {
+    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
+    __ CompareRoot(value, Heap::kFalseValueRootIndex);
+    EmitBranch(instr, eq);
+
+  } else if (String::Equals(type_name, factory->undefined_string())) {
+    DCHECK(instr->temp1() != NULL);
+    Register scratch = ToRegister(instr->temp1());
+
+    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
+    __ JumpIfSmi(value, false_label);
+    // Check for undetectable objects and jump to the true branch in this case.
+    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);
+
+  } else if (String::Equals(type_name, factory->function_string())) {
+    DCHECK(instr->temp1() != NULL);
+    Register scratch = ToRegister(instr->temp1());
+
+    __ JumpIfSmi(value, false_label);
+    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ And(scratch, scratch,
+           (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+    EmitCompareAndBranch(instr, eq, scratch, 1 << Map::kIsCallable);
+
+  } else if (String::Equals(type_name, factory->object_string())) {
+    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
+    Register map = ToRegister(instr->temp1());
+    Register scratch = ToRegister(instr->temp2());
+
+    __ JumpIfSmi(value, false_label);
+    __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    __ JumpIfObjectType(value, map, scratch, FIRST_JS_RECEIVER_TYPE,
+                        false_label, lt);
+    // Check for callable or undetectable objects => false.
+    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+    EmitTestAndBranch(instr, eq, scratch,
+                      (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)       \
+  } else if (String::Equals(type_name, factory->type##_string())) { \
+    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));   \
+    Register map = ToRegister(instr->temp1());                      \
+                                                                    \
+    __ JumpIfSmi(value, false_label);                               \
+    __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));    \
+    __ CompareRoot(map, Heap::k##Type##MapRootIndex);               \
+    EmitBranch(instr, eq);
+  SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+    // clang-format on
+
+  } else {
+    __ B(false_label);
+  }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  Register map = ToRegister(instr->map());
+  Register temp = ToRegister(instr->temp());
+  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ Cmp(map, temp);
+  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // If the receiver is null or undefined, we have to pass the global object as
+  // a receiver to normal functions. Values have to be passed unchanged to
+  // builtins and strict-mode functions.
+  Label global_object, done, copy_receiver;
+
+  if (!instr->hydrogen()->known_function()) {
+    __ Ldr(result, FieldMemOperand(function,
+                                   JSFunction::kSharedFunctionInfoOffset));
+
+    // CompilerHints is an int32 field. See objects.h.
+    __ Ldr(result.W(),
+           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));
+
+    // Do not transform the receiver to object for strict mode functions.
+    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);
+
+    // Do not transform the receiver to object for builtins.
+    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
+  }
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
+  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
+
+  // Deoptimize if the receiver is not a JS object.
+  DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
+  __ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE);
+  __ B(ge, &copy_receiver);
+  Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
+
+  __ Bind(&global_object);
+  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
+  __ Ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+  __ B(&done);
+
+  __ Bind(&copy_receiver);
+  __ Mov(result, receiver);
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ Push(object);
+  __ Push(index);
+  __ Mov(cp, 0);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(x0, result);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble final : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register result,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {
+    }
+    void Generate() override {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+
+  __ AssertSmi(index);
+
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, result, object, index);
+
+  Label out_of_object, done;
+
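+  // Bit 0 of the untagged index flags a field holding a mutable HeapNumber;
+  // such loads take the deferred runtime path. The arithmetic shift right by
+  // one strips the flag bit while keeping the value smi-tagged.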
+  __ TestAndBranchIfAnySet(
+      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
+  __ Mov(index, Operand(index, ASR, 1));
+
+  __ Cmp(index, Smi::FromInt(0));
+  __ B(lt, &out_of_object);
+
+  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));
+
+  __ B(&done);
+
+  __ Bind(&out_of_object);
+  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // The index equals the negated out-of-object property index plus 1.
+  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+  __ Ldr(result, FieldMemOperand(result,
+                                 FixedArray::kHeaderSize - kPointerSize));
+  __ Bind(deferred->exit());
+  __ Bind(&done);
+}
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+  Register context = ToRegister(instr->context());
+  __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+  Handle<ScopeInfo> scope_info = instr->scope_info();
+  __ Push(scope_info);
+  __ Push(ToRegister(instr->function()));
+  CallRuntime(Runtime::kPushBlockContext, instr);
+  RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/arm64/lithium-codegen-arm64.h b/src/crankshaft/arm64/lithium-codegen-arm64.h
new file mode 100644
index 0000000..18856da
--- /dev/null
+++ b/src/crankshaft/arm64/lithium-codegen-arm64.h
@@ -0,0 +1,460 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
+
+#include "src/crankshaft/arm64/lithium-arm64.h"
+
+#include "src/ast/scopes.h"
+#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/deoptimizer.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+class BranchGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : LCodeGenBase(chunk, assembler, info),
+        jump_table_(4, info->zone()),
+        scope_(info->scope()),
+        deferred_(8, info->zone()),
+        frame_is_built_(false),
+        safepoints_(info->zone()),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple),
+        pushed_arguments_(0) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  // Simple accessors.
+  Scope* scope() const { return scope_; }
+
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub() ||
+        info()->requires_frame();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
+  LinkRegisterStatus GetLinkRegisterState() const {
+    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+  }
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  // Support for converting LOperands to assembler types.
+  Register ToRegister(LOperand* op) const;
+  Register ToRegister32(LOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  Operand ToOperand32(LOperand* op);
+  enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
+  MemOperand ToMemOperand(LOperand* op,
+                          StackMode stack_mode = kCanUseStackPointer) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+
+  template <class LI>
+  Operand ToShiftedRightOperand32(LOperand* right, LI* shift_info);
+
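+  // JavaScript shift counts are taken modulo 32, hence the mask with 0x1f.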
+  int JSShiftAmountFromLConstant(LOperand* constant) {
+    return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
+  }
+
+  // TODO(jbramley): Examine these helpers and check that they make sense.
+  // IsInteger32Constant returns true for smi constants, for example.
+  bool IsInteger32Constant(LConstantOperand* op) const;
+  bool IsSmi(LConstantOperand* op) const;
+
+  int32_t ToInteger32(LConstantOperand* op) const;
+  Smi* ToSmi(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  // Return a double scratch register which can be used locally
+  // when generating code for a lithium instruction.
+  DoubleRegister double_scratch() { return crankshaft_fp_scratch; }
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredMathAbsTagged(LMathAbsTagged* instr,
+                               Label* exit,
+                               Label* allocation_entry);
+
+  void DoDeferredNumberTagU(LInstruction* instr,
+                            LOperand* value,
+                            LOperand* temp1,
+                            LOperand* temp2);
+  void DoDeferredTaggedToI(LTaggedToI* instr,
+                           LOperand* value,
+                           LOperand* temp1,
+                           LOperand* temp2);
+  void DoDeferredAllocate(LAllocate* instr);
+  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register result,
+                                   Register object,
+                                   Register index);
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block);
+  void DoGap(LGap* instr);
+
+  // Generic version of EmitBranch. It contains some code to avoid emitting a
+  // branch to the next emitted basic block where we could simply fall
+  // through. Don't use it directly; prefer one of the helpers such as
+  // LCodeGen::EmitBranch or LCodeGen::EmitCompareAndBranch.
+  template<class InstrType>
+  void EmitBranchGeneric(InstrType instr,
+                         const BranchGenerator& branch);
+
+  template<class InstrType>
+  void EmitBranch(InstrType instr, Condition condition);
+
+  template<class InstrType>
+  void EmitCompareAndBranch(InstrType instr,
+                            Condition condition,
+                            const Register& lhs,
+                            const Operand& rhs);
+
+  template<class InstrType>
+  void EmitTestAndBranch(InstrType instr,
+                         Condition condition,
+                         const Register& value,
+                         uint64_t mask);
+
+  template<class InstrType>
+  void EmitBranchIfNonZeroNumber(InstrType instr,
+                                 const FPRegister& value,
+                                 const FPRegister& scratch);
+
+  template<class InstrType>
+  void EmitBranchIfHeapNumber(InstrType instr,
+                              const Register& value);
+
+  template<class InstrType>
+  void EmitBranchIfRoot(InstrType instr,
+                        const Register& value,
+                        Heap::RootListIndex index);
+
+  // Emits optimized code to deep-copy the contents of statically known object
+  // graphs (e.g. object literal boilerplate). Expects a pointer to the
+  // allocated destination object in the result register, and a pointer to the
+  // source object in the source register.
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    Register scratch,
+                    int* offset,
+                    AllocationSiteMode mode);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+  template <class T>
+  void EmitVectorStoreICRegisters(T* instr);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split into the true and false
+  // labels should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+                         SmiCheck check_needed);
+
+  MemOperand BuildSeqStringOperand(Register string,
+                                   Register temp,
+                                   LOperand* index,
+                                   String::Encoding encoding);
+  void DeoptimizeBranch(LInstruction* instr,
+                        Deoptimizer::DeoptReason deopt_reason,
+                        BranchType branch_type, Register reg = NoReg,
+                        int bit = -1,
+                        Deoptimizer::BailoutType* override_bailout_type = NULL);
+  void Deoptimize(LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
+                  Deoptimizer::BailoutType* override_bailout_type = NULL);
+  void DeoptimizeIf(Condition cond, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason);
+  void DeoptimizeIfZero(Register rt, LInstruction* instr,
+                        Deoptimizer::DeoptReason deopt_reason);
+  void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
+                           Deoptimizer::DeoptReason deopt_reason);
+  void DeoptimizeIfNegative(Register rt, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason);
+  void DeoptimizeIfSmi(Register rt, LInstruction* instr,
+                       Deoptimizer::DeoptReason deopt_reason);
+  void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
+                          Deoptimizer::DeoptReason deopt_reason);
+  void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
+                        LInstruction* instr,
+                        Deoptimizer::DeoptReason deopt_reason);
+  void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
+                           LInstruction* instr,
+                           Deoptimizer::DeoptReason deopt_reason);
+  void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
+  void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
+                             Deoptimizer::DeoptReason deopt_reason);
+  void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
+                          Deoptimizer::DeoptReason deopt_reason);
+  void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason);
+
+  MemOperand PrepareKeyedExternalArrayOperand(Register key,
+                                              Register base,
+                                              Register scratch,
+                                              bool key_is_smi,
+                                              bool key_is_constant,
+                                              int constant_key,
+                                              ElementsKind elements_kind,
+                                              int base_offset);
+  MemOperand PrepareKeyedArrayOperand(Register base,
+                                      Register elements,
+                                      Register key,
+                                      bool key_is_tagged,
+                                      ElementsKind elements_kind,
+                                      Representation representation,
+                                      int base_offset);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  void AddToTranslation(LEnvironment* environment,
+                        Translation* translation,
+                        LOperand* op,
+                        bool is_tagged,
+                        bool is_uint32,
+                        int* object_index_pointer,
+                        int* dematerialized_index_pointer);
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
+  // Code generation steps.  Returns true if code generation should continue.
+  void GenerateBodyInstructionPre(LInstruction* instr) override;
+  bool GeneratePrologue();
+  bool GenerateDeferredCode();
+  bool GenerateJumpTable();
+  bool GenerateSafepointTable();
+
+  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+  void GenerateOsrPrologue();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, function->nargs, instr);
+  }
+
+  void LoadContextFromDeferred(LOperand* context);
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr,
+                               LOperand* context);
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in x1.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count, int arity,
+                         LInstruction* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordAndWritePosition(int position) override;
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    Safepoint::DeoptMode mode);
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void EnsureSpaceForLazyDeopt(int space_needed) override;
+
+  ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
+  Scope* const scope_;
+  ZoneList<LDeferredCode*> deferred_;
+  bool frame_is_built_;
+
+  // Builder that keeps track of safepoints in the code. The table itself is
+  // emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  // The number of arguments pushed onto the stack, either by this block or by a
+  // predecessor.
+  int pushed_arguments_;
+
+  void RecordPushedArgumentsDelta(int delta) {
+    pushed_arguments_ += delta;
+    DCHECK(pushed_arguments_ >= 0);
+  }
+
+  int old_position_;
+
+  class PushSafepointRegistersScope BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+
+      UseScratchRegisterScope temps(codegen_->masm_);
+      // Preserve the value of lr which must be saved on the stack (the call to
+      // the stub will clobber it).
+      Register to_be_pushed_lr =
+          temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
+      codegen_->masm_->Mov(to_be_pushed_lr, lr);
+      StoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->CallStub(&stub);
+    }
+
+    ~PushSafepointRegistersScope() {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      RestoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->CallStub(&stub);
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() { }
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return (external_exit_ != NULL) ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  int instruction_index_;
+};
+
+
+// This is the abstract class used by EmitBranchGeneric.
+// It is used to emit code for conditional branching. The Emit() function
+// emits code to branch when the condition holds and EmitInverted() emits
+// the branch when the inverted condition is verified.
+//
+// For actual examples of condition see the concrete implementation in
+// lithium-codegen-arm64.cc (e.g. BranchOnCondition, CompareAndBranch).
+class BranchGenerator BASE_EMBEDDED {
+ public:
+  explicit BranchGenerator(LCodeGen* codegen)
+    : codegen_(codegen) { }
+
+  virtual ~BranchGenerator() { }
+
+  virtual void Emit(Label* label) const = 0;
+  virtual void EmitInverted(Label* label) const = 0;
+
+ protected:
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+  LCodeGen* codegen_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_ARM64_LITHIUM_CODEGEN_ARM64_H_
diff --git a/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc b/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc
new file mode 100644
index 0000000..3ef9f63
--- /dev/null
+++ b/src/crankshaft/arm64/lithium-gap-resolver-arm64.cc
@@ -0,0 +1,294 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/arm64/delayed-masm-arm64-inl.h"
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"
+#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM((&masm_))
+
+
+void DelayedGapMasm::EndDelayedUse() {
+  DelayedMasm::EndDelayedUse();
+  if (scratch_register_used()) {
+    DCHECK(ScratchRegister().Is(root));
+    DCHECK(!pending());
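+    // The root register doubled as a scratch register; reload it with its
+    // usual value.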
+    InitializeRootRegister();
+    reset_scratch_register_used();
+  }
+}
+
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner), masm_(owner, owner->masm()), moves_(32, owner->zone()),
+      root_index_(0), in_cycle_(false), saved_destination_(NULL) {
+}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  DCHECK(moves_.is_empty());
+  DCHECK(!masm_.pending());
+
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+
+    // Skip constants to perform them last. They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found when we reach this move again.
+      PerformMove(i);
+      if (in_cycle_) RestoreValue();
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+
+    if (!move.IsEliminated()) {
+      DCHECK(move.source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  __ EndDelayedUse();
+
+  moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph. We first recursively perform any move blocking this one. We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+  LMoveOperands& current_move = moves_[index];
+
+  DCHECK(!current_move.IsPending());
+  DCHECK(!current_move.IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack allocated local. Multiple moves can
+  // be pending because this function is recursive.
+  DCHECK(current_move.source() != NULL);  // Otherwise it will look eliminated.
+  LOperand* destination = current_move.destination();
+  current_move.set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies. Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move it must be moves_[root_index_]
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  current_move.set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    DCHECK(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+
+void LGapResolver::BreakCycle(int index) {
+  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  DCHECK(!in_cycle_);
+
+  // Save the source of this move in a register and remember its destination.
+  // Then mark the move as resolved so the cycle is broken and the other
+  // moves can be performed.
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+
+  if (source->IsRegister()) {
+    AcquireSavedValueRegister();
+    __ Mov(SavedValueRegister(), cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    AcquireSavedValueRegister();
+    __ Load(SavedValueRegister(), cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ Fmov(SavedFPValueRegister(), cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ Load(SavedFPValueRegister(), cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+
+  // Mark this move as resolved.
+  // This move will actually be performed by moving the saved value to this
+  // move's destination in LGapResolver::RestoreValue().
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+  DCHECK(in_cycle_);
+  DCHECK(saved_destination_ != NULL);
+
+  if (saved_destination_->IsRegister()) {
+    __ Mov(cgen_->ToRegister(saved_destination_), SavedValueRegister());
+    ReleaseSavedValueRegister();
+  } else if (saved_destination_->IsStackSlot()) {
+    __ Store(SavedValueRegister(), cgen_->ToMemOperand(saved_destination_));
+    ReleaseSavedValueRegister();
+  } else if (saved_destination_->IsDoubleRegister()) {
+    __ Fmov(cgen_->ToDoubleRegister(saved_destination_),
+            SavedFPValueRegister());
+  } else if (saved_destination_->IsDoubleStackSlot()) {
+    __ Store(SavedFPValueRegister(), cgen_->ToMemOperand(saved_destination_));
+  } else {
+    UNREACHABLE();
+  }
+
+  in_cycle_ = false;
+  saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+
+  if (source->IsRegister()) {
+    Register source_register = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      __ Mov(cgen_->ToRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      __ Store(source_register, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ Load(cgen_->ToRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      EmitStackSlotMove(index);
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      if (cgen_->IsSmi(constant_source)) {
+        __ Mov(dst, cgen_->ToSmi(constant_source));
+      } else if (cgen_->IsInteger32Constant(constant_source)) {
+        __ Mov(dst, cgen_->ToInteger32(constant_source));
+      } else {
+        __ LoadObject(dst, cgen_->ToHandle(constant_source));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      DoubleRegister result = cgen_->ToDoubleRegister(destination);
+      __ Fmov(result, cgen_->ToDouble(constant_source));
+    } else {
+      DCHECK(destination->IsStackSlot());
+      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      if (cgen_->IsSmi(constant_source)) {
+        Smi* smi = cgen_->ToSmi(constant_source);
+        __ StoreConstant(reinterpret_cast<intptr_t>(smi),
+                         cgen_->ToMemOperand(destination));
+      } else if (cgen_->IsInteger32Constant(constant_source)) {
+        __ StoreConstant(cgen_->ToInteger32(constant_source),
+                         cgen_->ToMemOperand(destination));
+      } else {
+        Handle<Object> handle = cgen_->ToHandle(constant_source);
+        AllowDeferredHandleDereference smi_object_check;
+        if (handle->IsSmi()) {
+          Object* obj = *handle;
+          DCHECK(!obj->IsHeapObject());
+          __ StoreConstant(reinterpret_cast<intptr_t>(obj),
+                           cgen_->ToMemOperand(destination));
+        } else {
+          AcquireSavedValueRegister();
+          __ LoadObject(SavedValueRegister(), handle);
+          __ Store(SavedValueRegister(), cgen_->ToMemOperand(destination));
+          ReleaseSavedValueRegister();
+        }
+      }
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister src = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ Fmov(cgen_->ToDoubleRegister(destination), src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ Store(src, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand src = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ Load(cgen_->ToDoubleRegister(destination), src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      EmitStackSlotMove(index);
+    }
+
+  } else {
+    UNREACHABLE();
+  }
+
+  // The move has been emitted, we can eliminate it.
+  moves_[index].Eliminate();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/arm64/lithium-gap-resolver-arm64.h b/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
new file mode 100644
index 0000000..4f5eb22
--- /dev/null
+++ b/src/crankshaft/arm64/lithium-gap-resolver-arm64.h
@@ -0,0 +1,98 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+#define V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
+
+#include "src/crankshaft/arm64/delayed-masm-arm64.h"
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class DelayedGapMasm : public DelayedMasm {
+ public:
+  DelayedGapMasm(LCodeGen* owner, MacroAssembler* masm)
+    : DelayedMasm(owner, masm, root) {
+    // We use the root register as an extra scratch register.
+    // The root register has two advantages:
+    //  - It is not in the crankshaft allocatable registers list, so it
+    //    cannot interfere with the allocatable registers.
+    //  - We don't need to push it on the stack, as we can reload it with
+    //    its value once we have finished.
+  }
+  void EndDelayedUse();
+};
+
+
+class LGapResolver BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Emit a move from one stack slot to another.
+  void EmitStackSlotMove(int index) {
+    masm_.StackSlotMove(moves_[index].source(), moves_[index].destination());
+  }
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  // Registers used to solve cycles.
+  const Register& SavedValueRegister() {
+    DCHECK(!masm_.ScratchRegister().IsAllocatable());
+    return masm_.ScratchRegister();
+  }
+  // The scratch register is used to break cycles and to store constants.
+  // These two methods switch from one mode to the other.
+  void AcquireSavedValueRegister() { masm_.AcquireScratchRegister(); }
+  void ReleaseSavedValueRegister() { masm_.ReleaseScratchRegister(); }
+  const FPRegister& SavedFPValueRegister() {
+    // We use the Crankshaft floating-point scratch register to break a cycle
+    // involving double values as the MacroAssembler will not need it for the
+    // operations performed by the gap resolver.
+    DCHECK(!crankshaft_fp_scratch.IsAllocatable());
+    return crankshaft_fp_scratch;
+  }
+
+  LCodeGen* cgen_;
+  DelayedGapMasm masm_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_ARM64_LITHIUM_GAP_RESOLVER_ARM64_H_
diff --git a/src/crankshaft/hydrogen-alias-analysis.h b/src/crankshaft/hydrogen-alias-analysis.h
new file mode 100644
index 0000000..de8d0bd
--- /dev/null
+++ b/src/crankshaft/hydrogen-alias-analysis.h
@@ -0,0 +1,74 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+enum HAliasing {
+  kMustAlias,
+  kMayAlias,
+  kNoAlias
+};
+
+
+// Defines the interface to alias analysis for the rest of the compiler.
+// A simple implementation can use only local reasoning, but a more powerful
+// analysis might employ points-to analysis.
+class HAliasAnalyzer : public ZoneObject {
+ public:
+  // Simple alias analysis distinguishes allocations, parameters,
+  // and constants using only local reasoning.
+  HAliasing Query(HValue* a, HValue* b) {
+    // The same SSA value always references the same object.
+    if (a == b) return kMustAlias;
+
+    if (a->IsAllocate() || a->IsInnerAllocatedObject()) {
+      // Two non-identical allocations can never be aliases.
+      if (b->IsAllocate()) return kNoAlias;
+      if (b->IsInnerAllocatedObject()) return kNoAlias;
+      // An allocation can never alias a parameter or a constant.
+      if (b->IsParameter()) return kNoAlias;
+      if (b->IsConstant()) return kNoAlias;
+    }
+    if (b->IsAllocate() || b->IsInnerAllocatedObject()) {
+      // An allocation can never alias a parameter or a constant.
+      if (a->IsParameter()) return kNoAlias;
+      if (a->IsConstant()) return kNoAlias;
+    }
+
+    // Constant objects can be distinguished statically.
+    if (a->IsConstant()) {
+      // TODO(titzer): DataEquals() is more efficient, but that's protected.
+      return a->Equals(b) ? kMustAlias : kNoAlias;
+    }
+    return kMayAlias;
+  }
+
+  // Checks whether the objects referred to by the given instructions may
+  // ever be aliases. Note that this is more conservative than checking
+  // {Query(a, b) == kMayAlias}, since this method considers kMustAlias
+  // objects to also be may-aliasing.
+  inline bool MayAlias(HValue* a, HValue* b) {
+    return Query(a, b) != kNoAlias;
+  }
+
+  inline bool MustAlias(HValue* a, HValue* b) {
+    return Query(a, b) == kMustAlias;
+  }
+
+  inline bool NoAlias(HValue* a, HValue* b) {
+    return Query(a, b) == kNoAlias;
+  }
+};
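+
+// Illustrative use by a hypothetical caller: a pass that wants to reorder a
+// load past a store could guard the transformation as follows.
+//   HAliasAnalyzer aliasing;
+//   if (aliasing.NoAlias(load->object(), store->object())) { /* reorder */ }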
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_ALIAS_ANALYSIS_H_
diff --git a/src/crankshaft/hydrogen-bce.cc b/src/crankshaft/hydrogen-bce.cc
new file mode 100644
index 0000000..d00d8ce
--- /dev/null
+++ b/src/crankshaft/hydrogen-bce.cc
@@ -0,0 +1,477 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-bce.h"
+
+namespace v8 {
+namespace internal {
+
+
+// We try to "factor up" HBoundsCheck instructions towards the root of the
+// dominator tree.
+// For now we handle checks where the index is like "exp + int32value".
+// If in the dominator tree we check "exp + v1" and later (dominated)
+// "exp + v2", if v2 <= v1 we can safely remove the second check, and if
+// v2 > v1 we can use v2 in the first check and again remove the second.
+// To do so we keep a dictionary of all checks where the key is the pair
+// "exp, length".
+// The class BoundsCheckKey represents this key.
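+// For example, with a dominating check on "i + 4" and a dominated check on
+// "i + 8" against the same length, both share the key (i, length); the first
+// check can be widened to cover offset 8 and the second check removed.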
+class BoundsCheckKey : public ZoneObject {
+ public:
+  HValue* IndexBase() const { return index_base_; }
+  HValue* Length() const { return length_; }
+
+  uint32_t Hash() {
+    return static_cast<uint32_t>(index_base_->Hashcode() ^ length_->Hashcode());
+  }
+
+  static BoundsCheckKey* Create(Zone* zone,
+                                HBoundsCheck* check,
+                                int32_t* offset) {
+    if (!check->index()->representation().IsSmiOrInteger32()) return NULL;
+
+    HValue* index_base = NULL;
+    HConstant* constant = NULL;
+    bool is_sub = false;
+
+    if (check->index()->IsAdd()) {
+      HAdd* index = HAdd::cast(check->index());
+      if (index->left()->IsConstant()) {
+        constant = HConstant::cast(index->left());
+        index_base = index->right();
+      } else if (index->right()->IsConstant()) {
+        constant = HConstant::cast(index->right());
+        index_base = index->left();
+      }
+    } else if (check->index()->IsSub()) {
+      HSub* index = HSub::cast(check->index());
+      is_sub = true;
+      if (index->right()->IsConstant()) {
+        constant = HConstant::cast(index->right());
+        index_base = index->left();
+      }
+    } else if (check->index()->IsConstant()) {
+      index_base = check->block()->graph()->GetConstant0();
+      constant = HConstant::cast(check->index());
+    }
+
+    if (constant != NULL && constant->HasInteger32Value() &&
+        constant->Integer32Value() != kMinInt) {
+      *offset = is_sub ? - constant->Integer32Value()
+                       : constant->Integer32Value();
+    } else {
+      *offset = 0;
+      index_base = check->index();
+    }
+
+    return new(zone) BoundsCheckKey(index_base, check->length());
+  }
+
+ private:
+  BoundsCheckKey(HValue* index_base, HValue* length)
+      : index_base_(index_base),
+        length_(length) { }
+
+  HValue* index_base_;
+  HValue* length_;
+
+  DISALLOW_COPY_AND_ASSIGN(BoundsCheckKey);
+};
+
+
+// Data about each HBoundsCheck that can be eliminated or moved.
+// It is the "value" in the dictionary indexed by "base-index, length"
+// (the key is BoundsCheckKey).
+// We scan the code with a dominator tree traversal.
+// Traversing the dominator tree we keep a stack (implemented as a singly
+// linked list) of "data" for each basic block that contains a relevant check
+// with the same key (the dictionary holds the head of the list).
+// We also keep all the "data" created for a given basic block in a list, and
+// use it to "clean up" the dictionary when backtracking in the dominator tree
+// traversal.
+// This way, each dictionary entry always points directly to the check that
+// dominates the code currently being examined.
+// We also track the current "offset" of the index expression and use it to
+// decide if any check is already "covered" (so it can be removed) or not.
+class BoundsCheckBbData: public ZoneObject {
+ public:
+  BoundsCheckKey* Key() const { return key_; }
+  int32_t LowerOffset() const { return lower_offset_; }
+  int32_t UpperOffset() const { return upper_offset_; }
+  HBasicBlock* BasicBlock() const { return basic_block_; }
+  HBoundsCheck* LowerCheck() const { return lower_check_; }
+  HBoundsCheck* UpperCheck() const { return upper_check_; }
+  BoundsCheckBbData* NextInBasicBlock() const { return next_in_bb_; }
+  BoundsCheckBbData* FatherInDominatorTree() const { return father_in_dt_; }
+
+  bool OffsetIsCovered(int32_t offset) const {
+    return offset >= LowerOffset() && offset <= UpperOffset();
+  }
+
+  bool HasSingleCheck() { return lower_check_ == upper_check_; }
+
+  void UpdateUpperOffsets(HBoundsCheck* check, int32_t offset) {
+    BoundsCheckBbData* data = FatherInDominatorTree();
+    while (data != NULL && data->UpperCheck() == check) {
+      DCHECK(data->upper_offset_ < offset);
+      data->upper_offset_ = offset;
+      data = data->FatherInDominatorTree();
+    }
+  }
+
+  void UpdateLowerOffsets(HBoundsCheck* check, int32_t offset) {
+    BoundsCheckBbData* data = FatherInDominatorTree();
+    while (data != NULL && data->LowerCheck() == check) {
+      DCHECK(data->lower_offset_ > offset);
+      data->lower_offset_ = offset;
+      data = data->FatherInDominatorTree();
+    }
+  }
+
+  // The goal of this method is to modify either upper_offset_ or
+  // lower_offset_ so that also new_offset is covered (the covered
+  // range grows).
+  //
+  // The precondition is that new_check follows UpperCheck() and
+  // LowerCheck() in the same basic block, and that new_offset is not
+  // covered (otherwise we could simply remove new_check).
+  //
+  // If HasSingleCheck() is true then new_check is added as "second check"
+  // (either upper or lower; note that HasSingleCheck() becomes false).
+  // Otherwise one of the current checks is modified so that it also covers
+  // new_offset, and new_check is removed.
+  void CoverCheck(HBoundsCheck* new_check,
+                  int32_t new_offset) {
+    DCHECK(new_check->index()->representation().IsSmiOrInteger32());
+    bool keep_new_check = false;
+
+    if (new_offset > upper_offset_) {
+      upper_offset_ = new_offset;
+      if (HasSingleCheck()) {
+        keep_new_check = true;
+        upper_check_ = new_check;
+      } else {
+        TightenCheck(upper_check_, new_check, new_offset);
+        UpdateUpperOffsets(upper_check_, upper_offset_);
+      }
+    } else if (new_offset < lower_offset_) {
+      lower_offset_ = new_offset;
+      if (HasSingleCheck()) {
+        keep_new_check = true;
+        lower_check_ = new_check;
+      } else {
+        TightenCheck(lower_check_, new_check, new_offset);
+        UpdateLowerOffsets(lower_check_, lower_offset_);
+      }
+    } else {
+      // Should never have called CoverCheck() in this case.
+      UNREACHABLE();
+    }
+
+    if (!keep_new_check) {
+      if (FLAG_trace_bce) {
+        base::OS::Print("Eliminating check #%d after tightening\n",
+                        new_check->id());
+      }
+      new_check->block()->graph()->isolate()->counters()->
+          bounds_checks_eliminated()->Increment();
+      new_check->DeleteAndReplaceWith(new_check->ActualValue());
+    } else {
+      HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
+                                                            : lower_check_;
+      if (FLAG_trace_bce) {
+        base::OS::Print("Moving second check #%d after first check #%d\n",
+                        new_check->id(), first_check->id());
+      }
+      // The length is guaranteed to be live at first_check.
+      DCHECK(new_check->length() == first_check->length());
+      HInstruction* old_position = new_check->next();
+      new_check->Unlink();
+      new_check->InsertAfter(first_check);
+      MoveIndexIfNecessary(new_check->index(), new_check, old_position);
+    }
+  }
+
+  BoundsCheckBbData(BoundsCheckKey* key,
+                    int32_t lower_offset,
+                    int32_t upper_offset,
+                    HBasicBlock* bb,
+                    HBoundsCheck* lower_check,
+                    HBoundsCheck* upper_check,
+                    BoundsCheckBbData* next_in_bb,
+                    BoundsCheckBbData* father_in_dt)
+      : key_(key),
+        lower_offset_(lower_offset),
+        upper_offset_(upper_offset),
+        basic_block_(bb),
+        lower_check_(lower_check),
+        upper_check_(upper_check),
+        next_in_bb_(next_in_bb),
+        father_in_dt_(father_in_dt) { }
+
+ private:
+  BoundsCheckKey* key_;
+  int32_t lower_offset_;
+  int32_t upper_offset_;
+  HBasicBlock* basic_block_;
+  HBoundsCheck* lower_check_;
+  HBoundsCheck* upper_check_;
+  BoundsCheckBbData* next_in_bb_;
+  BoundsCheckBbData* father_in_dt_;
+
+  void MoveIndexIfNecessary(HValue* index_raw,
+                            HBoundsCheck* insert_before,
+                            HInstruction* end_of_scan_range) {
+    // index_raw can be HAdd(index_base, offset), HSub(index_base, offset),
+    // HConstant(offset) or index_base directly.
+    // In the latter case, no need to move anything.
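+    // Hypothetical example: if index_raw is HAdd(i, #4) and the HAdd was
+    // defined between insert_before and end_of_scan_range, the HAdd (and
+    // its HConstant input, if that also lies in the range) must be unlinked
+    // and re-inserted before insert_before, so that the moved bounds check
+    // only sees operands that are already defined.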
+    if (index_raw->IsAdd() || index_raw->IsSub()) {
+      HArithmeticBinaryOperation* index =
+          HArithmeticBinaryOperation::cast(index_raw);
+      HValue* left_input = index->left();
+      HValue* right_input = index->right();
+      HValue* context = index->context();
+      bool must_move_index = false;
+      bool must_move_left_input = false;
+      bool must_move_right_input = false;
+      bool must_move_context = false;
+      for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
+        if (cursor == left_input) must_move_left_input = true;
+        if (cursor == right_input) must_move_right_input = true;
+        if (cursor == context) must_move_context = true;
+        if (cursor == index) must_move_index = true;
+        if (cursor->previous() == NULL) {
+          cursor = cursor->block()->dominator()->end();
+        } else {
+          cursor = cursor->previous();
+        }
+      }
+      if (must_move_index) {
+        index->Unlink();
+        index->InsertBefore(insert_before);
+      }
+      // The BCE algorithm only selects mergeable bounds checks that share
+      // the same "index_base", so we'll only ever have to move constants.
+      if (must_move_left_input) {
+        HConstant::cast(left_input)->Unlink();
+        HConstant::cast(left_input)->InsertBefore(index);
+      }
+      if (must_move_right_input) {
+        HConstant::cast(right_input)->Unlink();
+        HConstant::cast(right_input)->InsertBefore(index);
+      }
+      if (must_move_context) {
+        // Contexts are always constants.
+        HConstant::cast(context)->Unlink();
+        HConstant::cast(context)->InsertBefore(index);
+      }
+    } else if (index_raw->IsConstant()) {
+      HConstant* index = HConstant::cast(index_raw);
+      bool must_move = false;
+      for (HInstruction* cursor = end_of_scan_range; cursor != insert_before;) {
+        if (cursor == index) must_move = true;
+        if (cursor->previous() == NULL) {
+          cursor = cursor->block()->dominator()->end();
+        } else {
+          cursor = cursor->previous();
+        }
+      }
+      if (must_move) {
+        index->Unlink();
+        index->InsertBefore(insert_before);
+      }
+    }
+  }
+
+  void TightenCheck(HBoundsCheck* original_check,
+                    HBoundsCheck* tighter_check,
+                    int32_t new_offset) {
+    DCHECK(original_check->length() == tighter_check->length());
+    MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check);
+    original_check->ReplaceAllUsesWith(original_check->index());
+    original_check->SetOperandAt(0, tighter_check->index());
+    if (FLAG_trace_bce) {
+      base::OS::Print("Tightened check #%d with offset %d from #%d\n",
+                      original_check->id(), new_offset, tighter_check->id());
+    }
+  }
+
+  DISALLOW_COPY_AND_ASSIGN(BoundsCheckBbData);
+};
+
+
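+// Hypothetical example: checks for a[i + 1] and a[i + 3] on the same array
+// decompose to the same (IndexBase, Length) pair, so their keys compare
+// equal here and both checks share a single table entry.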
+static bool BoundsCheckKeyMatch(void* key1, void* key2) {
+  BoundsCheckKey* k1 = static_cast<BoundsCheckKey*>(key1);
+  BoundsCheckKey* k2 = static_cast<BoundsCheckKey*>(key2);
+  return k1->IndexBase() == k2->IndexBase() && k1->Length() == k2->Length();
+}
+
+
+BoundsCheckTable::BoundsCheckTable(Zone* zone)
+    : ZoneHashMap(BoundsCheckKeyMatch, ZoneHashMap::kDefaultHashMapCapacity,
+                  ZoneAllocationPolicy(zone)) { }
+
+
+BoundsCheckBbData** BoundsCheckTable::LookupOrInsert(BoundsCheckKey* key,
+                                                     Zone* zone) {
+  return reinterpret_cast<BoundsCheckBbData**>(
+      &(ZoneHashMap::LookupOrInsert(key, key->Hash(),
+                                    ZoneAllocationPolicy(zone))->value));
+}
+
+
+void BoundsCheckTable::Insert(BoundsCheckKey* key,
+                              BoundsCheckBbData* data,
+                              Zone* zone) {
+  ZoneHashMap::LookupOrInsert(key, key->Hash(), ZoneAllocationPolicy(zone))
+      ->value = data;
+}
+
+
+void BoundsCheckTable::Delete(BoundsCheckKey* key) {
+  Remove(key, key->Hash());
+}
+
+
+class HBoundsCheckEliminationState {
+ public:
+  HBasicBlock* block_;
+  BoundsCheckBbData* bb_data_list_;
+  int index_;
+};
+
+
+// Eliminates checks in bb and recursively in the dominated blocks.
+// Also replace the results of check instructions with the original value, if
+// the result is used. This is safe now, since we don't do code motion after
+// this point. It enables better register allocation since the value produced
+// by check instructions is really a copy of the original value.
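+// The traversal below is iterative: an explicit stack of
+// (block, bb_data_list, child index) frames replaces recursion, so deeply
+// nested dominator trees cannot overflow the native stack.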
+void HBoundsCheckEliminationPhase::EliminateRedundantBoundsChecks(
+    HBasicBlock* entry) {
+  // Allocate the stack.
+  HBoundsCheckEliminationState* stack =
+    zone()->NewArray<HBoundsCheckEliminationState>(graph()->blocks()->length());
+
+  // Explicitly push the entry block.
+  stack[0].block_ = entry;
+  stack[0].bb_data_list_ = PreProcessBlock(entry);
+  stack[0].index_ = 0;
+  int stack_depth = 1;
+
+  // Implement depth-first traversal with a stack.
+  while (stack_depth > 0) {
+    int current = stack_depth - 1;
+    HBoundsCheckEliminationState* state = &stack[current];
+    const ZoneList<HBasicBlock*>* children = state->block_->dominated_blocks();
+
+    if (state->index_ < children->length()) {
+      // Recursively visit children blocks.
+      HBasicBlock* child = children->at(state->index_++);
+      int next = stack_depth++;
+      stack[next].block_ = child;
+      stack[next].bb_data_list_ = PreProcessBlock(child);
+      stack[next].index_ = 0;
+    } else {
+      // Finished with all children; post process the block.
+      PostProcessBlock(state->block_, state->bb_data_list_);
+      stack_depth--;
+    }
+  }
+}
+
+
+BoundsCheckBbData* HBoundsCheckEliminationPhase::PreProcessBlock(
+    HBasicBlock* bb) {
+  BoundsCheckBbData* bb_data_list = NULL;
+
+  for (HInstructionIterator it(bb); !it.Done(); it.Advance()) {
+    HInstruction* i = it.Current();
+    if (!i->IsBoundsCheck()) continue;
+
+    HBoundsCheck* check = HBoundsCheck::cast(i);
+    int32_t offset = 0;
+    BoundsCheckKey* key =
+        BoundsCheckKey::Create(zone(), check, &offset);
+    if (key == NULL) continue;
+    BoundsCheckBbData** data_p = table_.LookupOrInsert(key, zone());
+    BoundsCheckBbData* data = *data_p;
+    if (data == NULL) {
+      bb_data_list = new(zone()) BoundsCheckBbData(key,
+                                                   offset,
+                                                   offset,
+                                                   bb,
+                                                   check,
+                                                   check,
+                                                   bb_data_list,
+                                                   NULL);
+      *data_p = bb_data_list;
+      if (FLAG_trace_bce) {
+        base::OS::Print("Fresh bounds check data for block #%d: [%d]\n",
+                        bb->block_id(), offset);
+      }
+    } else if (data->OffsetIsCovered(offset)) {
+      bb->graph()->isolate()->counters()->
+          bounds_checks_eliminated()->Increment();
+      if (FLAG_trace_bce) {
+        base::OS::Print("Eliminating bounds check #%d, offset %d is covered\n",
+                        check->id(), offset);
+      }
+      check->DeleteAndReplaceWith(check->ActualValue());
+    } else if (data->BasicBlock() == bb) {
+      // TODO(jkummerow): I think the following logic would be preferable:
+      // if (data->BasicBlock() == bb ||
+      //     graph()->use_optimistic_licm() ||
+      //     bb->IsLoopSuccessorDominator()) {
+      //   data->CoverCheck(check, offset)
+      // } else {
+      //   /* add pristine BCBbData like in (data == NULL) case above */
+      // }
+      // Even better would be: distinguish between read-only dominator-imposed
+      // knowledge and modifiable upper/lower checks.
+      // What happens currently is that the first bounds check in a dominated
+      // block will stay around while any further checks are hoisted out,
+      // which doesn't make sense. Investigate/fix this in a future CL.
+      data->CoverCheck(check, offset);
+    } else if (graph()->use_optimistic_licm() ||
+               bb->IsLoopSuccessorDominator()) {
+      int32_t new_lower_offset = offset < data->LowerOffset()
+          ? offset
+          : data->LowerOffset();
+      int32_t new_upper_offset = offset > data->UpperOffset()
+          ? offset
+          : data->UpperOffset();
+      bb_data_list = new(zone()) BoundsCheckBbData(key,
+                                                   new_lower_offset,
+                                                   new_upper_offset,
+                                                   bb,
+                                                   data->LowerCheck(),
+                                                   data->UpperCheck(),
+                                                   bb_data_list,
+                                                   data);
+      if (FLAG_trace_bce) {
+        base::OS::Print("Updated bounds check data for block #%d: [%d - %d]\n",
+                        bb->block_id(), new_lower_offset, new_upper_offset);
+      }
+      table_.Insert(key, bb_data_list, zone());
+    }
+  }
+
+  return bb_data_list;
+}
+
+
+void HBoundsCheckEliminationPhase::PostProcessBlock(
+    HBasicBlock* block, BoundsCheckBbData* data) {
+  while (data != NULL) {
+    if (data->FatherInDominatorTree()) {
+      table_.Insert(data->Key(), data->FatherInDominatorTree(), zone());
+    } else {
+      table_.Delete(data->Key());
+    }
+    data = data->NextInBasicBlock();
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-bce.h b/src/crankshaft/hydrogen-bce.h
new file mode 100644
index 0000000..e819ffc
--- /dev/null
+++ b/src/crankshaft/hydrogen-bce.h
@@ -0,0 +1,52 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_BCE_H_
+#define V8_CRANKSHAFT_HYDROGEN_BCE_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class BoundsCheckBbData;
+class BoundsCheckKey;
+class BoundsCheckTable : private ZoneHashMap {
+ public:
+  explicit BoundsCheckTable(Zone* zone);
+
+  INLINE(BoundsCheckBbData** LookupOrInsert(BoundsCheckKey* key, Zone* zone));
+  INLINE(void Insert(BoundsCheckKey* key, BoundsCheckBbData* data, Zone* zone));
+  INLINE(void Delete(BoundsCheckKey* key));
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BoundsCheckTable);
+};
+
+
+class HBoundsCheckEliminationPhase : public HPhase {
+ public:
+  explicit HBoundsCheckEliminationPhase(HGraph* graph)
+      : HPhase("H_Bounds checks elimination", graph), table_(zone()) { }
+
+  void Run() {
+    EliminateRedundantBoundsChecks(graph()->entry_block());
+  }
+
+ private:
+  void EliminateRedundantBoundsChecks(HBasicBlock* bb);
+  BoundsCheckBbData* PreProcessBlock(HBasicBlock* bb);
+  void PostProcessBlock(HBasicBlock* bb, BoundsCheckBbData* data);
+
+  BoundsCheckTable table_;
+
+  DISALLOW_COPY_AND_ASSIGN(HBoundsCheckEliminationPhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_BCE_H_
diff --git a/src/crankshaft/hydrogen-bch.cc b/src/crankshaft/hydrogen-bch.cc
new file mode 100644
index 0000000..060e0bc
--- /dev/null
+++ b/src/crankshaft/hydrogen-bch.cc
@@ -0,0 +1,379 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-bch.h"
+
+namespace v8 {
+namespace internal {
+
+/*
+ * This class is a table with one element for each basic block.
+ *
+ * It is used to check if, inside one loop, all execution paths contain
+ * a bounds check for a particular [index, length] combination.
+ * The reason is that if there is a path that stays in the loop without
+ * executing a check then the check cannot be hoisted out of the loop (it
+ * would likely fail and cause a deopt for no good reason).
+ * We also check if there are paths that exit the loop early, and if so we
+ * perform the hoisting only if graph()->use_optimistic_licm() is true.
+ * The reason is that such paths are relatively common and harmless (like in
+ * a "search" method that scans an array until an element is found), but in
+ * some cases they could cause a deopt if we hoist the check, so this is a
+ * situation we need to detect.
+ */
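+/*
+ * Hypothetical JavaScript examples (not from this CL) of the cases above:
+ *
+ *   for (var i = 0; i < n; i++) s += a[i];  // check on every iterating
+ *                                           // path => HOISTABLE
+ *
+ *   for (var i = 0; i < n; i++) {
+ *     if (a[i] === x) return i;             // early exit => hoist only
+ *   }                                       // with optimistic LICM
+ *
+ *   for (var i = 0; i < n; i++) {
+ *     if (skip(i)) continue;                // iterating path without a
+ *     s += a[i];                            // check => NOT_HOISTABLE
+ *   }
+ */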
+class InductionVariableBlocksTable BASE_EMBEDDED {
+ public:
+  class Element {
+   public:
+    static const int kNoBlock = -1;
+
+    HBasicBlock* block() { return block_; }
+    void set_block(HBasicBlock* block) { block_ = block; }
+    bool is_start() { return is_start_; }
+    bool is_proper_exit() { return is_proper_exit_; }
+    bool is_in_loop() { return is_in_loop_; }
+    bool has_check() { return has_check_; }
+    void set_has_check() { has_check_ = true; }
+    InductionVariableLimitUpdate* additional_limit() {
+      return &additional_limit_;
+    }
+
+    /*
+     * Initializes the table element for a given loop (identified by its
+     * induction variable).
+     */
+    void InitializeLoop(InductionVariableData* data) {
+      DCHECK(data->limit() != NULL);
+      HLoopInformation* loop = data->phi()->block()->current_loop();
+      is_start_ = (block() == loop->loop_header());
+      is_proper_exit_ = (block() == data->induction_exit_target());
+      is_in_loop_ = loop->IsNestedInThisLoop(block()->current_loop());
+      has_check_ = false;
+    }
+
+    // Utility methods to iterate over dominated blocks.
+    void ResetCurrentDominatedBlock() { current_dominated_block_ = kNoBlock; }
+    HBasicBlock* CurrentDominatedBlock() {
+      DCHECK(current_dominated_block_ != kNoBlock);
+      return current_dominated_block_ < block()->dominated_blocks()->length() ?
+          block()->dominated_blocks()->at(current_dominated_block_) : NULL;
+    }
+    HBasicBlock* NextDominatedBlock() {
+      current_dominated_block_++;
+      return CurrentDominatedBlock();
+    }
+
+    Element()
+        : block_(NULL), is_start_(false), is_proper_exit_(false),
+          has_check_(false), additional_limit_(),
+          current_dominated_block_(kNoBlock) {}
+
+   private:
+    HBasicBlock* block_;
+    bool is_start_;
+    bool is_proper_exit_;
+    bool is_in_loop_;
+    bool has_check_;
+    InductionVariableLimitUpdate additional_limit_;
+    int current_dominated_block_;
+  };
+
+  HGraph* graph() const { return graph_; }
+  Counters* counters() const { return graph()->isolate()->counters(); }
+  HBasicBlock* loop_header() const { return loop_header_; }
+  Element* at(int index) const { return &(elements_.at(index)); }
+  Element* at(HBasicBlock* block) const { return at(block->block_id()); }
+
+  void AddCheckAt(HBasicBlock* block) {
+    at(block->block_id())->set_has_check();
+  }
+
+  /*
+   * Initializes the table for a given loop (identified by its induction
+   * variable).
+   */
+  void InitializeLoop(InductionVariableData* data) {
+    for (int i = 0; i < graph()->blocks()->length(); i++) {
+      at(i)->InitializeLoop(data);
+    }
+    loop_header_ = data->phi()->block()->current_loop()->loop_header();
+  }
+
+
+  enum Hoistability {
+    HOISTABLE,
+    OPTIMISTICALLY_HOISTABLE,
+    NOT_HOISTABLE
+  };
+
+  /*
+   * This method checks if it is appropriate to hoist the bounds checks on an
+   * induction variable out of the loop.
+   * The problem is that in the loop code graph there could be execution paths
+   * where the check is not performed, but hoisting the check has the same
+   * semantics as performing it at every loop iteration, which could cause
+   * unnecessary check failures (which would mean unnecessary deoptimizations).
+   * The method returns NOT_HOISTABLE if some path performs an iteration
+   * (loops back to the header) without meeting a check, HOISTABLE if every
+   * iterating path meets a check and the loop has no early exits, and
+   * OPTIMISTICALLY_HOISTABLE if checks are met but early exit paths exist.
+   */
+  Hoistability CheckHoistability() {
+    for (int i = 0; i < elements_.length(); i++) {
+      at(i)->ResetCurrentDominatedBlock();
+    }
+    bool unsafe = false;
+
+    HBasicBlock* current = loop_header();
+    while (current != NULL) {
+      HBasicBlock* next;
+
+      if (at(current)->has_check() || !at(current)->is_in_loop()) {
+        // We found a check or we reached a dominated block out of the loop,
+        // therefore this block is safe and we can backtrack.
+        next = NULL;
+      } else {
+        for (int i = 0; i < current->end()->SuccessorCount(); i++) {
+          Element* successor = at(current->end()->SuccessorAt(i));
+
+          if (!successor->is_in_loop()) {
+            if (!successor->is_proper_exit()) {
+              // We found a path that exits the loop early, and is not the exit
+              // related to the induction limit, therefore hoisting checks is
+              // an optimistic assumption.
+              unsafe = true;
+            }
+          }
+
+          if (successor->is_start()) {
+            // We found a path that does one loop iteration without meeting any
+            // check, therefore hoisting checks would be likely to cause
+            // unnecessary deopts.
+            return NOT_HOISTABLE;
+          }
+        }
+
+        next = at(current)->NextDominatedBlock();
+      }
+
+      // If we have no next block we need to backtrack the tree traversal.
+      while (next == NULL) {
+        current = current->dominator();
+        if (current != NULL) {
+          next = at(current)->NextDominatedBlock();
+        } else {
+          // We reached the root: next stays NULL.
+          next = NULL;
+          break;
+        }
+      }
+
+      current = next;
+    }
+
+    return unsafe ? OPTIMISTICALLY_HOISTABLE : HOISTABLE;
+  }
+
+  explicit InductionVariableBlocksTable(HGraph* graph)
+    : graph_(graph), loop_header_(NULL),
+      elements_(graph->blocks()->length(), graph->zone()) {
+    for (int i = 0; i < graph->blocks()->length(); i++) {
+      Element element;
+      element.set_block(graph->blocks()->at(i));
+      elements_.Add(element, graph->zone());
+      DCHECK(at(i)->block()->block_id() == i);
+    }
+  }
+
+  // Tries to hoist a check out of its induction loop.
+  void ProcessRelatedChecks(
+      InductionVariableData::InductionVariableCheck* check,
+      InductionVariableData* data) {
+    HValue* length = check->check()->length();
+    check->set_processed();
+    HBasicBlock* header =
+        data->phi()->block()->current_loop()->loop_header();
+    HBasicBlock* pre_header = header->predecessors()->at(0);
+    // Check that the limit is defined in the loop preheader.
+    if (!data->limit()->IsInteger32Constant()) {
+      HBasicBlock* limit_block = data->limit()->block();
+      if (limit_block != pre_header &&
+          !limit_block->Dominates(pre_header)) {
+        return;
+      }
+    }
+    // Check that the length and limit have compatible representations.
+    if (!(data->limit()->representation().Equals(
+            length->representation()) ||
+        data->limit()->IsInteger32Constant())) {
+      return;
+    }
+    // Check that the length is defined in the loop preheader.
+    if (check->check()->length()->block() != pre_header &&
+        !check->check()->length()->block()->Dominates(pre_header)) {
+      return;
+    }
+
+    // Add checks to the table.
+    for (InductionVariableData::InductionVariableCheck* current_check = check;
+         current_check != NULL;
+         current_check = current_check->next()) {
+      if (current_check->check()->length() != length) continue;
+
+      AddCheckAt(current_check->check()->block());
+      current_check->set_processed();
+    }
+
+    // Check that we will not cause unwanted deoptimizations.
+    Hoistability hoistability = CheckHoistability();
+    if (hoistability == NOT_HOISTABLE ||
+        (hoistability == OPTIMISTICALLY_HOISTABLE &&
+         !graph()->use_optimistic_licm())) {
+      return;
+    }
+
+    // We will do the hoisting, but we must decide whether to compare against
+    // the induction limit or a constant: if all checks are done against the
+    // same constant limit we will use that instead of the induction limit.
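+    // Hypothetical example: if every check in this group is performed
+    // against the constant 100, the hoisted check compares against
+    // HConstant(100) instead of the induction limit.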
+    bool has_upper_constant_limit = true;
+    int32_t upper_constant_limit =
+        check->HasUpperLimit() ? check->upper_limit() : 0;
+    for (InductionVariableData::InductionVariableCheck* current_check = check;
+         current_check != NULL;
+         current_check = current_check->next()) {
+      has_upper_constant_limit =
+          has_upper_constant_limit && current_check->HasUpperLimit() &&
+          current_check->upper_limit() == upper_constant_limit;
+      counters()->bounds_checks_eliminated()->Increment();
+      current_check->check()->set_skip_check();
+    }
+
+    // Choose the appropriate limit.
+    Zone* zone = graph()->zone();
+    HValue* context = graph()->GetInvalidContext();
+    HValue* limit = data->limit();
+    if (has_upper_constant_limit) {
+      HConstant* new_limit = HConstant::New(graph()->isolate(), zone, context,
+                                            upper_constant_limit);
+      new_limit->InsertBefore(pre_header->end());
+      limit = new_limit;
+    }
+
+    // If necessary, redefine the limit in the preheader.
+    if (limit->IsInteger32Constant() &&
+        limit->block() != pre_header &&
+        !limit->block()->Dominates(pre_header)) {
+      HConstant* new_limit = HConstant::New(graph()->isolate(), zone, context,
+                                            limit->GetInteger32Constant());
+      new_limit->InsertBefore(pre_header->end());
+      limit = new_limit;
+    }
+
+    // Do the hoisting.
+    HBoundsCheck* hoisted_check = HBoundsCheck::New(
+        graph()->isolate(), zone, context, limit, check->check()->length());
+    hoisted_check->InsertBefore(pre_header->end());
+    hoisted_check->set_allow_equality(true);
+    counters()->bounds_checks_hoisted()->Increment();
+  }
+
+  void CollectInductionVariableData(HBasicBlock* bb) {
+    bool additional_limit = false;
+
+    for (int i = 0; i < bb->phis()->length(); i++) {
+      HPhi* phi = bb->phis()->at(i);
+      phi->DetectInductionVariable();
+    }
+
+    additional_limit = InductionVariableData::ComputeInductionVariableLimit(
+        bb, at(bb)->additional_limit());
+
+    if (additional_limit) {
+      at(bb)->additional_limit()->updated_variable->
+          UpdateAdditionalLimit(at(bb)->additional_limit());
+    }
+
+    for (HInstruction* i = bb->first(); i != NULL; i = i->next()) {
+      if (!i->IsBoundsCheck()) continue;
+      HBoundsCheck* check = HBoundsCheck::cast(i);
+      InductionVariableData::BitwiseDecompositionResult decomposition;
+      InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
+      if (!decomposition.base->IsPhi()) continue;
+      HPhi* phi = HPhi::cast(decomposition.base);
+
+      if (!phi->IsInductionVariable()) continue;
+      InductionVariableData* data = phi->induction_variable_data();
+
+      // For now ignore loops decrementing the index.
+      if (data->increment() <= 0) continue;
+      if (!data->LowerLimitIsNonNegativeConstant()) continue;
+
+      // TODO(mmassi): skip OSR values for check->length().
+      if (check->length() == data->limit() ||
+          check->length() == data->additional_upper_limit()) {
+        counters()->bounds_checks_eliminated()->Increment();
+        check->set_skip_check();
+        continue;
+      }
+
+      if (!phi->IsLimitedInductionVariable()) continue;
+
+      int32_t limit = data->ComputeUpperLimit(decomposition.and_mask,
+                                              decomposition.or_mask);
+      phi->induction_variable_data()->AddCheck(check, limit);
+    }
+
+    for (int i = 0; i < bb->dominated_blocks()->length(); i++) {
+      CollectInductionVariableData(bb->dominated_blocks()->at(i));
+    }
+
+    if (additional_limit) {
+      at(bb->block_id())->additional_limit()->updated_variable->
+          UpdateAdditionalLimit(at(bb->block_id())->additional_limit());
+    }
+  }
+
+  void EliminateRedundantBoundsChecks(HBasicBlock* bb) {
+    for (int i = 0; i < bb->phis()->length(); i++) {
+      HPhi* phi = bb->phis()->at(i);
+      if (!phi->IsLimitedInductionVariable()) continue;
+
+      InductionVariableData* induction_data = phi->induction_variable_data();
+      InductionVariableData::ChecksRelatedToLength* current_length_group =
+          induction_data->checks();
+      while (current_length_group != NULL) {
+        current_length_group->CloseCurrentBlock();
+        InductionVariableData::InductionVariableCheck* current_base_check =
+            current_length_group->checks();
+        InitializeLoop(induction_data);
+
+        while (current_base_check != NULL) {
+          ProcessRelatedChecks(current_base_check, induction_data);
+          while (current_base_check != NULL &&
+                 current_base_check->processed()) {
+            current_base_check = current_base_check->next();
+          }
+        }
+
+        current_length_group = current_length_group->next();
+      }
+    }
+  }
+
+ private:
+  HGraph* graph_;
+  HBasicBlock* loop_header_;
+  ZoneList<Element> elements_;
+};
+
+
+void HBoundsCheckHoistingPhase::HoistRedundantBoundsChecks() {
+  InductionVariableBlocksTable table(graph());
+  table.CollectInductionVariableData(graph()->entry_block());
+  for (int i = 0; i < graph()->blocks()->length(); i++) {
+    table.EliminateRedundantBoundsChecks(graph()->blocks()->at(i));
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-bch.h b/src/crankshaft/hydrogen-bch.h
new file mode 100644
index 0000000..cdcd407
--- /dev/null
+++ b/src/crankshaft/hydrogen-bch.h
@@ -0,0 +1,33 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_BCH_H_
+#define V8_CRANKSHAFT_HYDROGEN_BCH_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HBoundsCheckHoistingPhase : public HPhase {
+ public:
+  explicit HBoundsCheckHoistingPhase(HGraph* graph)
+      : HPhase("H_Bounds checks hoisting", graph) { }
+
+  void Run() {
+    HoistRedundantBoundsChecks();
+  }
+
+ private:
+  void HoistRedundantBoundsChecks();
+
+  DISALLOW_COPY_AND_ASSIGN(HBoundsCheckHoistingPhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_BCH_H_
diff --git a/src/crankshaft/hydrogen-canonicalize.cc b/src/crankshaft/hydrogen-canonicalize.cc
new file mode 100644
index 0000000..4a07357
--- /dev/null
+++ b/src/crankshaft/hydrogen-canonicalize.cc
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-canonicalize.h"
+
+#include "src/crankshaft/hydrogen-redundant-phi.h"
+
+namespace v8 {
+namespace internal {
+
+void HCanonicalizePhase::Run() {
+  const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+  // Before removing no-op instructions, save their semantic value.
+  // We must be careful not to set the flag unnecessarily, because GVN
+  // cannot identify two instructions when their flag value differs.
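+  // Hypothetical example: if every use of an int32 multiplication truncates
+  // its result to int32, marking it kAllUsesTruncatingToInt32 here lets
+  // later phases skip minus-zero handling for that multiplication.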
+  for (int i = 0; i < blocks->length(); ++i) {
+    for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      if (instr->IsArithmeticBinaryOperation()) {
+        if (instr->representation().IsInteger32()) {
+          if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+                  HInstruction::kTruncatingToInt32)) {
+            instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
+          }
+        } else if (instr->representation().IsSmi()) {
+          if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+                  HInstruction::kTruncatingToSmi)) {
+            instr->SetFlag(HInstruction::kAllUsesTruncatingToSmi);
+          } else if (instr->HasAtLeastOneUseWithFlagAndNoneWithout(
+                         HInstruction::kTruncatingToInt32)) {
+            // Avoid a redundant minus-zero check.
+            instr->SetFlag(HInstruction::kAllUsesTruncatingToInt32);
+          }
+        }
+      }
+    }
+  }
+
+  // Perform the actual canonicalization pass.
+  HRedundantPhiEliminationPhase redundant_phi_eliminator(graph());
+  for (int i = 0; i < blocks->length(); ++i) {
+    // Eliminate redundant phis in the block first; changes to their inputs
+    // might have made them redundant, and eliminating them creates more
+    // opportunities for constant folding and strength reduction.
+    redundant_phi_eliminator.ProcessBlock(blocks->at(i));
+    // Now canonicalize each instruction.
+    for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      HValue* value = instr->Canonicalize();
+      if (value != instr) instr->DeleteAndReplaceWith(value);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-canonicalize.h b/src/crankshaft/hydrogen-canonicalize.h
new file mode 100644
index 0000000..a17557a
--- /dev/null
+++ b/src/crankshaft/hydrogen-canonicalize.h
@@ -0,0 +1,29 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
+#define V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HCanonicalizePhase : public HPhase {
+ public:
+  explicit HCanonicalizePhase(HGraph* graph)
+      : HPhase("H_Canonicalize", graph) { }
+
+  void Run();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HCanonicalizePhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_CANONICALIZE_H_
diff --git a/src/crankshaft/hydrogen-check-elimination.cc b/src/crankshaft/hydrogen-check-elimination.cc
new file mode 100644
index 0000000..548e4cd
--- /dev/null
+++ b/src/crankshaft/hydrogen-check-elimination.cc
@@ -0,0 +1,913 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-check-elimination.h"
+
+#include "src/crankshaft/hydrogen-alias-analysis.h"
+#include "src/crankshaft/hydrogen-flow-engine.h"
+
+#define GLOBAL 1
+
+// Only collect stats in debug mode.
+#if DEBUG
+#define INC_STAT(x) phase_->x++
+#else
+#define INC_STAT(x)
+#endif
+
+// For code de-uglification.
+#define TRACE(x) if (FLAG_trace_check_elimination) PrintF x
+
+namespace v8 {
+namespace internal {
+
+typedef const UniqueSet<Map>* MapSet;
+
+struct HCheckTableEntry {
+  enum State {
+    // We have seen a map check (i.e. an HCheckMaps) for these maps, so we can
+    // use this information to eliminate further map checks, elements kind
+    // transitions, etc.
+    CHECKED,
+    // Same as CHECKED, but we also know that these maps are stable.
+    CHECKED_STABLE,
+    // These maps are stable, but not checked (i.e. we learned this via field
+    // type tracking or from a constant, or they were initially CHECKED_STABLE,
+    // but became UNCHECKED_STABLE because of an instruction that changes maps
+    // or elements kind), and we need a stability check for them in order to use
+    // this information for check elimination (which turns them back to
+    // CHECKED_STABLE).
+    UNCHECKED_STABLE
+  };
+
+  static const char* State2String(State state) {
+    switch (state) {
+      case CHECKED: return "checked";
+      case CHECKED_STABLE: return "checked stable";
+      case UNCHECKED_STABLE: return "unchecked stable";
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+  static State StateMerge(State state1, State state2) {
+    if (state1 == state2) return state1;
+    if ((state1 == CHECKED && state2 == CHECKED_STABLE) ||
+        (state2 == CHECKED && state1 == CHECKED_STABLE)) {
+      return CHECKED;
+    }
+    DCHECK((state1 == CHECKED_STABLE && state2 == UNCHECKED_STABLE) ||
+           (state2 == CHECKED_STABLE && state1 == UNCHECKED_STABLE));
+    return UNCHECKED_STABLE;
+  }
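+  // Merge examples: CHECKED joined with CHECKED_STABLE drops stability
+  // (CHECKED); CHECKED_STABLE joined with UNCHECKED_STABLE drops the check
+  // (UNCHECKED_STABLE). CHECKED and UNCHECKED_STABLE never meet here;
+  // callers drop such conflicting entries instead (see Merge below).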
+
+  HValue* object_;  // The object being approximated. NULL => invalid entry.
+  HInstruction* check_;  // The last check instruction.
+  MapSet maps_;          // The set of known maps for the object.
+  State state_;          // The state of this entry.
+};
+
+
+// The main data structure used during check elimination, which stores a
+// set of known maps for each object.
+class HCheckTable : public ZoneObject {
+ public:
+  static const int kMaxTrackedObjects = 16;
+
+  explicit HCheckTable(HCheckEliminationPhase* phase)
+    : phase_(phase),
+      cursor_(0),
+      size_(0) {
+  }
+
+  // The main processing of instructions.
+  HCheckTable* Process(HInstruction* instr, Zone* zone) {
+    switch (instr->opcode()) {
+      case HValue::kCheckMaps: {
+        ReduceCheckMaps(HCheckMaps::cast(instr));
+        break;
+      }
+      case HValue::kLoadNamedField: {
+        ReduceLoadNamedField(HLoadNamedField::cast(instr));
+        break;
+      }
+      case HValue::kStoreNamedField: {
+        ReduceStoreNamedField(HStoreNamedField::cast(instr));
+        break;
+      }
+      case HValue::kCompareMap: {
+        ReduceCompareMap(HCompareMap::cast(instr));
+        break;
+      }
+      case HValue::kCompareObjectEqAndBranch: {
+        ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch::cast(instr));
+        break;
+      }
+      case HValue::kIsStringAndBranch: {
+        ReduceIsStringAndBranch(HIsStringAndBranch::cast(instr));
+        break;
+      }
+      case HValue::kTransitionElementsKind: {
+        ReduceTransitionElementsKind(
+            HTransitionElementsKind::cast(instr));
+        break;
+      }
+      case HValue::kCheckHeapObject: {
+        ReduceCheckHeapObject(HCheckHeapObject::cast(instr));
+        break;
+      }
+      case HValue::kCheckInstanceType: {
+        ReduceCheckInstanceType(HCheckInstanceType::cast(instr));
+        break;
+      }
+      default: {
+        // If the instruction changes maps uncontrollably, drop everything.
+        if (instr->CheckChangesFlag(kOsrEntries)) {
+          Kill();
+          break;
+        }
+        if (instr->CheckChangesFlag(kElementsKind) ||
+            instr->CheckChangesFlag(kMaps)) {
+          KillUnstableEntries();
+        }
+      }
+      // Improvements possible:
+      // - eliminate redundant HCheckSmi instructions
+      // - track which values have been HCheckHeapObject'd
+    }
+
+    return this;
+  }
+
+  // Support for global analysis with HFlowEngine: Merge given state with
+  // the other incoming state.
+  static HCheckTable* Merge(HCheckTable* succ_state, HBasicBlock* succ_block,
+                            HCheckTable* pred_state, HBasicBlock* pred_block,
+                            Zone* zone) {
+    if (pred_state == NULL || pred_block->IsUnreachable()) {
+      return succ_state;
+    }
+    if (succ_state == NULL) {
+      return pred_state->Copy(succ_block, pred_block, zone);
+    } else {
+      return succ_state->Merge(succ_block, pred_state, pred_block, zone);
+    }
+  }
+
+  // Support for global analysis with HFlowEngine: Given state merged with all
+  // the other incoming states, prepare it for use.
+  static HCheckTable* Finish(HCheckTable* state, HBasicBlock* block,
+                             Zone* zone) {
+    if (state == NULL) {
+      block->MarkUnreachable();
+    } else if (block->IsUnreachable()) {
+      state = NULL;
+    }
+    if (FLAG_trace_check_elimination) {
+      PrintF("Processing B%d, checkmaps-table:\n", block->block_id());
+      Print(state);
+    }
+    return state;
+  }
+
+ private:
+  // Copy state to successor block.
+  HCheckTable* Copy(HBasicBlock* succ, HBasicBlock* from_block, Zone* zone) {
+    HCheckTable* copy = new(zone) HCheckTable(phase_);
+    for (int i = 0; i < size_; i++) {
+      HCheckTableEntry* old_entry = &entries_[i];
+      DCHECK(old_entry->maps_->size() > 0);
+      HCheckTableEntry* new_entry = &copy->entries_[i];
+      new_entry->object_ = old_entry->object_;
+      new_entry->maps_ = old_entry->maps_;
+      new_entry->state_ = old_entry->state_;
+      // Keep the check if the existing check's block dominates the successor.
+      if (old_entry->check_ != NULL &&
+          old_entry->check_->block()->Dominates(succ)) {
+        new_entry->check_ = old_entry->check_;
+      } else {
+        // Leave it NULL till we meet a new check instruction for this object
+        // in the control flow.
+        new_entry->check_ = NULL;
+      }
+    }
+    copy->cursor_ = cursor_;
+    copy->size_ = size_;
+
+    // Create entries for succ block's phis.
+    if (!succ->IsLoopHeader() && succ->phis()->length() > 0) {
+      int pred_index = succ->PredecessorIndexOf(from_block);
+      for (int phi_index = 0;
+           phi_index < succ->phis()->length();
+           ++phi_index) {
+        HPhi* phi = succ->phis()->at(phi_index);
+        HValue* phi_operand = phi->OperandAt(pred_index);
+
+        HCheckTableEntry* pred_entry = copy->Find(phi_operand);
+        if (pred_entry != NULL) {
+          // Create an entry for a phi in the table.
+          copy->Insert(phi, NULL, pred_entry->maps_, pred_entry->state_);
+        }
+      }
+    }
+
+    // Branch-sensitive analysis for certain comparisons may add more facts
+    // to the state for the successor on the true branch.
+    bool learned = false;
+    if (succ->predecessors()->length() == 1) {
+      HControlInstruction* end = succ->predecessors()->at(0)->end();
+      bool is_true_branch = end->SuccessorAt(0) == succ;
+      if (end->IsCompareMap()) {
+        HCompareMap* cmp = HCompareMap::cast(end);
+        HValue* object = cmp->value()->ActualValue();
+        HCheckTableEntry* entry = copy->Find(object);
+        if (is_true_branch) {
+          HCheckTableEntry::State state = cmp->map_is_stable()
+              ? HCheckTableEntry::CHECKED_STABLE
+              : HCheckTableEntry::CHECKED;
+          // Learn on the true branch of if(CompareMap(x)).
+          if (entry == NULL) {
+            copy->Insert(object, cmp, cmp->map(), state);
+          } else {
+            entry->maps_ = new(zone) UniqueSet<Map>(cmp->map(), zone);
+            entry->check_ = cmp;
+            entry->state_ = state;
+          }
+        } else {
+          // Learn on the false branch of if(CompareMap(x)).
+          if (entry != NULL) {
+            EnsureChecked(entry, object, cmp);
+            UniqueSet<Map>* maps = entry->maps_->Copy(zone);
+            maps->Remove(cmp->map());
+            entry->maps_ = maps;
+            DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+          }
+        }
+        learned = true;
+      } else if (is_true_branch && end->IsCompareObjectEqAndBranch()) {
+        // Learn on the true branch of if(CmpObjectEq(x, y)).
+        HCompareObjectEqAndBranch* cmp =
+          HCompareObjectEqAndBranch::cast(end);
+        HValue* left = cmp->left()->ActualValue();
+        HValue* right = cmp->right()->ActualValue();
+        HCheckTableEntry* le = copy->Find(left);
+        HCheckTableEntry* re = copy->Find(right);
+        if (le == NULL) {
+          if (re != NULL) {
+            copy->Insert(left, NULL, re->maps_, re->state_);
+          }
+        } else if (re == NULL) {
+          copy->Insert(right, NULL, le->maps_, le->state_);
+        } else {
+          EnsureChecked(le, cmp->left(), cmp);
+          EnsureChecked(re, cmp->right(), cmp);
+          le->maps_ = re->maps_ = le->maps_->Intersect(re->maps_, zone);
+          le->state_ = re->state_ = HCheckTableEntry::StateMerge(
+              le->state_, re->state_);
+          DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, le->state_);
+          DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, re->state_);
+        }
+        learned = true;
+      } else if (end->IsIsStringAndBranch()) {
+        HIsStringAndBranch* cmp = HIsStringAndBranch::cast(end);
+        HValue* object = cmp->value()->ActualValue();
+        HCheckTableEntry* entry = copy->Find(object);
+        if (is_true_branch) {
+          // Learn on the true branch of if(IsString(x)).
+          if (entry == NULL) {
+            copy->Insert(object, NULL, string_maps(),
+                         HCheckTableEntry::CHECKED);
+          } else {
+            EnsureChecked(entry, object, cmp);
+            entry->maps_ = entry->maps_->Intersect(string_maps(), zone);
+            DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+          }
+        } else {
+          // Learn on the false branch of if(IsString(x)).
+          if (entry != NULL) {
+            EnsureChecked(entry, object, cmp);
+            entry->maps_ = entry->maps_->Subtract(string_maps(), zone);
+            DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+          }
+        }
+      }
+      // Learning on false branches requires storing negative facts.
+    }
+
+    if (FLAG_trace_check_elimination) {
+      PrintF("B%d checkmaps-table %s from B%d:\n",
+             succ->block_id(),
+             learned ? "learned" : "copied",
+             from_block->block_id());
+      Print(copy);
+    }
+
+    return copy;
+  }
+
+  // Merge this state with the other incoming state.
+  HCheckTable* Merge(HBasicBlock* succ, HCheckTable* that,
+                     HBasicBlock* pred_block, Zone* zone) {
+    if (that->size_ == 0) {
+      // If the other state is empty, simply reset.
+      size_ = 0;
+      cursor_ = 0;
+    } else {
+      int pred_index = succ->PredecessorIndexOf(pred_block);
+      bool compact = false;
+      for (int i = 0; i < size_; i++) {
+        HCheckTableEntry* this_entry = &entries_[i];
+        HCheckTableEntry* that_entry;
+        if (this_entry->object_->IsPhi() &&
+            this_entry->object_->block() == succ) {
+          HPhi* phi = HPhi::cast(this_entry->object_);
+          HValue* phi_operand = phi->OperandAt(pred_index);
+          that_entry = that->Find(phi_operand);
+
+        } else {
+          that_entry = that->Find(this_entry->object_);
+        }
+
+        if (that_entry == NULL ||
+            (that_entry->state_ == HCheckTableEntry::CHECKED &&
+             this_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) ||
+            (this_entry->state_ == HCheckTableEntry::CHECKED &&
+             that_entry->state_ == HCheckTableEntry::UNCHECKED_STABLE)) {
+          this_entry->object_ = NULL;
+          compact = true;
+        } else {
+          this_entry->maps_ =
+              this_entry->maps_->Union(that_entry->maps_, zone);
+          this_entry->state_ = HCheckTableEntry::StateMerge(
+              this_entry->state_, that_entry->state_);
+          if (this_entry->check_ != that_entry->check_) {
+            this_entry->check_ = NULL;
+          }
+          DCHECK(this_entry->maps_->size() > 0);
+        }
+      }
+      if (compact) Compact();
+    }
+
+    if (FLAG_trace_check_elimination) {
+      PrintF("B%d checkmaps-table merged with B%d table:\n",
+             succ->block_id(), pred_block->block_id());
+      Print(this);
+    }
+    return this;
+  }
+
+  void ReduceCheckMaps(HCheckMaps* instr) {
+    HValue* object = instr->value()->ActualValue();
+    HCheckTableEntry* entry = Find(object);
+    if (entry != NULL) {
+      // Entry found.
+      HGraph* graph = instr->block()->graph();
+      if (entry->maps_->IsSubset(instr->maps())) {
+        // The first check is more strict; the second is redundant.
+        if (entry->check_ != NULL) {
+          DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+          TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
+              instr->id(), instr->block()->block_id(), entry->check_->id()));
+          instr->DeleteAndReplaceWith(entry->check_);
+          INC_STAT(redundant_);
+        } else if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
+          DCHECK_NULL(entry->check_);
+          TRACE(("Marking redundant CheckMaps #%d at B%d as stability check\n",
+                 instr->id(), instr->block()->block_id()));
+          instr->set_maps(entry->maps_->Copy(graph->zone()));
+          instr->MarkAsStabilityCheck();
+          entry->state_ = HCheckTableEntry::CHECKED_STABLE;
+        } else if (!instr->IsStabilityCheck()) {
+          TRACE(("Marking redundant CheckMaps #%d at B%d as dead\n",
+              instr->id(), instr->block()->block_id()));
+          // Mark check as dead but leave it in the graph as a checkpoint for
+          // subsequent checks.
+          instr->SetFlag(HValue::kIsDead);
+          entry->check_ = instr;
+          INC_STAT(removed_);
+        }
+        return;
+      }
+      MapSet intersection = instr->maps()->Intersect(
+          entry->maps_, graph->zone());
+      if (intersection->size() == 0) {
+        // Intersection is empty; probably megamorphic.
+        INC_STAT(empty_);
+        entry->object_ = NULL;
+        Compact();
+      } else {
+        // Update set of maps in the entry.
+        entry->maps_ = intersection;
+        // Update state of the entry.
+        if (instr->maps_are_stable() ||
+            entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
+          entry->state_ = HCheckTableEntry::CHECKED_STABLE;
+        }
+        if (intersection->size() != instr->maps()->size()) {
+          // Narrow set of maps in the second check maps instruction.
+          if (entry->check_ != NULL &&
+              entry->check_->block() == instr->block() &&
+              entry->check_->IsCheckMaps()) {
+            // There is a check in the same block so replace it with a more
+            // strict check and eliminate the second check entirely.
+            HCheckMaps* check = HCheckMaps::cast(entry->check_);
+            DCHECK(!check->IsStabilityCheck());
+            TRACE(("CheckMaps #%d at B%d narrowed\n", check->id(),
+                check->block()->block_id()));
+            // Update map set and ensure that the check is alive.
+            check->set_maps(intersection);
+            check->ClearFlag(HValue::kIsDead);
+            TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
+                instr->id(), instr->block()->block_id(), entry->check_->id()));
+            instr->DeleteAndReplaceWith(entry->check_);
+          } else {
+            TRACE(("CheckMaps #%d at B%d narrowed\n", instr->id(),
+                instr->block()->block_id()));
+            instr->set_maps(intersection);
+            entry->check_ = instr->IsStabilityCheck() ? NULL : instr;
+          }
+
+          if (FLAG_trace_check_elimination) {
+            Print(this);
+          }
+          INC_STAT(narrowed_);
+        }
+      }
+    } else {
+      // No entry; insert a new one.
+      HCheckTableEntry::State state = instr->maps_are_stable()
+          ? HCheckTableEntry::CHECKED_STABLE
+          : HCheckTableEntry::CHECKED;
+      HCheckMaps* check = instr->IsStabilityCheck() ? NULL : instr;
+      Insert(object, check, instr->maps(), state);
+    }
+  }
+
+  void ReduceCheckInstanceType(HCheckInstanceType* instr) {
+    HValue* value = instr->value()->ActualValue();
+    HCheckTableEntry* entry = Find(value);
+    if (entry == NULL) {
+      if (instr->check() == HCheckInstanceType::IS_STRING) {
+        Insert(value, NULL, string_maps(), HCheckTableEntry::CHECKED);
+      }
+      return;
+    }
+    UniqueSet<Map>* maps = new(zone()) UniqueSet<Map>(
+        entry->maps_->size(), zone());
+    for (int i = 0; i < entry->maps_->size(); ++i) {
+      InstanceType type;
+      Unique<Map> map = entry->maps_->at(i);
+      {
+        // This is safe, because maps don't move and their instance type does
+        // not change.
+        AllowHandleDereference allow_deref;
+        type = map.handle()->instance_type();
+      }
+      if (instr->is_interval_check()) {
+        InstanceType first_type, last_type;
+        instr->GetCheckInterval(&first_type, &last_type);
+        if (first_type <= type && type <= last_type) maps->Add(map, zone());
+      } else {
+        uint8_t mask, tag;
+        instr->GetCheckMaskAndTag(&mask, &tag);
+        if ((type & mask) == tag) maps->Add(map, zone());
+      }
+    }
+    if (maps->size() == entry->maps_->size()) {
+      TRACE(("Removing redundant CheckInstanceType #%d at B%d\n",
+              instr->id(), instr->block()->block_id()));
+      EnsureChecked(entry, value, instr);
+      instr->DeleteAndReplaceWith(value);
+      INC_STAT(removed_cit_);
+    } else if (maps->size() != 0) {
+      entry->maps_ = maps;
+      if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
+        entry->state_ = HCheckTableEntry::CHECKED_STABLE;
+      }
+    }
+  }
+
+  void ReduceLoadNamedField(HLoadNamedField* instr) {
+    // Reduce a load of the map field when it is known to be a constant.
+    if (!instr->access().IsMap()) {
+      // Check if we introduce field maps here.
+      MapSet maps = instr->maps();
+      if (maps != NULL) {
+        DCHECK_NE(0, maps->size());
+        Insert(instr, NULL, maps, HCheckTableEntry::UNCHECKED_STABLE);
+      }
+      return;
+    }
+
+    HValue* object = instr->object()->ActualValue();
+    HCheckTableEntry* entry = Find(object);
+    if (entry == NULL || entry->maps_->size() != 1) return;  // Not a constant.
+
+    EnsureChecked(entry, object, instr);
+    Unique<Map> map = entry->maps_->at(0);
+    bool map_is_stable = (entry->state_ != HCheckTableEntry::CHECKED);
+    HConstant* constant = HConstant::CreateAndInsertBefore(
+        instr->block()->graph()->zone(), map, map_is_stable, instr);
+    instr->DeleteAndReplaceWith(constant);
+    INC_STAT(loads_);
+  }
+
+  void ReduceCheckHeapObject(HCheckHeapObject* instr) {
+    HValue* value = instr->value()->ActualValue();
+    if (Find(value) != NULL) {
+      // If the object has known maps, it's definitely a heap object.
+      instr->DeleteAndReplaceWith(value);
+      INC_STAT(removed_cho_);
+    }
+  }
+
+  void ReduceStoreNamedField(HStoreNamedField* instr) {
+    HValue* object = instr->object()->ActualValue();
+    if (instr->has_transition()) {
+      // This store transitions the object to a new map.
+      Kill(object);
+      HConstant* c_transition = HConstant::cast(instr->transition());
+      HCheckTableEntry::State state = c_transition->HasStableMapValue()
+          ? HCheckTableEntry::CHECKED_STABLE
+          : HCheckTableEntry::CHECKED;
+      Insert(object, NULL, c_transition->MapValue(), state);
+    } else if (instr->access().IsMap()) {
+      // This is a store directly to the map field of the object.
+      Kill(object);
+      if (!instr->value()->IsConstant()) return;
+      HConstant* c_value = HConstant::cast(instr->value());
+      HCheckTableEntry::State state = c_value->HasStableMapValue()
+          ? HCheckTableEntry::CHECKED_STABLE
+          : HCheckTableEntry::CHECKED;
+      Insert(object, NULL, c_value->MapValue(), state);
+    } else {
+      // If the instruction changes maps, it should be handled above.
+      CHECK(!instr->CheckChangesFlag(kMaps));
+    }
+  }
+
+  void ReduceCompareMap(HCompareMap* instr) {
+    HCheckTableEntry* entry = Find(instr->value()->ActualValue());
+    if (entry == NULL) return;
+
+    EnsureChecked(entry, instr->value(), instr);
+
+    int succ;
+    if (entry->maps_->Contains(instr->map())) {
+      if (entry->maps_->size() != 1) {
+        TRACE(("CompareMap #%d for #%d at B%d can't be eliminated: "
+               "ambiguous set of maps\n", instr->id(), instr->value()->id(),
+               instr->block()->block_id()));
+        return;
+      }
+      succ = 0;
+      INC_STAT(compares_true_);
+    } else {
+      succ = 1;
+      INC_STAT(compares_false_);
+    }
+
+    TRACE(("Marking redundant CompareMap #%d for #%d at B%d as %s\n",
+        instr->id(), instr->value()->id(), instr->block()->block_id(),
+        succ == 0 ? "true" : "false"));
+    instr->set_known_successor_index(succ);
+
+    int unreachable_succ = 1 - succ;
+    instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
+  }
+
+  void ReduceCompareObjectEqAndBranch(HCompareObjectEqAndBranch* instr) {
+    HValue* left = instr->left()->ActualValue();
+    HCheckTableEntry* le = Find(left);
+    if (le == NULL) return;
+    HValue* right = instr->right()->ActualValue();
+    HCheckTableEntry* re = Find(right);
+    if (re == NULL) return;
+
+    EnsureChecked(le, left, instr);
+    EnsureChecked(re, right, instr);
+
+    // TODO(bmeurer): Add a predicate here instead of computing the intersection.
+    MapSet intersection = le->maps_->Intersect(re->maps_, zone());
+    if (intersection->size() > 0) return;
+
+    TRACE(("Marking redundant CompareObjectEqAndBranch #%d at B%d as false\n",
+        instr->id(), instr->block()->block_id()));
+    int succ = 1;
+    instr->set_known_successor_index(succ);
+
+    int unreachable_succ = 1 - succ;
+    instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
+  }
+
+  void ReduceIsStringAndBranch(HIsStringAndBranch* instr) {
+    HValue* value = instr->value()->ActualValue();
+    HCheckTableEntry* entry = Find(value);
+    if (entry == NULL) return;
+    EnsureChecked(entry, value, instr);
+    int succ;
+    if (entry->maps_->IsSubset(string_maps())) {
+      TRACE(("Marking redundant IsStringAndBranch #%d at B%d as true\n",
+             instr->id(), instr->block()->block_id()));
+      succ = 0;
+    } else {
+      MapSet intersection = entry->maps_->Intersect(string_maps(), zone());
+      if (intersection->size() > 0) return;
+      TRACE(("Marking redundant IsStringAndBranch #%d at B%d as false\n",
+            instr->id(), instr->block()->block_id()));
+      succ = 1;
+    }
+    instr->set_known_successor_index(succ);
+    int unreachable_succ = 1 - succ;
+    instr->block()->MarkSuccEdgeUnreachable(unreachable_succ);
+  }
+
+  void ReduceTransitionElementsKind(HTransitionElementsKind* instr) {
+    HValue* object = instr->object()->ActualValue();
+    HCheckTableEntry* entry = Find(object);
+    // Can only learn more about an object that already has a known set of maps.
+    if (entry == NULL) {
+      Kill(object);
+      return;
+    }
+    EnsureChecked(entry, object, instr);
+    if (entry->maps_->Contains(instr->original_map())) {
+      // If the object has the original map, it will be transitioned.
+      UniqueSet<Map>* maps = entry->maps_->Copy(zone());
+      maps->Remove(instr->original_map());
+      maps->Add(instr->transitioned_map(), zone());
+      HCheckTableEntry::State state =
+          (entry->state_ == HCheckTableEntry::CHECKED_STABLE &&
+           instr->map_is_stable())
+              ? HCheckTableEntry::CHECKED_STABLE
+              : HCheckTableEntry::CHECKED;
+      Kill(object);
+      Insert(object, NULL, maps, state);
+    } else {
+      // Object does not have the given map, thus the transition is redundant.
+      instr->DeleteAndReplaceWith(object);
+      INC_STAT(transitions_);
+    }
+  }
+
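+  // If {entry} holds stable maps that have not yet been checked on this
+  // path, materialize an explicit map check (marked as a stability check)
+  // before {instr} and promote the entry to CHECKED_STABLE.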
+  void EnsureChecked(HCheckTableEntry* entry,
+                     HValue* value,
+                     HInstruction* instr) {
+    if (entry->state_ != HCheckTableEntry::UNCHECKED_STABLE) return;
+    HGraph* graph = instr->block()->graph();
+    HCheckMaps* check = HCheckMaps::CreateAndInsertBefore(
+        graph->zone(), value, entry->maps_->Copy(graph->zone()), true, instr);
+    check->MarkAsStabilityCheck();
+    entry->state_ = HCheckTableEntry::CHECKED_STABLE;
+    entry->check_ = NULL;
+  }
+
+  // Kill everything in the table.
+  void Kill() {
+    size_ = 0;
+    cursor_ = 0;
+  }
+
+  // Kill all unstable entries in the table.
+  void KillUnstableEntries() {
+    bool compact = false;
+    for (int i = 0; i < size_; ++i) {
+      HCheckTableEntry* entry = &entries_[i];
+      DCHECK_NOT_NULL(entry->object_);
+      if (entry->state_ == HCheckTableEntry::CHECKED) {
+        entry->object_ = NULL;
+        compact = true;
+      } else {
+        // All checked stable entries become unchecked stable.
+        entry->state_ = HCheckTableEntry::UNCHECKED_STABLE;
+        entry->check_ = NULL;
+      }
+    }
+    if (compact) Compact();
+  }
+
+  // Kill everything in the table that may alias {object}.
+  void Kill(HValue* object) {
+    bool compact = false;
+    for (int i = 0; i < size_; i++) {
+      HCheckTableEntry* entry = &entries_[i];
+      DCHECK_NOT_NULL(entry->object_);
+      if (phase_->aliasing_->MayAlias(entry->object_, object)) {
+        entry->object_ = NULL;
+        compact = true;
+      }
+    }
+    if (compact) Compact();
+    DCHECK_NULL(Find(object));
+  }
+
+  void Compact() {
+    // First, compact the array in place.
+    int max = size_, dest = 0, old_cursor = cursor_;
+    for (int i = 0; i < max; i++) {
+      if (entries_[i].object_ != NULL) {
+        if (dest != i) entries_[dest] = entries_[i];
+        dest++;
+      } else {
+        if (i < old_cursor) cursor_--;
+        size_--;
+      }
+    }
+    DCHECK(size_ == dest);
+    DCHECK(cursor_ <= size_);
+
+    // Preserve the age of the entries by moving the older entries to the end.
+    if (cursor_ == size_) return;  // Cursor already points at end.
+    if (cursor_ != 0) {
+      // | L = oldest |   R = newest   |       |
+      //              ^ cursor         ^ size  ^ MAX
+      HCheckTableEntry tmp_entries[kMaxTrackedObjects];
+      int L = cursor_;
+      int R = size_ - cursor_;
+
+      MemMove(&tmp_entries[0], &entries_[0], L * sizeof(HCheckTableEntry));
+      MemMove(&entries_[0], &entries_[L], R * sizeof(HCheckTableEntry));
+      MemMove(&entries_[R], &tmp_entries[0], L * sizeof(HCheckTableEntry));
+    }
+
+    cursor_ = size_;  // Move cursor to end.
+  }
+
+  static void Print(HCheckTable* table) {
+    if (table == NULL) {
+      PrintF("  unreachable\n");
+      return;
+    }
+
+    for (int i = 0; i < table->size_; i++) {
+      HCheckTableEntry* entry = &table->entries_[i];
+      DCHECK(entry->object_ != NULL);
+      PrintF("  checkmaps-table @%d: %s #%d ", i,
+             entry->object_->IsPhi() ? "phi" : "object", entry->object_->id());
+      if (entry->check_ != NULL) {
+        PrintF("check #%d ", entry->check_->id());
+      }
+      MapSet list = entry->maps_;
+      PrintF("%d %s maps { ", list->size(),
+             HCheckTableEntry::State2String(entry->state_));
+      for (int j = 0; j < list->size(); j++) {
+        if (j > 0) PrintF(", ");
+        PrintF("%" V8PRIxPTR, list->at(j).Hashcode());
+      }
+      PrintF(" }\n");
+    }
+  }
+
+  HCheckTableEntry* Find(HValue* object) {
+    for (int i = size_ - 1; i >= 0; i--) {
+      // Search from most-recently-inserted to least-recently-inserted.
+      HCheckTableEntry* entry = &entries_[i];
+      DCHECK(entry->object_ != NULL);
+      if (phase_->aliasing_->MustAlias(entry->object_, object)) return entry;
+    }
+    return NULL;
+  }
+
+  void Insert(HValue* object,
+              HInstruction* check,
+              Unique<Map> map,
+              HCheckTableEntry::State state) {
+    Insert(object, check, new(zone()) UniqueSet<Map>(map, zone()), state);
+  }
+
+  void Insert(HValue* object,
+              HInstruction* check,
+              MapSet maps,
+              HCheckTableEntry::State state) {
+    DCHECK(state != HCheckTableEntry::UNCHECKED_STABLE || check == NULL);
+    HCheckTableEntry* entry = &entries_[cursor_++];
+    entry->object_ = object;
+    entry->check_ = check;
+    entry->maps_ = maps;
+    entry->state_ = state;
+    // If the table becomes full, wrap around and overwrite older entries.
+    if (cursor_ == kMaxTrackedObjects) cursor_ = 0;
+    if (size_ < kMaxTrackedObjects) size_++;
+  }
+
+  Zone* zone() const { return phase_->zone(); }
+  MapSet string_maps() const { return phase_->string_maps(); }
+
+  friend class HCheckMapsEffects;
+  friend class HCheckEliminationPhase;
+
+  HCheckEliminationPhase* phase_;
+  HCheckTableEntry entries_[kMaxTrackedObjects];
+  int16_t cursor_;  // Must be <= kMaxTrackedObjects
+  int16_t size_;    // Must be <= kMaxTrackedObjects
+  STATIC_ASSERT(kMaxTrackedObjects < (1 << 15));
+};
+
+
+// Collects instructions that can cause effects that invalidate information
+// needed for check elimination.
+class HCheckMapsEffects : public ZoneObject {
+ public:
+  explicit HCheckMapsEffects(Zone* zone) : objects_(0, zone) { }
+
+  // Effects are _not_ disabled.
+  inline bool Disabled() const { return false; }
+
+  // Process a possibly side-effecting instruction.
+  void Process(HInstruction* instr, Zone* zone) {
+    switch (instr->opcode()) {
+      case HValue::kStoreNamedField: {
+        HStoreNamedField* store = HStoreNamedField::cast(instr);
+        if (store->access().IsMap() || store->has_transition()) {
+          objects_.Add(store->object(), zone);
+        }
+        break;
+      }
+      case HValue::kTransitionElementsKind: {
+        objects_.Add(HTransitionElementsKind::cast(instr)->object(), zone);
+        break;
+      }
+      default: {
+        flags_.Add(instr->ChangesFlags());
+        break;
+      }
+    }
+  }
+
+  // Apply these effects to the given check elimination table.
+  void Apply(HCheckTable* table) {
+    if (flags_.Contains(kOsrEntries)) {
+      // Uncontrollable map modifications; kill everything.
+      table->Kill();
+      return;
+    }
+
+    // Kill all unstable entries.
+    if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
+      table->KillUnstableEntries();
+    }
+
+    // Kill maps for each object contained in these effects.
+    for (int i = 0; i < objects_.length(); ++i) {
+      table->Kill(objects_[i]->ActualValue());
+    }
+  }
+
+  // Union these effects with the other effects.
+  void Union(HCheckMapsEffects* that, Zone* zone) {
+    flags_.Add(that->flags_);
+    for (int i = 0; i < that->objects_.length(); ++i) {
+      objects_.Add(that->objects_[i], zone);
+    }
+  }
+
+ private:
+  ZoneList<HValue*> objects_;
+  GVNFlagSet flags_;
+};
+
+
+// The main routine of the analysis phase. Use the HFlowEngine for either a
+// local or a global analysis.
+void HCheckEliminationPhase::Run() {
+  HFlowEngine<HCheckTable, HCheckMapsEffects> engine(graph(), zone());
+  HCheckTable* table = new(zone()) HCheckTable(this);
+
+  if (GLOBAL) {
+    // Perform a global analysis.
+    engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
+  } else {
+    // Perform only local analysis.
+    for (int i = 0; i < graph()->blocks()->length(); i++) {
+      table->Kill();
+      engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
+    }
+  }
+
+  if (FLAG_trace_check_elimination) PrintStats();
+}
+
+
+// Are we eliminated yet?
+void HCheckEliminationPhase::PrintStats() {
+#if DEBUG
+  #define PRINT_STAT(x) if (x##_ > 0) PrintF(" %-16s = %2d\n", #x, x##_)
+#else
+  #define PRINT_STAT(x)
+#endif
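+  // In debug builds, PRINT_STAT(x) token-pastes an underscore onto x to read
+  // the matching counter member, e.g. PRINT_STAT(redundant) prints redundant_.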
+  PRINT_STAT(redundant);
+  PRINT_STAT(removed);
+  PRINT_STAT(removed_cho);
+  PRINT_STAT(removed_cit);
+  PRINT_STAT(narrowed);
+  PRINT_STAT(loads);
+  PRINT_STAT(empty);
+  PRINT_STAT(compares_true);
+  PRINT_STAT(compares_false);
+  PRINT_STAT(transitions);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-check-elimination.h b/src/crankshaft/hydrogen-check-elimination.h
new file mode 100644
index 0000000..d6339df
--- /dev/null
+++ b/src/crankshaft/hydrogen-check-elimination.h
@@ -0,0 +1,74 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-alias-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Remove CheckMaps instructions through flow- and branch-sensitive analysis.
+class HCheckEliminationPhase : public HPhase {
+ public:
+  explicit HCheckEliminationPhase(HGraph* graph)
+      : HPhase("H_Check Elimination", graph), aliasing_(),
+        string_maps_(kStringMapsSize, zone()) {
+    // Compute the set of string maps.
+    #define ADD_STRING_MAP(type, size, name, Name)                  \
+      string_maps_.Add(Unique<Map>::CreateImmovable(                \
+              graph->isolate()->factory()->name##_map()), zone());
+    STRING_TYPE_LIST(ADD_STRING_MAP)
+    #undef ADD_STRING_MAP
+    DCHECK_EQ(kStringMapsSize, string_maps_.size());
+#ifdef DEBUG
+    redundant_ = 0;
+    removed_ = 0;
+    removed_cho_ = 0;
+    removed_cit_ = 0;
+    narrowed_ = 0;
+    loads_ = 0;
+    empty_ = 0;
+    compares_true_ = 0;
+    compares_false_ = 0;
+    transitions_ = 0;
+#endif
+  }
+
+  void Run();
+
+  friend class HCheckTable;
+
+ private:
+  const UniqueSet<Map>* string_maps() const { return &string_maps_; }
+
+  void PrintStats();
+
+  HAliasAnalyzer* aliasing_;
+  #define COUNT(type, size, name, Name) + 1
+  static const int kStringMapsSize = 0 STRING_TYPE_LIST(COUNT);
+  #undef COUNT
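+  // The two lines above expand to "0 + 1 + 1 ...", one "+ 1" per string
+  // type, so kStringMapsSize is the number of entries in STRING_TYPE_LIST.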
+  UniqueSet<Map> string_maps_;
+#ifdef DEBUG
+  int redundant_;
+  int removed_;
+  int removed_cho_;
+  int removed_cit_;
+  int narrowed_;
+  int loads_;
+  int empty_;
+  int compares_true_;
+  int compares_false_;
+  int transitions_;
+#endif
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_CHECK_ELIMINATION_H_
diff --git a/src/crankshaft/hydrogen-dce.cc b/src/crankshaft/hydrogen-dce.cc
new file mode 100644
index 0000000..3cb9cf4
--- /dev/null
+++ b/src/crankshaft/hydrogen-dce.cc
@@ -0,0 +1,105 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-dce.h"
+
+namespace v8 {
+namespace internal {
+
+void HDeadCodeEliminationPhase::MarkLive(
+    HValue* instr, ZoneList<HValue*>* worklist) {
+  if (instr->CheckFlag(HValue::kIsLive)) return;  // Already live.
+
+  if (FLAG_trace_dead_code_elimination) PrintLive(NULL, instr);
+
+  // Transitively mark all inputs of live instructions live.
+  worklist->Add(instr, zone());
+  while (!worklist->is_empty()) {
+    HValue* instr = worklist->RemoveLast();
+    instr->SetFlag(HValue::kIsLive);
+    for (int i = 0; i < instr->OperandCount(); ++i) {
+      HValue* input = instr->OperandAt(i);
+      if (!input->CheckFlag(HValue::kIsLive)) {
+        input->SetFlag(HValue::kIsLive);
+        worklist->Add(input, zone());
+        if (FLAG_trace_dead_code_elimination) PrintLive(instr, input);
+      }
+    }
+  }
+}
+
+
+void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) {
+  AllowHandleDereference allow_deref;
+  OFStream os(stdout);
+  os << "[MarkLive ";
+  if (ref != NULL) {
+    os << *ref;
+  } else {
+    os << "root";
+  }
+  os << " -> " << *instr << "]" << std::endl;
+}
+
+
+void HDeadCodeEliminationPhase::MarkLiveInstructions() {
+  ZoneList<HValue*> worklist(10, zone());
+
+  // Transitively mark all live instructions, starting from roots.
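+  // A root is any instruction for which CannotBeEliminated() holds,
+  // typically because it has observable side effects.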
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      if (instr->CannotBeEliminated()) MarkLive(instr, &worklist);
+    }
+    for (int j = 0; j < block->phis()->length(); j++) {
+      HPhi* phi = block->phis()->at(j);
+      if (phi->CannotBeEliminated()) MarkLive(phi, &worklist);
+    }
+  }
+
+  DCHECK(worklist.is_empty());  // Should have processed everything.
+}
+
+
+void HDeadCodeEliminationPhase::RemoveDeadInstructions() {
+  ZoneList<HPhi*> worklist(graph()->blocks()->length(), zone());
+
+  // Remove any instruction not marked kIsLive.
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      if (!instr->CheckFlag(HValue::kIsLive)) {
+        // Instruction has not been marked live, so remove it.
+        instr->DeleteAndReplaceWith(NULL);
+      } else {
+        // Clear the liveness flag to leave the graph clean for the next DCE.
+        instr->ClearFlag(HValue::kIsLive);
+      }
+    }
+    // Collect phis that are dead and remove them in the next pass.
+    for (int j = 0; j < block->phis()->length(); j++) {
+      HPhi* phi = block->phis()->at(j);
+      if (!phi->CheckFlag(HValue::kIsLive)) {
+        worklist.Add(phi, zone());
+      } else {
+        phi->ClearFlag(HValue::kIsLive);
+      }
+    }
+  }
+
+  // Process phis separately to avoid simultaneously mutating the phi list.
+  while (!worklist.is_empty()) {
+    HPhi* phi = worklist.RemoveLast();
+    HBasicBlock* block = phi->block();
+    phi->DeleteAndReplaceWith(NULL);
+    if (phi->HasMergedIndex()) {
+      block->RecordDeletedPhi(phi->merged_index());
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-dce.h b/src/crankshaft/hydrogen-dce.h
new file mode 100644
index 0000000..f620a3c
--- /dev/null
+++ b/src/crankshaft/hydrogen-dce.h
@@ -0,0 +1,35 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_DCE_H_
+#define V8_CRANKSHAFT_HYDROGEN_DCE_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HDeadCodeEliminationPhase : public HPhase {
+ public:
+  explicit HDeadCodeEliminationPhase(HGraph* graph)
+      : HPhase("H_Dead code elimination", graph) { }
+
+  void Run() {
+    MarkLiveInstructions();
+    RemoveDeadInstructions();
+  }
+
+ private:
+  void MarkLive(HValue* instr, ZoneList<HValue*>* worklist);
+  void PrintLive(HValue* ref, HValue* instr);
+  void MarkLiveInstructions();
+  void RemoveDeadInstructions();
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_DCE_H_
diff --git a/src/crankshaft/hydrogen-dehoist.cc b/src/crankshaft/hydrogen-dehoist.cc
new file mode 100644
index 0000000..34de94a
--- /dev/null
+++ b/src/crankshaft/hydrogen-dehoist.cc
@@ -0,0 +1,72 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-dehoist.h"
+
+#include "src/base/safe_math.h"
+
+namespace v8 {
+namespace internal {
+
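+// Rewrite a keyed access whose index has the form "i + c", "c + i" or
+// "i - c" (with constant c) into an access with key "i" and a base offset
+// increased by c * element_size; e.g. a load of a[i + 1] from an array with
+// 4-byte elements becomes a load of a[i] with the base offset increased by
+// 4. Bails out on overflow or a negative resulting offset.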
+static void DehoistArrayIndex(ArrayInstructionInterface* array_operation) {
+  HValue* index = array_operation->GetKey()->ActualValue();
+  if (!index->representation().IsSmiOrInteger32()) return;
+  if (!index->IsAdd() && !index->IsSub()) return;
+
+  HConstant* constant;
+  HValue* subexpression;
+  HBinaryOperation* binary_operation = HBinaryOperation::cast(index);
+  if (binary_operation->left()->IsConstant() && index->IsAdd()) {
+    subexpression = binary_operation->right();
+    constant = HConstant::cast(binary_operation->left());
+  } else if (binary_operation->right()->IsConstant()) {
+    subexpression = binary_operation->left();
+    constant = HConstant::cast(binary_operation->right());
+  } else {
+    return;
+  }
+
+  if (!constant->HasInteger32Value()) return;
+  v8::base::internal::CheckedNumeric<int32_t> checked_value =
+      constant->Integer32Value();
+  int32_t sign = binary_operation->IsSub() ? -1 : 1;
+  checked_value = checked_value * sign;
+
+  // Multiply value by elements size, bailing out on overflow.
+  int32_t elements_kind_size =
+      1 << ElementsKindToShiftSize(array_operation->elements_kind());
+  checked_value = checked_value * elements_kind_size;
+  if (!checked_value.IsValid()) return;
+  int32_t value = checked_value.ValueOrDie();
+  if (value < 0) return;
+
+  // Ensure that the array operation can add value to existing base offset
+  // without overflowing.
+  if (!array_operation->TryIncreaseBaseOffset(value)) return;
+
+  array_operation->SetKey(subexpression);
+  if (binary_operation->HasNoUses()) {
+    binary_operation->DeleteAndReplaceWith(NULL);
+  }
+
+  array_operation->SetDehoisted(true);
+}
+
+
+void HDehoistIndexComputationsPhase::Run() {
+  const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+  for (int i = 0; i < blocks->length(); ++i) {
+    for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      if (instr->IsLoadKeyed()) {
+        DehoistArrayIndex(HLoadKeyed::cast(instr));
+      } else if (instr->IsStoreKeyed()) {
+        DehoistArrayIndex(HStoreKeyed::cast(instr));
+      }
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-dehoist.h b/src/crankshaft/hydrogen-dehoist.h
new file mode 100644
index 0000000..d68f62c
--- /dev/null
+++ b/src/crankshaft/hydrogen-dehoist.h
@@ -0,0 +1,29 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
+#define V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HDehoistIndexComputationsPhase : public HPhase {
+ public:
+  explicit HDehoistIndexComputationsPhase(HGraph* graph)
+      : HPhase("H_Dehoist index computations", graph) { }
+
+  void Run();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HDehoistIndexComputationsPhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_DEHOIST_H_
diff --git a/src/crankshaft/hydrogen-environment-liveness.cc b/src/crankshaft/hydrogen-environment-liveness.cc
new file mode 100644
index 0000000..ae0bd08
--- /dev/null
+++ b/src/crankshaft/hydrogen-environment-liveness.cc
@@ -0,0 +1,232 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "src/crankshaft/hydrogen-environment-liveness.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+HEnvironmentLivenessAnalysisPhase::HEnvironmentLivenessAnalysisPhase(
+    HGraph* graph)
+    : HPhase("H_Environment liveness analysis", graph),
+      block_count_(graph->blocks()->length()),
+      maximum_environment_size_(graph->maximum_environment_size()),
+      live_at_block_start_(block_count_, zone()),
+      first_simulate_(block_count_, zone()),
+      first_simulate_invalid_for_index_(block_count_, zone()),
+      markers_(maximum_environment_size_, zone()),
+      collect_markers_(true),
+      last_simulate_(NULL),
+      went_live_since_last_simulate_(maximum_environment_size_, zone()) {
+  DCHECK(maximum_environment_size_ > 0);
+  for (int i = 0; i < block_count_; ++i) {
+    live_at_block_start_.Add(
+        new(zone()) BitVector(maximum_environment_size_, zone()), zone());
+    first_simulate_.Add(NULL, zone());
+    first_simulate_invalid_for_index_.Add(
+        new(zone()) BitVector(maximum_environment_size_, zone()), zone());
+  }
+}
+
+
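+// "Zapping" an environment slot overwrites its value in the given HSimulate
+// with the undefined constant, so that the dead value is no longer kept
+// alive through the deoptimization environment.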
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlot(
+    int index, HSimulate* simulate) {
+  int operand_index = simulate->ToOperandIndex(index);
+  if (operand_index == -1) {
+    simulate->AddAssignedValue(index, graph()->GetConstantUndefined());
+  } else {
+    simulate->SetOperandAt(operand_index, graph()->GetConstantUndefined());
+  }
+}
+
+
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsInSuccessors(
+    HBasicBlock* block, BitVector* live) {
+  // When a value is live in successor A but dead in B, we must
+  // explicitly zap it in B.
+  for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+    HBasicBlock* successor = it.Current();
+    int successor_id = successor->block_id();
+    BitVector* live_in_successor = live_at_block_start_[successor_id];
+    if (live_in_successor->Equals(*live)) continue;
+    for (int i = 0; i < live->length(); ++i) {
+      if (!live->Contains(i)) continue;
+      if (live_in_successor->Contains(i)) continue;
+      if (first_simulate_invalid_for_index_.at(successor_id)->Contains(i)) {
+        continue;
+      }
+      HSimulate* simulate = first_simulate_.at(successor_id);
+      if (simulate == NULL) continue;
+      DCHECK(VerifyClosures(simulate->closure(),
+          block->last_environment()->closure()));
+      ZapEnvironmentSlot(i, simulate);
+    }
+  }
+}
+
+
+void HEnvironmentLivenessAnalysisPhase::ZapEnvironmentSlotsForInstruction(
+    HEnvironmentMarker* marker) {
+  if (!marker->CheckFlag(HValue::kEndsLiveRange)) return;
+  HSimulate* simulate = marker->next_simulate();
+  if (simulate != NULL) {
+    DCHECK(VerifyClosures(simulate->closure(), marker->closure()));
+    ZapEnvironmentSlot(marker->index(), simulate);
+  }
+}
+
+
+void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtBlockEnd(
+    HBasicBlock* block,
+    BitVector* live) {
+  // Liveness at the end of each block: union of liveness in successors.
+  live->Clear();
+  for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+    live->Union(*live_at_block_start_[it.Current()->block_id()]);
+  }
+}
+
+
+void HEnvironmentLivenessAnalysisPhase::UpdateLivenessAtInstruction(
+    HInstruction* instr,
+    BitVector* live) {
+  switch (instr->opcode()) {
+    case HValue::kEnvironmentMarker: {
+      HEnvironmentMarker* marker = HEnvironmentMarker::cast(instr);
+      int index = marker->index();
+      if (!live->Contains(index)) {
+        marker->SetFlag(HValue::kEndsLiveRange);
+      } else {
+        marker->ClearFlag(HValue::kEndsLiveRange);
+      }
+      if (!went_live_since_last_simulate_.Contains(index)) {
+        marker->set_next_simulate(last_simulate_);
+      }
+      if (marker->kind() == HEnvironmentMarker::LOOKUP) {
+        live->Add(index);
+      } else {
+        DCHECK(marker->kind() == HEnvironmentMarker::BIND);
+        live->Remove(index);
+        went_live_since_last_simulate_.Add(index);
+      }
+      if (collect_markers_) {
+        // Populate |markers_| list during the first pass.
+        markers_.Add(marker, zone());
+      }
+      break;
+    }
+    case HValue::kLeaveInlined:
+      // No environment values are live at the end of an inlined section.
+      live->Clear();
+      last_simulate_ = NULL;
+
+      // The following DCHECKs guard the assumption used in case
+      // kEnterInlined below:
+      DCHECK(instr->next()->IsSimulate());
+      DCHECK(instr->next()->next()->IsGoto());
+
+      break;
+    case HValue::kEnterInlined: {
+      // An environment value is live here iff it is live at some return
+      // target block. Here we make use of the fact that the end of an
+      // inline sequence always looks like this: HLeaveInlined, HSimulate,
+      // HGoto (to return_target block), with no environment lookups in
+      // between (see DCHECKs above).
+      HEnterInlined* enter = HEnterInlined::cast(instr);
+      live->Clear();
+      for (int i = 0; i < enter->return_targets()->length(); ++i) {
+        int return_id = enter->return_targets()->at(i)->block_id();
+        live->Union(*live_at_block_start_[return_id]);
+      }
+      last_simulate_ = NULL;
+      break;
+    }
+    case HValue::kSimulate:
+      last_simulate_ = HSimulate::cast(instr);
+      went_live_since_last_simulate_.Clear();
+      break;
+    default:
+      break;
+  }
+}
+
+
+void HEnvironmentLivenessAnalysisPhase::Run() {
+  DCHECK(maximum_environment_size_ > 0);
+
+  // Main iteration. Compute liveness of environment slots, and store it
+  // for each block until it doesn't change any more. For efficiency, visit
+  // blocks in reverse order and walk backwards through each block. We
+  // need several iterations to propagate liveness through nested loops.
+  BitVector live(maximum_environment_size_, zone());
+  BitVector worklist(block_count_, zone());
+  for (int i = 0; i < block_count_; ++i) {
+    worklist.Add(i);
+  }
+  while (!worklist.IsEmpty()) {
+    for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
+      if (!worklist.Contains(block_id)) {
+        continue;
+      }
+      worklist.Remove(block_id);
+      last_simulate_ = NULL;
+
+      HBasicBlock* block = graph()->blocks()->at(block_id);
+      UpdateLivenessAtBlockEnd(block, &live);
+
+      for (HInstruction* instr = block->end(); instr != NULL;
+           instr = instr->previous()) {
+        UpdateLivenessAtInstruction(instr, &live);
+      }
+
+      // Reached the start of the block, do necessary bookkeeping:
+      // store computed information for this block and add predecessors
+      // to the work list as necessary.
+      first_simulate_.Set(block_id, last_simulate_);
+      first_simulate_invalid_for_index_[block_id]->CopyFrom(
+          went_live_since_last_simulate_);
+      if (live_at_block_start_[block_id]->UnionIsChanged(live)) {
+        for (int i = 0; i < block->predecessors()->length(); ++i) {
+          worklist.Add(block->predecessors()->at(i)->block_id());
+        }
+        if (block->IsInlineReturnTarget()) {
+          worklist.Add(block->inlined_entry_block()->block_id());
+        }
+      }
+    }
+    // Only collect bind/lookup instructions during the first pass.
+    collect_markers_ = false;
+  }
+
+  // Analysis finished. Zap dead environment slots.
+  for (int i = 0; i < markers_.length(); ++i) {
+    ZapEnvironmentSlotsForInstruction(markers_[i]);
+  }
+  for (int block_id = block_count_ - 1; block_id >= 0; --block_id) {
+    HBasicBlock* block = graph()->blocks()->at(block_id);
+    UpdateLivenessAtBlockEnd(block, &live);
+    ZapEnvironmentSlotsInSuccessors(block, &live);
+  }
+
+  // Finally, remove the HEnvironment{Bind,Lookup} markers.
+  for (int i = 0; i < markers_.length(); ++i) {
+    markers_[i]->DeleteAndReplaceWith(NULL);
+  }
+}
+
+
+#ifdef DEBUG
+bool HEnvironmentLivenessAnalysisPhase::VerifyClosures(
+    Handle<JSFunction> a, Handle<JSFunction> b) {
+  Heap::RelocationLock for_heap_access(isolate()->heap());
+  AllowHandleDereference for_verification;
+  return a.is_identical_to(b);
+}
+#endif
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-environment-liveness.h b/src/crankshaft/hydrogen-environment-liveness.h
new file mode 100644
index 0000000..d9e156b
--- /dev/null
+++ b/src/crankshaft/hydrogen-environment-liveness.h
@@ -0,0 +1,68 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
+#define V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Trims live ranges of environment slots by doing explicit liveness analysis.
+// Values in the environment are kept alive by every subsequent LInstruction
+// that is assigned an LEnvironment, which creates register pressure and
+// unnecessary spill slot moves. Therefore it is beneficial to trim the
+// live ranges of environment slots by zapping them with a constant after
+// the last lookup that refers to them.
+// Slots are identified by their index and only affected if whitelisted in
+// HOptimizedGraphBuilder::IsEligibleForEnvironmentLivenessAnalysis().
+class HEnvironmentLivenessAnalysisPhase : public HPhase {
+ public:
+  explicit HEnvironmentLivenessAnalysisPhase(HGraph* graph);
+
+  void Run();
+
+ private:
+  void ZapEnvironmentSlot(int index, HSimulate* simulate);
+  void ZapEnvironmentSlotsInSuccessors(HBasicBlock* block, BitVector* live);
+  void ZapEnvironmentSlotsForInstruction(HEnvironmentMarker* marker);
+  void UpdateLivenessAtBlockEnd(HBasicBlock* block, BitVector* live);
+  void UpdateLivenessAtInstruction(HInstruction* instr, BitVector* live);
+#ifdef DEBUG
+  bool VerifyClosures(Handle<JSFunction> a, Handle<JSFunction> b);
+#endif
+
+  int block_count_;
+
+  // Largest number of local variables in any environment in the graph
+  // (including inlined environments).
+  int maximum_environment_size_;
+
+  // Per-block data. All these lists are indexed by block_id.
+  ZoneList<BitVector*> live_at_block_start_;
+  ZoneList<HSimulate*> first_simulate_;
+  ZoneList<BitVector*> first_simulate_invalid_for_index_;
+
+  // List of all HEnvironmentMarker instructions for quick iteration/deletion.
+  // It is populated during the first pass over the graph, controlled by
+  // |collect_markers_|.
+  ZoneList<HEnvironmentMarker*> markers_;
+  bool collect_markers_;
+
+  // Keeps track of the last simulate seen, as well as the environment slots
+  // for which a new live range has started since (so they must not be zapped
+  // in that simulate when the end of another live range of theirs is found).
+  HSimulate* last_simulate_;
+  BitVector went_live_since_last_simulate_;
+
+  DISALLOW_COPY_AND_ASSIGN(HEnvironmentLivenessAnalysisPhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_ENVIRONMENT_LIVENESS_H_
diff --git a/src/crankshaft/hydrogen-escape-analysis.cc b/src/crankshaft/hydrogen-escape-analysis.cc
new file mode 100644
index 0000000..ab3bff2
--- /dev/null
+++ b/src/crankshaft/hydrogen-escape-analysis.cc
@@ -0,0 +1,332 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-escape-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool HEscapeAnalysisPhase::HasNoEscapingUses(HValue* value, int size) {
+  for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    if (use->HasEscapingOperandAt(it.index())) {
+      if (FLAG_trace_escape_analysis) {
+        PrintF("#%d (%s) escapes through #%d (%s) @%d\n", value->id(),
+               value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+      }
+      return false;
+    }
+    if (use->HasOutOfBoundsAccess(size)) {
+      if (FLAG_trace_escape_analysis) {
+        PrintF("#%d (%s) out of bounds at #%d (%s) @%d\n", value->id(),
+               value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+      }
+      return false;
+    }
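+    // A use that merely redefines this value (e.g. a check) does not itself
+    // cause an escape, but uses of the redefinition might, so recurse.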
+    int redefined_index = use->RedefinedOperandIndex();
+    if (redefined_index == it.index() && !HasNoEscapingUses(use, size)) {
+      if (FLAG_trace_escape_analysis) {
+        PrintF("#%d (%s) escapes redefinition #%d (%s) @%d\n", value->id(),
+               value->Mnemonic(), use->id(), use->Mnemonic(), it.index());
+      }
+      return false;
+    }
+  }
+  return true;
+}
+
+
+void HEscapeAnalysisPhase::CollectCapturedValues() {
+  int block_count = graph()->blocks()->length();
+  for (int i = 0; i < block_count; ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      if (!instr->IsAllocate()) continue;
+      HAllocate* allocate = HAllocate::cast(instr);
+      if (!allocate->size()->IsInteger32Constant()) continue;
+      int size_in_bytes = allocate->size()->GetInteger32Constant();
+      if (HasNoEscapingUses(instr, size_in_bytes)) {
+        if (FLAG_trace_escape_analysis) {
+          PrintF("#%d (%s) is being captured\n", instr->id(),
+                 instr->Mnemonic());
+        }
+        captured_.Add(instr, zone());
+      }
+    }
+  }
+}
+
+
+HCapturedObject* HEscapeAnalysisPhase::NewState(HInstruction* previous) {
+  Zone* zone = graph()->zone();
+  HCapturedObject* state =
+      new(zone) HCapturedObject(number_of_values_, number_of_objects_, zone);
+  state->InsertAfter(previous);
+  return state;
+}
+
+
+// Create a new state for replacing HAllocate instructions.
+HCapturedObject* HEscapeAnalysisPhase::NewStateForAllocation(
+    HInstruction* previous) {
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HCapturedObject* state = NewState(previous);
+  for (int index = 0; index < number_of_values_; index++) {
+    state->SetOperandAt(index, undefined);
+  }
+  return state;
+}
+
+
+// Create a new state full of phis for loop header entries.
+HCapturedObject* HEscapeAnalysisPhase::NewStateForLoopHeader(
+    HInstruction* previous,
+    HCapturedObject* old_state) {
+  HBasicBlock* block = previous->block();
+  HCapturedObject* state = NewState(previous);
+  for (int index = 0; index < number_of_values_; index++) {
+    HValue* operand = old_state->OperandAt(index);
+    HPhi* phi = NewPhiAndInsert(block, operand, index);
+    state->SetOperandAt(index, phi);
+  }
+  return state;
+}
+
+
+// Create a new state by copying an existing one.
+HCapturedObject* HEscapeAnalysisPhase::NewStateCopy(
+    HInstruction* previous,
+    HCapturedObject* old_state) {
+  HCapturedObject* state = NewState(previous);
+  for (int index = 0; index < number_of_values_; index++) {
+    HValue* operand = old_state->OperandAt(index);
+    state->SetOperandAt(index, operand);
+  }
+  return state;
+}
+
+
+// Insert a newly created phi into the given block and fill all incoming
+// edges with the given value.
+HPhi* HEscapeAnalysisPhase::NewPhiAndInsert(HBasicBlock* block,
+                                            HValue* incoming_value,
+                                            int index) {
+  Zone* zone = graph()->zone();
+  HPhi* phi = new(zone) HPhi(HPhi::kInvalidMergedIndex, zone);
+  for (int i = 0; i < block->predecessors()->length(); i++) {
+    phi->AddInput(incoming_value);
+  }
+  block->AddPhi(phi);
+  return phi;
+}
+
+
+// Insert a newly created value check as a replacement for map checks.
+HValue* HEscapeAnalysisPhase::NewMapCheckAndInsert(HCapturedObject* state,
+                                                   HCheckMaps* mapcheck) {
+  Zone* zone = graph()->zone();
+  HValue* value = state->map_value();
+  // TODO(mstarzinger): This will narrow a map check against a set of maps
+  // down to the first element in the set. Revisit and fix this.
+  HCheckValue* check = HCheckValue::New(graph()->isolate(), zone, NULL, value,
+                                        mapcheck->maps()->at(0), false);
+  check->InsertBefore(mapcheck);
+  return check;
+}
+
+
+// Replace a field load with a given value, forcing the representation
+// expected by the load (Smi/Integer32 or Double) if necessary.
+HValue* HEscapeAnalysisPhase::NewLoadReplacement(
+    HLoadNamedField* load, HValue* load_value) {
+  isolate()->counters()->crankshaft_escape_loads_replaced()->Increment();
+  HValue* replacement = load_value;
+  Representation representation = load->representation();
+  if (representation.IsSmiOrInteger32() || representation.IsDouble()) {
+    Zone* zone = graph()->zone();
+    HInstruction* new_instr = HForceRepresentation::New(
+        graph()->isolate(), zone, NULL, load_value, representation);
+    new_instr->InsertAfter(load);
+    replacement = new_instr;
+  }
+  return replacement;
+}
+
+
+// Performs a forward data-flow analysis of all loads and stores on the
+// given captured allocation. This uses a reverse post-order iteration
+// over affected basic blocks. All non-escaping instructions are handled
+// and replaced during the analysis.
+void HEscapeAnalysisPhase::AnalyzeDataFlow(HInstruction* allocate) {
+  HBasicBlock* allocate_block = allocate->block();
+  block_states_.AddBlock(NULL, graph()->blocks()->length(), zone());
+
+  // Iterate all blocks starting with the allocation block, since the
+  // allocation cannot dominate blocks that come before.
+  int start = allocate_block->block_id();
+  for (int i = start; i < graph()->blocks()->length(); i++) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    HCapturedObject* state = StateAt(block);
+
+    // Skip blocks that are not dominated by the captured allocation.
+    if (!allocate_block->Dominates(block) && allocate_block != block) continue;
+    if (FLAG_trace_escape_analysis) {
+      PrintF("Analyzing data-flow in B%d\n", block->block_id());
+    }
+
+    // Go through all instructions of the current block.
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      switch (instr->opcode()) {
+        case HValue::kAllocate: {
+          if (instr != allocate) continue;
+          state = NewStateForAllocation(allocate);
+          break;
+        }
+        case HValue::kLoadNamedField: {
+          HLoadNamedField* load = HLoadNamedField::cast(instr);
+          int index = load->access().offset() / kPointerSize;
+          if (load->object() != allocate) continue;
+          DCHECK(load->access().IsInobject());
+          HValue* replacement =
+            NewLoadReplacement(load, state->OperandAt(index));
+          load->DeleteAndReplaceWith(replacement);
+          if (FLAG_trace_escape_analysis) {
+            PrintF("Replacing load #%d with #%d (%s)\n", load->id(),
+                   replacement->id(), replacement->Mnemonic());
+          }
+          break;
+        }
+        case HValue::kStoreNamedField: {
+          HStoreNamedField* store = HStoreNamedField::cast(instr);
+          int index = store->access().offset() / kPointerSize;
+          if (store->object() != allocate) continue;
+          DCHECK(store->access().IsInobject());
+          state = NewStateCopy(store->previous(), state);
+          state->SetOperandAt(index, store->value());
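+          // A transitioning store also changes the object's map, which is
+          // tracked in slot 0 of the captured state (cf. map_value() above).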
+          if (store->has_transition()) {
+            state->SetOperandAt(0, store->transition());
+          }
+          if (store->HasObservableSideEffects()) {
+            state->ReuseSideEffectsFromStore(store);
+          }
+          store->DeleteAndReplaceWith(store->ActualValue());
+          if (FLAG_trace_escape_analysis) {
+            PrintF("Replacing store #%d%s\n", instr->id(),
+                   store->has_transition() ? " (with transition)" : "");
+          }
+          break;
+        }
+        case HValue::kArgumentsObject:
+        case HValue::kCapturedObject:
+        case HValue::kSimulate: {
+          for (int i = 0; i < instr->OperandCount(); i++) {
+            if (instr->OperandAt(i) != allocate) continue;
+            instr->SetOperandAt(i, state);
+          }
+          break;
+        }
+        case HValue::kCheckHeapObject: {
+          HCheckHeapObject* check = HCheckHeapObject::cast(instr);
+          if (check->value() != allocate) continue;
+          check->DeleteAndReplaceWith(check->ActualValue());
+          break;
+        }
+        case HValue::kCheckMaps: {
+          HCheckMaps* mapcheck = HCheckMaps::cast(instr);
+          if (mapcheck->value() != allocate) continue;
+          NewMapCheckAndInsert(state, mapcheck);
+          mapcheck->DeleteAndReplaceWith(mapcheck->ActualValue());
+          break;
+        }
+        default:
+          // Nothing to see here, move along ...
+          break;
+      }
+    }
+
+    // Propagate the block state forward to all successor blocks.
+    for (int i = 0; i < block->end()->SuccessorCount(); i++) {
+      HBasicBlock* succ = block->end()->SuccessorAt(i);
+      if (!allocate_block->Dominates(succ)) continue;
+      if (succ->predecessors()->length() == 1) {
+        // Case 1: This is the only predecessor, just reuse state.
+        SetStateAt(succ, state);
+      } else if (StateAt(succ) == NULL && succ->IsLoopHeader()) {
+        // Case 2: This is a state that enters a loop header, be
+        // pessimistic about loop headers, add phis for all values.
+        SetStateAt(succ, NewStateForLoopHeader(succ->first(), state));
+      } else if (StateAt(succ) == NULL) {
+        // Case 3: This is the first state propagated forward to the
+        // successor, leave a copy of the current state.
+        SetStateAt(succ, NewStateCopy(succ->first(), state));
+      } else {
+        // Case 4: This is a state that needs merging with previously
+        // propagated states, potentially introducing new phis lazily or
+        // adding values to existing phis.
+        HCapturedObject* succ_state = StateAt(succ);
+        for (int index = 0; index < number_of_values_; index++) {
+          HValue* operand = state->OperandAt(index);
+          HValue* succ_operand = succ_state->OperandAt(index);
+          if (succ_operand->IsPhi() && succ_operand->block() == succ) {
+            // Phi already exists, add operand.
+            HPhi* phi = HPhi::cast(succ_operand);
+            phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
+          } else if (succ_operand != operand) {
+            // Phi does not exist, introduce one.
+            HPhi* phi = NewPhiAndInsert(succ, succ_operand, index);
+            phi->SetOperandAt(succ->PredecessorIndexOf(block), operand);
+            succ_state->SetOperandAt(index, phi);
+          }
+        }
+      }
+    }
+  }
+
+  // All uses have been handled.
+  DCHECK(allocate->HasNoUses());
+  allocate->DeleteAndReplaceWith(NULL);
+}
+
+
+void HEscapeAnalysisPhase::PerformScalarReplacement() {
+  for (int i = 0; i < captured_.length(); i++) {
+    HAllocate* allocate = HAllocate::cast(captured_.at(i));
+
+    // Compute number of scalar values and start with clean slate.
+    int size_in_bytes = allocate->size()->GetInteger32Constant();
+    number_of_values_ = size_in_bytes / kPointerSize;
+    number_of_objects_++;
+    block_states_.Rewind(0);
+
+    // Perform actual analysis step.
+    AnalyzeDataFlow(allocate);
+
+    cumulative_values_ += number_of_values_;
+    DCHECK(allocate->HasNoUses());
+    DCHECK(!allocate->IsLinked());
+  }
+}
+
+
+void HEscapeAnalysisPhase::Run() {
+  // TODO(mstarzinger): We disable escape analysis with OSR for now, because
+  // spill slots might be uninitialized. Needs investigation.
+  if (graph()->has_osr()) return;
+  int max_fixpoint_iteration_count = FLAG_escape_analysis_iterations;
+  for (int i = 0; i < max_fixpoint_iteration_count; i++) {
+    CollectCapturedValues();
+    if (captured_.is_empty()) break;
+    isolate()->counters()->crankshaft_escape_allocs_replaced()->Increment(
+        captured_.length());
+    PerformScalarReplacement();
+    captured_.Rewind(0);
+  }
+}
+
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-escape-analysis.h b/src/crankshaft/hydrogen-escape-analysis.h
new file mode 100644
index 0000000..7dac6de
--- /dev/null
+++ b/src/crankshaft/hydrogen-escape-analysis.h
@@ -0,0 +1,71 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
+
+#include "src/allocation.h"
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HEscapeAnalysisPhase : public HPhase {
+ public:
+  explicit HEscapeAnalysisPhase(HGraph* graph)
+      : HPhase("H_Escape analysis", graph),
+        captured_(0, zone()),
+        number_of_objects_(0),
+        number_of_values_(0),
+        cumulative_values_(0),
+        block_states_(graph->blocks()->length(), zone()) { }
+
+  void Run();
+
+ private:
+  void CollectCapturedValues();
+  bool HasNoEscapingUses(HValue* value, int size);
+  void PerformScalarReplacement();
+  void AnalyzeDataFlow(HInstruction* instr);
+
+  HCapturedObject* NewState(HInstruction* prev);
+  HCapturedObject* NewStateForAllocation(HInstruction* prev);
+  HCapturedObject* NewStateForLoopHeader(HInstruction* prev, HCapturedObject*);
+  HCapturedObject* NewStateCopy(HInstruction* prev, HCapturedObject* state);
+
+  HPhi* NewPhiAndInsert(HBasicBlock* block, HValue* incoming_value, int index);
+
+  HValue* NewMapCheckAndInsert(HCapturedObject* state, HCheckMaps* mapcheck);
+
+  HValue* NewLoadReplacement(HLoadNamedField* load, HValue* load_value);
+
+  HCapturedObject* StateAt(HBasicBlock* block) {
+    return block_states_.at(block->block_id());
+  }
+
+  void SetStateAt(HBasicBlock* block, HCapturedObject* state) {
+    block_states_.Set(block->block_id(), state);
+  }
+
+  // List of allocations captured during collection phase.
+  ZoneList<HInstruction*> captured_;
+
+  // Number of captured objects on which scalar replacement was done.
+  int number_of_objects_;
+
+  // Number of scalar values tracked during scalar replacement phase.
+  int number_of_values_;
+  int cumulative_values_;
+
+  // Map of block IDs to the data-flow state at block entry during the
+  // scalar replacement phase.
+  ZoneList<HCapturedObject*> block_states_;
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_ESCAPE_ANALYSIS_H_
diff --git a/src/crankshaft/hydrogen-flow-engine.h b/src/crankshaft/hydrogen-flow-engine.h
new file mode 100644
index 0000000..3a488dd
--- /dev/null
+++ b/src/crankshaft/hydrogen-flow-engine.h
@@ -0,0 +1,220 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
+#define V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-instructions.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// An example implementation of effects that doesn't collect anything.
+class NoEffects : public ZoneObject {
+ public:
+  explicit NoEffects(Zone* zone) { }
+
+  inline bool Disabled() {
+    return true;  // Nothing to do.
+  }
+  template <class State>
+  inline void Apply(State* state) {
+    // do nothing.
+  }
+  inline void Process(HInstruction* value, Zone* zone) {
+    // do nothing.
+  }
+  inline void Union(NoEffects* other, Zone* zone) {
+    // do nothing.
+  }
+};
+
+
+// An example implementation of state that doesn't track anything.
+class NoState {
+ public:
+  inline NoState* Copy(HBasicBlock* succ, Zone* zone) {
+    return this;
+  }
+  inline NoState* Process(HInstruction* value, Zone* zone) {
+    return this;
+  }
+  inline NoState* Merge(HBasicBlock* succ, NoState* other, Zone* zone) {
+    return this;
+  }
+};
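+// Note: besides the instance methods shown above, the engine below also
+// calls two static hooks that a concrete State must provide:
+// State::Finish(state, block, zone) and
+// State::Merge(succ_state, succ, state, pred_block, zone).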
+
+
+// This class implements an engine that can drive flow-sensitive analyses
+// over a graph of basic blocks, either one block at a time (local analysis)
+// or over the entire graph (global analysis). The flow engine is parameterized
+// by the type of the state and the effects collected while walking over the
+// graph.
+//
+// The "State" collects which facts are known while passing over instructions
+// in control flow order, and the "Effects" collect summary information about
+// which facts could be invalidated on other control flow paths. The effects
+// are necessary to correctly handle loops in the control flow graph without
+// doing a fixed-point iteration. Thus the flow engine is guaranteed to visit
+// each block at most twice: once for state, and optionally once for effects.
+//
+// The flow engine requires the State and Effects classes to implement methods
+// like the example NoState and NoEffects above. It's not necessary to provide
+// an effects implementation for local analysis.
+template <class State, class Effects>
+class HFlowEngine {
+ public:
+  HFlowEngine(HGraph* graph, Zone* zone)
+    : graph_(graph),
+      zone_(zone),
+#if DEBUG
+      pred_counts_(graph->blocks()->length(), zone),
+#endif
+      block_states_(graph->blocks()->length(), zone),
+      loop_effects_(graph->blocks()->length(), zone) {
+    loop_effects_.AddBlock(NULL, graph_->blocks()->length(), zone);
+  }
+
+  // Local analysis. Iterates over the instructions in the given block.
+  State* AnalyzeOneBlock(HBasicBlock* block, State* state) {
+    // Go through all instructions of the current block, updating the state.
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      state = state->Process(it.Current(), zone_);
+    }
+    return state;
+  }
+
+  // Global analysis. Iterates over all blocks that are dominated by the given
+  // block, starting with the initial state. Computes effects for nested loops.
+  void AnalyzeDominatedBlocks(HBasicBlock* root, State* initial) {
+    InitializeStates();
+    SetStateAt(root, initial);
+
+    // Iterate all dominated blocks starting from the given start block.
+    for (int i = root->block_id(); i < graph_->blocks()->length(); i++) {
+      HBasicBlock* block = graph_->blocks()->at(i);
+
+      // Skip blocks not dominated by the root node.
+      if (SkipNonDominatedBlock(root, block)) continue;
+      State* state = State::Finish(StateAt(block), block, zone_);
+
+      if (block->IsReachable()) {
+        DCHECK(state != NULL);
+        if (block->IsLoopHeader()) {
+          // Apply loop effects before analyzing loop body.
+          ComputeLoopEffects(block)->Apply(state);
+        } else {
+          // Must have visited all predecessors before this block.
+          CheckPredecessorCount(block);
+        }
+
+        // Go through all instructions of the current block, updating the state.
+        for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+          state = state->Process(it.Current(), zone_);
+        }
+      }
+
+      // Propagate the block state forward to all successor blocks.
+      int max = block->end()->SuccessorCount();
+      for (int i = 0; i < max; i++) {
+        HBasicBlock* succ = block->end()->SuccessorAt(i);
+        IncrementPredecessorCount(succ);
+
+        if (max == 1 && succ->predecessors()->length() == 1) {
+          // Optimization: successor can inherit this state.
+          SetStateAt(succ, state);
+        } else {
+          // Merge the current state with the state already at the successor.
+          SetStateAt(succ,
+                     State::Merge(StateAt(succ), succ, state, block, zone_));
+        }
+      }
+    }
+  }
+
+ private:
+  // Computes and caches the loop effects for the loop which has the given
+  // block as its loop header.
+  Effects* ComputeLoopEffects(HBasicBlock* block) {
+    DCHECK(block->IsLoopHeader());
+    Effects* effects = loop_effects_[block->block_id()];
+    if (effects != NULL) return effects;  // Already analyzed this loop.
+
+    effects = new(zone_) Effects(zone_);
+    loop_effects_[block->block_id()] = effects;
+    if (effects->Disabled()) return effects;  // No effects for this analysis.
+
+    HLoopInformation* loop = block->loop_information();
+    int end = loop->GetLastBackEdge()->block_id();
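+    // The blocks of a loop occupy a contiguous range of block ids, from the
+    // header up to the last back edge.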
+    // Process the blocks between the header and the end.
+    for (int i = block->block_id(); i <= end; i++) {
+      HBasicBlock* member = graph_->blocks()->at(i);
+      if (i != block->block_id() && member->IsLoopHeader()) {
+        // Recursively compute and cache the effects of the nested loop.
+        DCHECK(member->loop_information()->parent_loop() == loop);
+        Effects* nested = ComputeLoopEffects(member);
+        effects->Union(nested, zone_);
+        // Skip the nested loop's blocks.
+        i = member->loop_information()->GetLastBackEdge()->block_id();
+      } else {
+        // Process all the effects of the block.
+        if (member->IsUnreachable()) continue;
+        DCHECK(member->current_loop() == loop);
+        for (HInstructionIterator it(member); !it.Done(); it.Advance()) {
+          effects->Process(it.Current(), zone_);
+        }
+      }
+    }
+    return effects;
+  }
+
+  inline bool SkipNonDominatedBlock(HBasicBlock* root, HBasicBlock* other) {
+    if (root->block_id() == 0) return false;  // Visit the whole graph.
+    if (root == other) return false;          // Always visit the root.
+    return !root->Dominates(other);           // Only visit dominated blocks.
+  }
+
+  inline State* StateAt(HBasicBlock* block) {
+    return block_states_.at(block->block_id());
+  }
+
+  inline void SetStateAt(HBasicBlock* block, State* state) {
+    block_states_.Set(block->block_id(), state);
+  }
+
+  inline void InitializeStates() {
+#if DEBUG
+    pred_counts_.Rewind(0);
+    pred_counts_.AddBlock(0, graph_->blocks()->length(), zone_);
+#endif
+    block_states_.Rewind(0);
+    block_states_.AddBlock(NULL, graph_->blocks()->length(), zone_);
+  }
+
+  inline void CheckPredecessorCount(HBasicBlock* block) {
+    DCHECK(block->predecessors()->length() == pred_counts_[block->block_id()]);
+  }
+
+  inline void IncrementPredecessorCount(HBasicBlock* block) {
+#if DEBUG
+    pred_counts_[block->block_id()]++;
+#endif
+  }
+
+  HGraph* graph_;                    // The hydrogen graph.
+  Zone* zone_;                       // Temporary zone.
+#if DEBUG
+  ZoneList<int> pred_counts_;        // Finished predecessors (by block id).
+#endif
+  ZoneList<State*> block_states_;    // Block states (by block id).
+  ZoneList<Effects*> loop_effects_;  // Loop effects (by block id).
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_FLOW_ENGINE_H_
diff --git a/src/crankshaft/hydrogen-gvn.cc b/src/crankshaft/hydrogen-gvn.cc
new file mode 100644
index 0000000..07bfabc
--- /dev/null
+++ b/src/crankshaft/hydrogen-gvn.cc
@@ -0,0 +1,899 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-gvn.h"
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class HInstructionMap final : public ZoneObject {
+ public:
+  HInstructionMap(Zone* zone, SideEffectsTracker* side_effects_tracker)
+      : array_size_(0),
+        lists_size_(0),
+        count_(0),
+        array_(NULL),
+        lists_(NULL),
+        free_list_head_(kNil),
+        side_effects_tracker_(side_effects_tracker) {
+    ResizeLists(kInitialSize, zone);
+    Resize(kInitialSize, zone);
+  }
+
+  void Kill(SideEffects side_effects);
+
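+  // Adding an instruction records everything it depends on, so that Kill()
+  // can return early when the killed side effects cannot affect any stored
+  // instruction.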
+  void Add(HInstruction* instr, Zone* zone) {
+    present_depends_on_.Add(side_effects_tracker_->ComputeDependsOn(instr));
+    Insert(instr, zone);
+  }
+
+  HInstruction* Lookup(HInstruction* instr) const;
+
+  HInstructionMap* Copy(Zone* zone) const {
+    return new(zone) HInstructionMap(zone, this);
+  }
+
+  bool IsEmpty() const { return count_ == 0; }
+
+ private:
+  // A linked list of HInstruction* values.  Stored in arrays.
+  struct HInstructionMapListElement {
+    HInstruction* instr;
+    int next;  // Index in the array of the next list element.
+  };
+  static const int kNil = -1;  // The end of a linked list.
+
+  // Must be a power of 2.
+  static const int kInitialSize = 16;
+
+  HInstructionMap(Zone* zone, const HInstructionMap* other);
+
+  void Resize(int new_size, Zone* zone);
+  void ResizeLists(int new_size, Zone* zone);
+  void Insert(HInstruction* instr, Zone* zone);
+  uint32_t Bound(uint32_t value) const { return value & (array_size_ - 1); }
+
+  int array_size_;
+  int lists_size_;
+  int count_;  // The number of values stored in the HInstructionMap.
+  SideEffects present_depends_on_;
+  // Primary store - contains the first value with a given hash.
+  // Colliding elements are stored in linked lists.
+  HInstructionMapListElement* array_;
+  // The linked lists containing hash collisions.
+  HInstructionMapListElement* lists_;
+  int free_list_head_;  // Unused elements in lists_ are on the free list.
+  SideEffectsTracker* side_effects_tracker_;
+};
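+
+// A hedged illustration (not code from this file) of how HInstructionMap
+// encodes hash chains by array index rather than by pointer. Suppose
+// instructions A and B hash to bucket 3 while C sits alone in bucket 5:
+//
+//   array_[3] = { instr: A, next: 7 }     // chain head for bucket 3
+//   lists_[7] = { instr: B, next: kNil }  // A's collision, linked by index
+//   array_[5] = { instr: C, next: kNil }
+//
+// Unused lists_ slots are threaded through free_list_head_, so dropping B
+// in Kill() just pushes slot 7 back onto the free list; no heap traffic.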
+
+
+class HSideEffectMap final BASE_EMBEDDED {
+ public:
+  HSideEffectMap();
+  explicit HSideEffectMap(HSideEffectMap* other);
+  HSideEffectMap& operator= (const HSideEffectMap& other);
+
+  void Kill(SideEffects side_effects);
+
+  void Store(SideEffects side_effects, HInstruction* instr);
+
+  bool IsEmpty() const { return count_ == 0; }
+
+  inline HInstruction* operator[](int i) const {
+    DCHECK(0 <= i);
+    DCHECK(i < kNumberOfTrackedSideEffects);
+    return data_[i];
+  }
+  inline HInstruction* at(int i) const { return operator[](i); }
+
+ private:
+  int count_;
+  HInstruction* data_[kNumberOfTrackedSideEffects];
+};
+
+
+void TraceGVN(const char* msg, ...) {
+  va_list arguments;
+  va_start(arguments, msg);
+  base::OS::VPrint(msg, arguments);
+  va_end(arguments);
+}
+
+
+// Wrap TraceGVN in macros to avoid the expense of evaluating its arguments
+// when --trace-gvn is off.
+#define TRACE_GVN_1(msg, a1)                    \
+  if (FLAG_trace_gvn) {                         \
+    TraceGVN(msg, a1);                          \
+  }
+
+#define TRACE_GVN_2(msg, a1, a2)                \
+  if (FLAG_trace_gvn) {                         \
+    TraceGVN(msg, a1, a2);                      \
+  }
+
+#define TRACE_GVN_3(msg, a1, a2, a3)            \
+  if (FLAG_trace_gvn) {                         \
+    TraceGVN(msg, a1, a2, a3);                  \
+  }
+
+#define TRACE_GVN_4(msg, a1, a2, a3, a4)        \
+  if (FLAG_trace_gvn) {                         \
+    TraceGVN(msg, a1, a2, a3, a4);              \
+  }
+
+#define TRACE_GVN_5(msg, a1, a2, a3, a4, a5)    \
+  if (FLAG_trace_gvn) {                         \
+    TraceGVN(msg, a1, a2, a3, a4, a5);          \
+  }
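+
+// Illustrative call (a sketch, not taken from this file): because each
+// wrapper expands to a bare if-block, the argument expressions below are
+// only evaluated when --trace-gvn is on:
+//
+//   TRACE_GVN_2("Replacing i%d with i%d\n", instr->id(), other->id());
+//
+// One caveat of the bare if-block expansion: inside an unbraced
+// `if (...) TRACE_GVN_1(...) else ...` the else would bind to the macro's
+// own if, so keep call sites braced.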
+
+
+HInstructionMap::HInstructionMap(Zone* zone, const HInstructionMap* other)
+    : array_size_(other->array_size_),
+      lists_size_(other->lists_size_),
+      count_(other->count_),
+      present_depends_on_(other->present_depends_on_),
+      array_(zone->NewArray<HInstructionMapListElement>(other->array_size_)),
+      lists_(zone->NewArray<HInstructionMapListElement>(other->lists_size_)),
+      free_list_head_(other->free_list_head_),
+      side_effects_tracker_(other->side_effects_tracker_) {
+  MemCopy(array_, other->array_,
+          array_size_ * sizeof(HInstructionMapListElement));
+  MemCopy(lists_, other->lists_,
+          lists_size_ * sizeof(HInstructionMapListElement));
+}
+
+
+void HInstructionMap::Kill(SideEffects changes) {
+  if (!present_depends_on_.ContainsAnyOf(changes)) return;
+  present_depends_on_.RemoveAll();
+  for (int i = 0; i < array_size_; ++i) {
+    HInstruction* instr = array_[i].instr;
+    if (instr != NULL) {
+      // Clear list of collisions first, so we know if it becomes empty.
+      int kept = kNil;  // List of kept elements.
+      int next;
+      for (int current = array_[i].next; current != kNil; current = next) {
+        next = lists_[current].next;
+        HInstruction* instr = lists_[current].instr;
+        SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
+        if (depends_on.ContainsAnyOf(changes)) {
+          // Drop it.
+          count_--;
+          lists_[current].next = free_list_head_;
+          free_list_head_ = current;
+        } else {
+          // Keep it.
+          lists_[current].next = kept;
+          kept = current;
+          present_depends_on_.Add(depends_on);
+        }
+      }
+      array_[i].next = kept;
+
+      // Now possibly drop the directly-indexed element.
+      instr = array_[i].instr;
+      SideEffects depends_on = side_effects_tracker_->ComputeDependsOn(instr);
+      if (depends_on.ContainsAnyOf(changes)) {  // Drop it.
+        count_--;
+        int head = array_[i].next;
+        if (head == kNil) {
+          array_[i].instr = NULL;
+        } else {
+          array_[i].instr = lists_[head].instr;
+          array_[i].next = lists_[head].next;
+          lists_[head].next = free_list_head_;
+          free_list_head_ = head;
+        }
+      } else {
+        present_depends_on_.Add(depends_on);  // Keep it.
+      }
+    }
+  }
+}
+
+
+HInstruction* HInstructionMap::Lookup(HInstruction* instr) const {
+  uint32_t hash = static_cast<uint32_t>(instr->Hashcode());
+  uint32_t pos = Bound(hash);
+  if (array_[pos].instr != NULL) {
+    if (array_[pos].instr->Equals(instr)) return array_[pos].instr;
+    int next = array_[pos].next;
+    while (next != kNil) {
+      if (lists_[next].instr->Equals(instr)) return lists_[next].instr;
+      next = lists_[next].next;
+    }
+  }
+  return NULL;
+}
+
+
+void HInstructionMap::Resize(int new_size, Zone* zone) {
+  DCHECK(new_size > count_);
+  // Hashing the values into the new array produces no more collisions than
+  // the old hash map did, so we can reuse the existing lists_ array if we
+  // are careful.
+
+  // Make sure we have at least one free element.
+  if (free_list_head_ == kNil) {
+    ResizeLists(lists_size_ << 1, zone);
+  }
+
+  HInstructionMapListElement* new_array =
+      zone->NewArray<HInstructionMapListElement>(new_size);
+  memset(new_array, 0, sizeof(HInstructionMapListElement) * new_size);
+
+  HInstructionMapListElement* old_array = array_;
+  int old_size = array_size_;
+
+  int old_count = count_;
+  count_ = 0;
+  // Do not modify present_depends_on_.  It is currently correct.
+  array_size_ = new_size;
+  array_ = new_array;
+
+  if (old_array != NULL) {
+    // Iterate over all the elements in lists, rehashing them.
+    for (int i = 0; i < old_size; ++i) {
+      if (old_array[i].instr != NULL) {
+        int current = old_array[i].next;
+        while (current != kNil) {
+          Insert(lists_[current].instr, zone);
+          int next = lists_[current].next;
+          lists_[current].next = free_list_head_;
+          free_list_head_ = current;
+          current = next;
+        }
+        // Rehash the directly stored instruction.
+        Insert(old_array[i].instr, zone);
+      }
+    }
+  }
+  USE(old_count);
+  DCHECK(count_ == old_count);
+}
+
+
+void HInstructionMap::ResizeLists(int new_size, Zone* zone) {
+  DCHECK(new_size > lists_size_);
+
+  HInstructionMapListElement* new_lists =
+      zone->NewArray<HInstructionMapListElement>(new_size);
+  memset(new_lists, 0, sizeof(HInstructionMapListElement) * new_size);
+
+  HInstructionMapListElement* old_lists = lists_;
+  int old_size = lists_size_;
+
+  lists_size_ = new_size;
+  lists_ = new_lists;
+
+  if (old_lists != NULL) {
+    MemCopy(lists_, old_lists, old_size * sizeof(HInstructionMapListElement));
+  }
+  for (int i = old_size; i < lists_size_; ++i) {
+    lists_[i].next = free_list_head_;
+    free_list_head_ = i;
+  }
+}
+
+
+void HInstructionMap::Insert(HInstruction* instr, Zone* zone) {
+  DCHECK(instr != NULL);
+  // Resize once the hashtable is half full.
+  if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
+  DCHECK(count_ < array_size_);
+  count_++;
+  uint32_t pos = Bound(static_cast<uint32_t>(instr->Hashcode()));
+  if (array_[pos].instr == NULL) {
+    array_[pos].instr = instr;
+    array_[pos].next = kNil;
+  } else {
+    if (free_list_head_ == kNil) {
+      ResizeLists(lists_size_ << 1, zone);
+    }
+    int new_element_pos = free_list_head_;
+    DCHECK(new_element_pos != kNil);
+    free_list_head_ = lists_[free_list_head_].next;
+    lists_[new_element_pos].instr = instr;
+    lists_[new_element_pos].next = array_[pos].next;
+    DCHECK(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL);
+    array_[pos].next = new_element_pos;
+  }
+}
+
+
+HSideEffectMap::HSideEffectMap() : count_(0) {
+  memset(data_, 0, kNumberOfTrackedSideEffects * kPointerSize);
+}
+
+
+HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
+  *this = *other;  // Calls operator=.
+}
+
+
+HSideEffectMap& HSideEffectMap::operator=(const HSideEffectMap& other) {
+  if (this != &other) {
+    MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
+  }
+  return *this;
+}
+
+
+void HSideEffectMap::Kill(SideEffects side_effects) {
+  for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+    if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
+      if (data_[i] != NULL) count_--;
+      data_[i] = NULL;
+    }
+  }
+}
+
+
+void HSideEffectMap::Store(SideEffects side_effects, HInstruction* instr) {
+  for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+    if (side_effects.ContainsFlag(GVNFlagFromInt(i))) {
+      if (data_[i] == NULL) count_++;
+      data_[i] = instr;
+    }
+  }
+}
+
+
+SideEffects SideEffectsTracker::ComputeChanges(HInstruction* instr) {
+  int index;
+  SideEffects result(instr->ChangesFlags());
+  if (result.ContainsFlag(kGlobalVars)) {
+    if (instr->IsStoreNamedField()) {
+      HStoreNamedField* store = HStoreNamedField::cast(instr);
+      HConstant* target = HConstant::cast(store->object());
+      if (ComputeGlobalVar(Unique<PropertyCell>::cast(target->GetUnique()),
+                           &index)) {
+        result.RemoveFlag(kGlobalVars);
+        result.AddSpecial(GlobalVar(index));
+        return result;
+      }
+    }
+    for (index = 0; index < kNumberOfGlobalVars; ++index) {
+      result.AddSpecial(GlobalVar(index));
+    }
+  } else if (result.ContainsFlag(kInobjectFields)) {
+    if (instr->IsStoreNamedField() &&
+        ComputeInobjectField(HStoreNamedField::cast(instr)->access(), &index)) {
+      result.RemoveFlag(kInobjectFields);
+      result.AddSpecial(InobjectField(index));
+    } else {
+      for (index = 0; index < kNumberOfInobjectFields; ++index) {
+        result.AddSpecial(InobjectField(index));
+      }
+    }
+  }
+  return result;
+}
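+
+// Worked example (hedged, with made-up indices): an HStoreNamedField that
+// writes a PropertyCell which ComputeGlobalVar maps to index 2 reports the
+// single special GlobalVar(2) instead of the coarse kGlobalVars flag, so it
+// only kills map entries that depend on that one cell. A store that cannot
+// be attributed to a single cell conservatively keeps the coarse flag and
+// sets every GlobalVar special.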
+
+
+SideEffects SideEffectsTracker::ComputeDependsOn(HInstruction* instr) {
+  int index;
+  SideEffects result(instr->DependsOnFlags());
+  if (result.ContainsFlag(kGlobalVars)) {
+    if (instr->IsLoadNamedField()) {
+      HLoadNamedField* load = HLoadNamedField::cast(instr);
+      HConstant* target = HConstant::cast(load->object());
+      if (ComputeGlobalVar(Unique<PropertyCell>::cast(target->GetUnique()),
+                           &index)) {
+        result.RemoveFlag(kGlobalVars);
+        result.AddSpecial(GlobalVar(index));
+        return result;
+      }
+    }
+    for (index = 0; index < kNumberOfGlobalVars; ++index) {
+      result.AddSpecial(GlobalVar(index));
+    }
+  } else if (result.ContainsFlag(kInobjectFields)) {
+    if (instr->IsLoadNamedField() &&
+        ComputeInobjectField(HLoadNamedField::cast(instr)->access(), &index)) {
+      result.RemoveFlag(kInobjectFields);
+      result.AddSpecial(InobjectField(index));
+    } else {
+      for (index = 0; index < kNumberOfInobjectFields; ++index) {
+        result.AddSpecial(InobjectField(index));
+      }
+    }
+  }
+  return result;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const TrackedEffects& te) {
+  SideEffectsTracker* t = te.tracker;
+  const char* separator = "";
+  os << "[";
+  for (int bit = 0; bit < kNumberOfFlags; ++bit) {
+    GVNFlag flag = GVNFlagFromInt(bit);
+    if (te.effects.ContainsFlag(flag)) {
+      os << separator;
+      separator = ", ";
+      switch (flag) {
+#define DECLARE_FLAG(Type) \
+  case k##Type:            \
+    os << #Type;           \
+    break;
+GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
+        default:
+          break;
+      }
+    }
+  }
+  for (int index = 0; index < t->num_global_vars_; ++index) {
+    if (te.effects.ContainsSpecial(t->GlobalVar(index))) {
+      os << separator << "[" << *t->global_vars_[index].handle() << "]";
+      separator = ", ";
+    }
+  }
+  for (int index = 0; index < t->num_inobject_fields_; ++index) {
+    if (te.effects.ContainsSpecial(t->InobjectField(index))) {
+      os << separator << t->inobject_fields_[index];
+      separator = ", ";
+    }
+  }
+  os << "]";
+  return os;
+}
+
+
+bool SideEffectsTracker::ComputeGlobalVar(Unique<PropertyCell> cell,
+                                          int* index) {
+  for (int i = 0; i < num_global_vars_; ++i) {
+    if (cell == global_vars_[i]) {
+      *index = i;
+      return true;
+    }
+  }
+  if (num_global_vars_ < kNumberOfGlobalVars) {
+    if (FLAG_trace_gvn) {
+      OFStream os(stdout);
+      os << "Tracking global var [" << *cell.handle() << "] "
+         << "(mapped to index " << num_global_vars_ << ")" << std::endl;
+    }
+    *index = num_global_vars_;
+    global_vars_[num_global_vars_++] = cell;
+    return true;
+  }
+  return false;
+}
+
+
+bool SideEffectsTracker::ComputeInobjectField(HObjectAccess access,
+                                              int* index) {
+  for (int i = 0; i < num_inobject_fields_; ++i) {
+    if (access.Equals(inobject_fields_[i])) {
+      *index = i;
+      return true;
+    }
+  }
+  if (num_inobject_fields_ < kNumberOfInobjectFields) {
+    if (FLAG_trace_gvn) {
+      OFStream os(stdout);
+      os << "Tracking inobject field access " << access << " (mapped to index "
+         << num_inobject_fields_ << ")" << std::endl;
+    }
+    *index = num_inobject_fields_;
+    inobject_fields_[num_inobject_fields_++] = access;
+    return true;
+  }
+  return false;
+}
+
+
+HGlobalValueNumberingPhase::HGlobalValueNumberingPhase(HGraph* graph)
+    : HPhase("H_Global value numbering", graph),
+      removed_side_effects_(false),
+      block_side_effects_(graph->blocks()->length(), zone()),
+      loop_side_effects_(graph->blocks()->length(), zone()),
+      visited_on_paths_(graph->blocks()->length(), zone()) {
+  DCHECK(!AllowHandleAllocation::IsAllowed());
+  block_side_effects_.AddBlock(
+      SideEffects(), graph->blocks()->length(), zone());
+  loop_side_effects_.AddBlock(
+      SideEffects(), graph->blocks()->length(), zone());
+}
+
+
+void HGlobalValueNumberingPhase::Run() {
+  DCHECK(!removed_side_effects_);
+  for (int i = FLAG_gvn_iterations; i > 0; --i) {
+    // Compute the side effects.
+    ComputeBlockSideEffects();
+
+    // Perform loop invariant code motion if requested.
+    if (FLAG_loop_invariant_code_motion) LoopInvariantCodeMotion();
+
+    // Perform the actual value numbering.
+    AnalyzeGraph();
+
+    // Continue GVN if we removed any side effects.
+    if (!removed_side_effects_) break;
+    removed_side_effects_ = false;
+
+    // Clear all side effects.
+    DCHECK_EQ(block_side_effects_.length(), graph()->blocks()->length());
+    DCHECK_EQ(loop_side_effects_.length(), graph()->blocks()->length());
+    for (int i = 0; i < graph()->blocks()->length(); ++i) {
+      block_side_effects_[i].RemoveAll();
+      loop_side_effects_[i].RemoveAll();
+    }
+    visited_on_paths_.Clear();
+  }
+}
+
+
+void HGlobalValueNumberingPhase::ComputeBlockSideEffects() {
+  for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
+    // Compute side effects for the block.
+    HBasicBlock* block = graph()->blocks()->at(i);
+    SideEffects side_effects;
+    if (block->IsReachable() && !block->IsDeoptimizing()) {
+      int id = block->block_id();
+      for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+        HInstruction* instr = it.Current();
+        side_effects.Add(side_effects_tracker_.ComputeChanges(instr));
+      }
+      block_side_effects_[id].Add(side_effects);
+
+      // Loop headers are part of their loop.
+      if (block->IsLoopHeader()) {
+        loop_side_effects_[id].Add(side_effects);
+      }
+
+      // Propagate loop side effects upwards.
+      if (block->HasParentLoopHeader()) {
+        HBasicBlock* with_parent = block;
+        if (block->IsLoopHeader()) side_effects = loop_side_effects_[id];
+        do {
+          HBasicBlock* parent_block = with_parent->parent_loop_header();
+          loop_side_effects_[parent_block->block_id()].Add(side_effects);
+          with_parent = parent_block;
+        } while (with_parent->HasParentLoopHeader());
+      }
+    }
+  }
+}
+
+
+void HGlobalValueNumberingPhase::LoopInvariantCodeMotion() {
+  TRACE_GVN_1("Using optimistic loop invariant code motion: %s\n",
+              graph()->use_optimistic_licm() ? "yes" : "no");
+  for (int i = graph()->blocks()->length() - 1; i >= 0; --i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    if (block->IsLoopHeader()) {
+      SideEffects side_effects = loop_side_effects_[block->block_id()];
+      if (FLAG_trace_gvn) {
+        OFStream os(stdout);
+        os << "Try loop invariant motion for " << *block << " changes "
+           << Print(side_effects) << std::endl;
+      }
+      HBasicBlock* last = block->loop_information()->GetLastBackEdge();
+      for (int j = block->block_id(); j <= last->block_id(); ++j) {
+        ProcessLoopBlock(graph()->blocks()->at(j), block, side_effects);
+      }
+    }
+  }
+}
+
+
+void HGlobalValueNumberingPhase::ProcessLoopBlock(
+    HBasicBlock* block,
+    HBasicBlock* loop_header,
+    SideEffects loop_kills) {
+  HBasicBlock* pre_header = loop_header->predecessors()->at(0);
+  if (FLAG_trace_gvn) {
+    OFStream os(stdout);
+    os << "Loop invariant code motion for " << *block << " depends on "
+       << Print(loop_kills) << std::endl;
+  }
+  HInstruction* instr = block->first();
+  while (instr != NULL) {
+    HInstruction* next = instr->next();
+    if (instr->CheckFlag(HValue::kUseGVN)) {
+      SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
+      SideEffects depends_on = side_effects_tracker_.ComputeDependsOn(instr);
+      if (FLAG_trace_gvn) {
+        OFStream os(stdout);
+        os << "Checking instruction i" << instr->id() << " ("
+           << instr->Mnemonic() << ") changes " << Print(changes)
+           << ", depends on " << Print(depends_on) << ". Loop changes "
+           << Print(loop_kills) << std::endl;
+      }
+      bool can_hoist = !depends_on.ContainsAnyOf(loop_kills);
+      if (can_hoist && !graph()->use_optimistic_licm()) {
+        can_hoist = block->IsLoopSuccessorDominator();
+      }
+
+      if (can_hoist) {
+        bool inputs_loop_invariant = true;
+        for (int i = 0; i < instr->OperandCount(); ++i) {
+          if (instr->OperandAt(i)->IsDefinedAfter(pre_header)) {
+            inputs_loop_invariant = false;
+          }
+        }
+
+        if (inputs_loop_invariant && ShouldMove(instr, loop_header)) {
+          TRACE_GVN_2("Hoisting loop invariant instruction i%d to block B%d\n",
+                      instr->id(), pre_header->block_id());
+          // Move the instruction out of the loop.
+          instr->Unlink();
+          instr->InsertBefore(pre_header->end());
+          if (instr->HasSideEffects()) removed_side_effects_ = true;
+        }
+      }
+    }
+    instr = next;
+  }
+}
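+
+// Illustrative hoisting case (a hedged sketch, not from this file): in a
+// loop equivalent to
+//
+//   for (var i = 0; i < n; i++) total += o.f;
+//
+// the load of o.f depends only on inobject-field effects the loop never
+// changes and its inputs are defined before the pre-header, so the code
+// above unlinks the HLoadNamedField and re-inserts it before
+// pre_header->end(), leaving a single load in front of the loop.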
+
+
+bool HGlobalValueNumberingPhase::AllowCodeMotion() {
+  return info()->IsStub() || info()->opt_count() + 1 < FLAG_max_opt_count;
+}
+
+
+bool HGlobalValueNumberingPhase::ShouldMove(HInstruction* instr,
+                                            HBasicBlock* loop_header) {
+  // If we've disabled code motion or we're in a block that unconditionally
+  // deoptimizes, don't move any instructions.
+  return AllowCodeMotion() && !instr->block()->IsDeoptimizing() &&
+      instr->block()->IsReachable();
+}
+
+
+SideEffects
+HGlobalValueNumberingPhase::CollectSideEffectsOnPathsToDominatedBlock(
+    HBasicBlock* dominator, HBasicBlock* dominated) {
+  SideEffects side_effects;
+  for (int i = 0; i < dominated->predecessors()->length(); ++i) {
+    HBasicBlock* block = dominated->predecessors()->at(i);
+    if (dominator->block_id() < block->block_id() &&
+        block->block_id() < dominated->block_id() &&
+        !visited_on_paths_.Contains(block->block_id())) {
+      visited_on_paths_.Add(block->block_id());
+      side_effects.Add(block_side_effects_[block->block_id()]);
+      if (block->IsLoopHeader()) {
+        side_effects.Add(loop_side_effects_[block->block_id()]);
+      }
+      side_effects.Add(CollectSideEffectsOnPathsToDominatedBlock(
+          dominator, block));
+    }
+  }
+  return side_effects;
+}
+
+
+// Each instance of this class is like a "stack frame" for the recursive
+// traversal of the dominator tree done during GVN (the stack is handled
+// as a doubly linked list).
+// We reuse frames when possible, so the list length is limited by the depth
+// of the dominator tree, but this forces us to initialize each frame by
+// calling an explicit "Initialize" method instead of using a constructor.
+class GvnBasicBlockState: public ZoneObject {
+ public:
+  static GvnBasicBlockState* CreateEntry(Zone* zone,
+                                         HBasicBlock* entry_block,
+                                         HInstructionMap* entry_map) {
+    return new(zone)
+        GvnBasicBlockState(NULL, entry_block, entry_map, NULL, zone);
+  }
+
+  HBasicBlock* block() { return block_; }
+  HInstructionMap* map() { return map_; }
+  HSideEffectMap* dominators() { return &dominators_; }
+
+  GvnBasicBlockState* next_in_dominator_tree_traversal(
+      Zone* zone,
+      HBasicBlock** dominator) {
+    // This assignment needs to happen before calling next_dominated() because
+    // that call can reuse "this" if we are at the last dominated block.
+    *dominator = block();
+    GvnBasicBlockState* result = next_dominated(zone);
+    if (result == NULL) {
+      GvnBasicBlockState* dominator_state = pop();
+      if (dominator_state != NULL) {
+        // This branch is guaranteed not to return NULL because pop() never
+        // returns a state where "is_done() == true".
+        *dominator = dominator_state->block();
+        result = dominator_state->next_dominated(zone);
+      } else {
+        // Unnecessary (we are returning NULL) but done for clarity.
+        *dominator = NULL;
+      }
+    }
+    return result;
+  }
+
+ private:
+  void Initialize(HBasicBlock* block,
+                  HInstructionMap* map,
+                  HSideEffectMap* dominators,
+                  bool copy_map,
+                  Zone* zone) {
+    block_ = block;
+    map_ = copy_map ? map->Copy(zone) : map;
+    dominated_index_ = -1;
+    length_ = block->dominated_blocks()->length();
+    if (dominators != NULL) {
+      dominators_ = *dominators;
+    }
+  }
+  bool is_done() { return dominated_index_ >= length_; }
+
+  GvnBasicBlockState(GvnBasicBlockState* previous,
+                     HBasicBlock* block,
+                     HInstructionMap* map,
+                     HSideEffectMap* dominators,
+                     Zone* zone)
+      : previous_(previous), next_(NULL) {
+    Initialize(block, map, dominators, true, zone);
+  }
+
+  GvnBasicBlockState* next_dominated(Zone* zone) {
+    dominated_index_++;
+    if (dominated_index_ == length_ - 1) {
+      // No need to copy the map for the last child in the dominator tree.
+      Initialize(block_->dominated_blocks()->at(dominated_index_),
+                 map(),
+                 dominators(),
+                 false,
+                 zone);
+      return this;
+    } else if (dominated_index_ < length_) {
+      return push(zone, block_->dominated_blocks()->at(dominated_index_));
+    } else {
+      return NULL;
+    }
+  }
+
+  GvnBasicBlockState* push(Zone* zone, HBasicBlock* block) {
+    if (next_ == NULL) {
+      next_ =
+          new(zone) GvnBasicBlockState(this, block, map(), dominators(), zone);
+    } else {
+      next_->Initialize(block, map(), dominators(), true, zone);
+    }
+    return next_;
+  }
+  GvnBasicBlockState* pop() {
+    GvnBasicBlockState* result = previous_;
+    while (result != NULL && result->is_done()) {
+      TRACE_GVN_2("Backtracking from block B%d to block b%d\n",
+                  block()->block_id(),
+                  previous_->block()->block_id())
+      result = result->previous_;
+    }
+    return result;
+  }
+
+  GvnBasicBlockState* previous_;
+  GvnBasicBlockState* next_;
+  HBasicBlock* block_;
+  HInstructionMap* map_;
+  HSideEffectMap dominators_;
+  int dominated_index_;
+  int length_;
+};
+
+
+// This is a recursive traversal of the dominator tree but it has been turned
+// into a loop to avoid stack overflows.
+// The logical "stack frames" of the recursion are kept in a list of
+// GvnBasicBlockState instances.
+void HGlobalValueNumberingPhase::AnalyzeGraph() {
+  HBasicBlock* entry_block = graph()->entry_block();
+  HInstructionMap* entry_map =
+      new(zone()) HInstructionMap(zone(), &side_effects_tracker_);
+  GvnBasicBlockState* current =
+      GvnBasicBlockState::CreateEntry(zone(), entry_block, entry_map);
+
+  while (current != NULL) {
+    HBasicBlock* block = current->block();
+    HInstructionMap* map = current->map();
+    HSideEffectMap* dominators = current->dominators();
+
+    TRACE_GVN_2("Analyzing block B%d%s\n",
+                block->block_id(),
+                block->IsLoopHeader() ? " (loop header)" : "");
+
+    // If this is a loop header, kill everything killed by the loop.
+    if (block->IsLoopHeader()) {
+      map->Kill(loop_side_effects_[block->block_id()]);
+      dominators->Kill(loop_side_effects_[block->block_id()]);
+    }
+
+    // Go through all instructions of the current block.
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      if (instr->CheckFlag(HValue::kTrackSideEffectDominators)) {
+        for (int i = 0; i < kNumberOfTrackedSideEffects; i++) {
+          HValue* other = dominators->at(i);
+          GVNFlag flag = GVNFlagFromInt(i);
+          if (instr->DependsOnFlags().Contains(flag) && other != NULL) {
+            TRACE_GVN_5("Side-effect #%d in %d (%s) is dominated by %d (%s)\n",
+                        i,
+                        instr->id(),
+                        instr->Mnemonic(),
+                        other->id(),
+                        other->Mnemonic());
+            if (instr->HandleSideEffectDominator(flag, other)) {
+              removed_side_effects_ = true;
+            }
+          }
+        }
+      }
+      // The instruction may have been unlinked by the side-effect dominator
+      // handling above; skip it in that case.
+      if (!instr->IsLinked()) continue;
+
+      SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
+      if (!changes.IsEmpty()) {
+        // Clear all instructions in the map that are affected by side effects.
+        // Store instruction as the dominating one for tracked side effects.
+        map->Kill(changes);
+        dominators->Store(changes, instr);
+        if (FLAG_trace_gvn) {
+          OFStream os(stdout);
+          os << "Instruction i" << instr->id() << " changes " << Print(changes)
+             << std::endl;
+        }
+      }
+      if (instr->CheckFlag(HValue::kUseGVN) &&
+          !instr->CheckFlag(HValue::kCantBeReplaced)) {
+        DCHECK(!instr->HasObservableSideEffects());
+        HInstruction* other = map->Lookup(instr);
+        if (other != NULL) {
+          DCHECK(instr->Equals(other) && other->Equals(instr));
+          TRACE_GVN_4("Replacing instruction i%d (%s) with i%d (%s)\n",
+                      instr->id(),
+                      instr->Mnemonic(),
+                      other->id(),
+                      other->Mnemonic());
+          if (instr->HasSideEffects()) removed_side_effects_ = true;
+          instr->DeleteAndReplaceWith(other);
+        } else {
+          map->Add(instr, zone());
+        }
+      }
+    }
+
+    HBasicBlock* dominator_block;
+    GvnBasicBlockState* next =
+        current->next_in_dominator_tree_traversal(zone(),
+                                                  &dominator_block);
+
+    if (next != NULL) {
+      HBasicBlock* dominated = next->block();
+      HInstructionMap* successor_map = next->map();
+      HSideEffectMap* successor_dominators = next->dominators();
+
+      // Kill everything killed on any path between this block and the
+      // dominated block.  We don't have to traverse these paths if the
+      // value map and the dominators list are already empty.  If the range
+      // of block ids (block_id, dominated_id) is empty, there are no such
+      // paths.
+      if ((!successor_map->IsEmpty() || !successor_dominators->IsEmpty()) &&
+          dominator_block->block_id() + 1 < dominated->block_id()) {
+        visited_on_paths_.Clear();
+        SideEffects side_effects_on_all_paths =
+            CollectSideEffectsOnPathsToDominatedBlock(dominator_block,
+                                                      dominated);
+        successor_map->Kill(side_effects_on_all_paths);
+        successor_dominators->Kill(side_effects_on_all_paths);
+      }
+    }
+    current = next;
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-gvn.h b/src/crankshaft/hydrogen-gvn.h
new file mode 100644
index 0000000..a5e2168
--- /dev/null
+++ b/src/crankshaft/hydrogen-gvn.h
@@ -0,0 +1,154 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_GVN_H_
+#define V8_CRANKSHAFT_HYDROGEN_GVN_H_
+
+#include <iosfwd>
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-instructions.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// This class extends GVNFlagSet with additional "special" dynamic side effects,
+// which can be used to represent side effects that cannot be expressed using
+// the GVNFlags of an HInstruction. These special side effects are tracked by a
+// SideEffectsTracker (see below).
+class SideEffects final {
+ public:
+  static const int kNumberOfSpecials = 64 - kNumberOfFlags;
+
+  SideEffects() : bits_(0) {
+    DCHECK(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT);
+  }
+  explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {}
+  bool IsEmpty() const { return bits_ == 0; }
+  bool ContainsFlag(GVNFlag flag) const {
+    return (bits_ & MaskFlag(flag)) != 0;
+  }
+  bool ContainsSpecial(int special) const {
+    return (bits_ & MaskSpecial(special)) != 0;
+  }
+  bool ContainsAnyOf(SideEffects set) const { return (bits_ & set.bits_) != 0; }
+  void Add(SideEffects set) { bits_ |= set.bits_; }
+  void AddSpecial(int special) { bits_ |= MaskSpecial(special); }
+  void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); }
+  void RemoveAll() { bits_ = 0; }
+  uint64_t ToIntegral() const { return bits_; }
+
+ private:
+  uint64_t MaskFlag(GVNFlag flag) const {
+    return static_cast<uint64_t>(1) << static_cast<unsigned>(flag);
+  }
+  uint64_t MaskSpecial(int special) const {
+    DCHECK(special >= 0);
+    DCHECK(special < kNumberOfSpecials);
+    return static_cast<uint64_t>(1) << static_cast<unsigned>(
+        special + kNumberOfFlags);
+  }
+
+  uint64_t bits_;
+};
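+
+// Bit-layout sketch for the 64-bit set above (an illustration, not a new
+// definition): the low kNumberOfFlags bits hold the static GVNFlags and the
+// remaining kNumberOfSpecials bits hold the dynamic "special" effects:
+//
+//   MaskFlag(flag)       == uint64_t{1} << flag
+//   MaskSpecial(special) == uint64_t{1} << (special + kNumberOfFlags)
+//
+// With both kinds packed into one word, ContainsAnyOf() reduces to a single
+// bitwise AND.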
+
+
+struct TrackedEffects;
+
+// Tracks global variable and inobject field loads/stores in a fine-grained
+// fashion, and represents them using the "special" dynamic side effects of the
+// SideEffects class (see above). This way unrelated global variable/inobject
+// field stores don't prevent hoisting and merging of global variable/inobject
+// field loads.
+class SideEffectsTracker final BASE_EMBEDDED {
+ public:
+  SideEffectsTracker() : num_global_vars_(0), num_inobject_fields_(0) {}
+  SideEffects ComputeChanges(HInstruction* instr);
+  SideEffects ComputeDependsOn(HInstruction* instr);
+
+ private:
+  friend std::ostream& operator<<(std::ostream& os, const TrackedEffects& f);
+  bool ComputeGlobalVar(Unique<PropertyCell> cell, int* index);
+  bool ComputeInobjectField(HObjectAccess access, int* index);
+
+  static int GlobalVar(int index) {
+    DCHECK(index >= 0);
+    DCHECK(index < kNumberOfGlobalVars);
+    return index;
+  }
+  static int InobjectField(int index) {
+    DCHECK(index >= 0);
+    DCHECK(index < kNumberOfInobjectFields);
+    return index + kNumberOfGlobalVars;
+  }
+
+  // Track up to four global vars.
+  static const int kNumberOfGlobalVars = 4;
+  Unique<PropertyCell> global_vars_[kNumberOfGlobalVars];
+  int num_global_vars_;
+
+  // Track as many inobject fields as the remaining "special" bits allow.
+  static const int kNumberOfInobjectFields =
+      SideEffects::kNumberOfSpecials - kNumberOfGlobalVars;
+  HObjectAccess inobject_fields_[kNumberOfInobjectFields];
+  int num_inobject_fields_;
+};
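+
+// A hedged usage sketch: the first kNumberOfGlobalVars distinct
+// PropertyCells seen by ComputeGlobalVar get indices 0..3, and distinct
+// HObjectAccess values fill the remaining special slots in the same
+// first-come-first-served way; once a table is full, accesses that cannot
+// be mapped fall back to the coarse kGlobalVars / kInobjectFields flags.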
+
+
+// Helper class for printing, because the effects don't know their tracker.
+struct TrackedEffects {
+  TrackedEffects(SideEffectsTracker* t, SideEffects e)
+      : tracker(t), effects(e) {}
+  SideEffectsTracker* tracker;
+  SideEffects effects;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const TrackedEffects& f);
+
+
+// Perform common subexpression elimination and loop-invariant code motion.
+class HGlobalValueNumberingPhase final : public HPhase {
+ public:
+  explicit HGlobalValueNumberingPhase(HGraph* graph);
+
+  void Run();
+
+ private:
+  SideEffects CollectSideEffectsOnPathsToDominatedBlock(
+      HBasicBlock* dominator,
+      HBasicBlock* dominated);
+  void AnalyzeGraph();
+  void ComputeBlockSideEffects();
+  void LoopInvariantCodeMotion();
+  void ProcessLoopBlock(HBasicBlock* block,
+                        HBasicBlock* before_loop,
+                        SideEffects loop_kills);
+  bool AllowCodeMotion();
+  bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
+  TrackedEffects Print(SideEffects side_effects) {
+    return TrackedEffects(&side_effects_tracker_, side_effects);
+  }
+
+  SideEffectsTracker side_effects_tracker_;
+  bool removed_side_effects_;
+
+  // A map of block IDs to their side effects.
+  ZoneList<SideEffects> block_side_effects_;
+
+  // A map of loop header block IDs to their loop's side effects.
+  ZoneList<SideEffects> loop_side_effects_;
+
+  // Used when collecting side effects on paths from dominator to
+  // dominated.
+  BitVector visited_on_paths_;
+
+  DISALLOW_COPY_AND_ASSIGN(HGlobalValueNumberingPhase);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_GVN_H_
diff --git a/src/crankshaft/hydrogen-infer-representation.cc b/src/crankshaft/hydrogen-infer-representation.cc
new file mode 100644
index 0000000..74f264e
--- /dev/null
+++ b/src/crankshaft/hydrogen-infer-representation.cc
@@ -0,0 +1,162 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-infer-representation.h"
+
+namespace v8 {
+namespace internal {
+
+void HInferRepresentationPhase::AddToWorklist(HValue* current) {
+  if (current->representation().IsTagged()) return;
+  if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
+  if (in_worklist_.Contains(current->id())) return;
+  worklist_.Add(current, zone());
+  in_worklist_.Add(current->id());
+}
+
+
+void HInferRepresentationPhase::Run() {
+  // (1) Initialize bit vectors and count real uses. Each phi gets a
+  // bit-vector of length <number of phis>.
+  const ZoneList<HPhi*>* phi_list = graph()->phi_list();
+  int phi_count = phi_list->length();
+  ZoneList<BitVector*> connected_phis(phi_count, zone());
+  for (int i = 0; i < phi_count; ++i) {
+    phi_list->at(i)->InitRealUses(i);
+    BitVector* connected_set = new(zone()) BitVector(phi_count, zone());
+    connected_set->Add(i);
+    connected_phis.Add(connected_set, zone());
+  }
+
+  // (2) Do a fixed-point iteration to find the set of connected phis.  A
+  // phi is connected to another phi if its value is used either directly or
+  // indirectly through a transitive closure of the def-use relation.
+  bool change = true;
+  while (change) {
+    change = false;
+    // We normally have far more "forward edges" than "backward edges",
+    // so we terminate faster when we walk backwards.
+    for (int i = phi_count - 1; i >= 0; --i) {
+      HPhi* phi = phi_list->at(i);
+      for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+        HValue* use = it.value();
+        if (use->IsPhi()) {
+          int id = HPhi::cast(use)->phi_id();
+          if (connected_phis[i]->UnionIsChanged(*connected_phis[id]))
+            change = true;
+        }
+      }
+    }
+  }
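+
+  // Worked example (hedged, with made-up phi ids): if phi 0 is used by
+  // phi 1 and phi 1 by phi 2, the backward sweep first unions
+  // connected_phis[2] into connected_phis[1] and then connected_phis[1]
+  // into connected_phis[0], giving {0, 1, 2} in a single pass; the next
+  // sweep changes nothing and the loop above exits.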
+
+  // Set truncation flags for groups of connected phis. This is a conservative
+  // approximation; the flag will be properly re-computed after representations
+  // have been determined.
+  if (phi_count > 0) {
+    BitVector done(phi_count, zone());
+    for (int i = 0; i < phi_count; ++i) {
+      if (done.Contains(i)) continue;
+
+      // Check if all uses of all connected phis in this group are truncating.
+      bool all_uses_everywhere_truncating_int32 = true;
+      bool all_uses_everywhere_truncating_smi = true;
+      for (BitVector::Iterator it(connected_phis[i]);
+           !it.Done();
+           it.Advance()) {
+        int index = it.Current();
+        all_uses_everywhere_truncating_int32 &=
+            phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToInt32);
+        all_uses_everywhere_truncating_smi &=
+            phi_list->at(index)->CheckFlag(HInstruction::kTruncatingToSmi);
+        done.Add(index);
+      }
+
+      if (!all_uses_everywhere_truncating_int32) {
+        // Clear truncation flag of this group of connected phis.
+        for (BitVector::Iterator it(connected_phis[i]);
+             !it.Done();
+             it.Advance()) {
+          int index = it.Current();
+          phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToInt32);
+        }
+      }
+      if (!all_uses_everywhere_truncating_smi) {
+        // Clear truncation flag of this group of connected phis.
+        for (BitVector::Iterator it(connected_phis[i]);
+             !it.Done();
+             it.Advance()) {
+          int index = it.Current();
+          phi_list->at(index)->ClearFlag(HInstruction::kTruncatingToSmi);
+        }
+      }
+    }
+  }
+
+  // Simplify constant phi inputs where possible.
+  // This step uses kTruncatingToInt32 flags of phis.
+  for (int i = 0; i < phi_count; ++i) {
+    phi_list->at(i)->SimplifyConstantInputs();
+  }
+
+  // Use the phi reachability information from step 2 to
+  // sum up the non-phi use counts of all connected phis.
+  for (int i = 0; i < phi_count; ++i) {
+    HPhi* phi = phi_list->at(i);
+    for (BitVector::Iterator it(connected_phis[i]);
+         !it.Done();
+         it.Advance()) {
+      int index = it.Current();
+      HPhi* it_use = phi_list->at(index);
+      if (index != i) phi->AddNonPhiUsesFrom(it_use);  // Don't count twice.
+    }
+  }
+
+  // Initialize the worklist.
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int j = 0; j < phis->length(); ++j) {
+      AddToWorklist(phis->at(j));
+    }
+
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* current = it.Current();
+      AddToWorklist(current);
+    }
+  }
+
+  // Do a fixed-point iteration, trying to improve representations.
+  while (!worklist_.is_empty()) {
+    HValue* current = worklist_.RemoveLast();
+    current->InferRepresentation(this);
+    in_worklist_.Remove(current->id());
+  }
+
+  // Lastly, any instruction for which we don't have representation
+  // information defaults to Tagged.
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int j = 0; j < phis->length(); ++j) {
+      HPhi* phi = phis->at(j);
+      if (phi->representation().IsNone()) {
+        phi->ChangeRepresentation(Representation::Tagged());
+      }
+    }
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* current = it.Current();
+      if (current->representation().IsNone() &&
+          current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
+        if (current->CheckFlag(HInstruction::kCannotBeTagged)) {
+          current->ChangeRepresentation(Representation::Double());
+        } else {
+          current->ChangeRepresentation(Representation::Tagged());
+        }
+      }
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-infer-representation.h b/src/crankshaft/hydrogen-infer-representation.h
new file mode 100644
index 0000000..92f2bc8
--- /dev/null
+++ b/src/crankshaft/hydrogen-infer-representation.h
@@ -0,0 +1,35 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HInferRepresentationPhase : public HPhase {
+ public:
+  explicit HInferRepresentationPhase(HGraph* graph)
+      : HPhase("H_Infer representations", graph),
+        worklist_(8, zone()),
+        in_worklist_(graph->GetMaximumValueID(), zone()) { }
+
+  void Run();
+  void AddToWorklist(HValue* current);
+
+ private:
+  ZoneList<HValue*> worklist_;
+  BitVector in_worklist_;
+
+  DISALLOW_COPY_AND_ASSIGN(HInferRepresentationPhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_INFER_REPRESENTATION_H_
diff --git a/src/crankshaft/hydrogen-infer-types.cc b/src/crankshaft/hydrogen-infer-types.cc
new file mode 100644
index 0000000..bfd3dd2
--- /dev/null
+++ b/src/crankshaft/hydrogen-infer-types.cc
@@ -0,0 +1,55 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-infer-types.h"
+
+namespace v8 {
+namespace internal {
+
+void HInferTypesPhase::InferTypes(int from_inclusive, int to_inclusive) {
+  for (int i = from_inclusive; i <= to_inclusive; ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int j = 0; j < phis->length(); j++) {
+      phis->at(j)->UpdateInferredType();
+    }
+
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      it.Current()->UpdateInferredType();
+    }
+
+    if (block->IsLoopHeader()) {
+      HBasicBlock* last_back_edge =
+          block->loop_information()->GetLastBackEdge();
+      InferTypes(i + 1, last_back_edge->block_id());
+      // Skip all blocks already processed by the recursive call.
+      i = last_back_edge->block_id();
+      // Update the phis of the loop header now that the whole loop body is
+      // guaranteed to have been processed.
+      for (int j = 0; j < block->phis()->length(); ++j) {
+        HPhi* phi = block->phis()->at(j);
+        worklist_.Add(phi, zone());
+        in_worklist_.Add(phi->id());
+      }
+      while (!worklist_.is_empty()) {
+        HValue* current = worklist_.RemoveLast();
+        in_worklist_.Remove(current->id());
+        if (current->UpdateInferredType()) {
+          for (HUseIterator it(current->uses()); !it.Done(); it.Advance()) {
+            HValue* use = it.value();
+            if (!in_worklist_.Contains(use->id())) {
+              in_worklist_.Add(use->id());
+              worklist_.Add(use, zone());
+            }
+          }
+        }
+      }
+      DCHECK(in_worklist_.IsEmpty());
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-infer-types.h b/src/crankshaft/hydrogen-infer-types.h
new file mode 100644
index 0000000..8acfeab
--- /dev/null
+++ b/src/crankshaft/hydrogen-infer-types.h
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
+#define V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HInferTypesPhase : public HPhase {
+ public:
+  explicit HInferTypesPhase(HGraph* graph)
+      : HPhase("H_Inferring types", graph), worklist_(8, zone()),
+        in_worklist_(graph->GetMaximumValueID(), zone()) { }
+
+  void Run() {
+    InferTypes(0, graph()->blocks()->length() - 1);
+  }
+
+ private:
+  void InferTypes(int from_inclusive, int to_inclusive);
+
+  ZoneList<HValue*> worklist_;
+  BitVector in_worklist_;
+
+  DISALLOW_COPY_AND_ASSIGN(HInferTypesPhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_INFER_TYPES_H_
diff --git a/src/crankshaft/hydrogen-instructions.cc b/src/crankshaft/hydrogen-instructions.cc
new file mode 100644
index 0000000..e2e026f
--- /dev/null
+++ b/src/crankshaft/hydrogen-instructions.cc
@@ -0,0 +1,4702 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-instructions.h"
+
+#include "src/base/bits.h"
+#include "src/base/safe_math.h"
+#include "src/crankshaft/hydrogen-infer-representation.h"
+#include "src/double.h"
+#include "src/elements.h"
+#include "src/factory.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/crankshaft/ia32/lithium-ia32.h"  // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/crankshaft/x64/lithium-x64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/crankshaft/arm64/lithium-arm64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM
+#include "src/crankshaft/arm/lithium-arm.h"  // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/crankshaft/ppc/lithium-ppc.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/crankshaft/mips/lithium-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/crankshaft/mips64/lithium-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_X87
+#include "src/crankshaft/x87/lithium-x87.h"  // NOLINT
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                                         \
+  LInstruction* H##type::CompileToLithium(LChunkBuilder* builder) {  \
+    return builder->Do##type(this);                                  \
+  }
+HYDROGEN_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+
+Isolate* HValue::isolate() const {
+  DCHECK(block() != NULL);
+  return block()->isolate();
+}
+
+
+void HValue::AssumeRepresentation(Representation r) {
+  if (CheckFlag(kFlexibleRepresentation)) {
+    ChangeRepresentation(r);
+    // The representation of the value is dictated by type feedback and
+    // will not be changed later.
+    ClearFlag(kFlexibleRepresentation);
+  }
+}
+
+
+void HValue::InferRepresentation(HInferRepresentationPhase* h_infer) {
+  DCHECK(CheckFlag(kFlexibleRepresentation));
+  Representation new_rep = RepresentationFromInputs();
+  UpdateRepresentation(new_rep, h_infer, "inputs");
+  new_rep = RepresentationFromUses();
+  UpdateRepresentation(new_rep, h_infer, "uses");
+  if (representation().IsSmi() && HasNonSmiUse()) {
+    UpdateRepresentation(
+        Representation::Integer32(), h_infer, "use requirements");
+  }
+}
+
+
+Representation HValue::RepresentationFromUses() {
+  if (HasNoUses()) return Representation::None();
+  Representation result = Representation::None();
+
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    Representation rep = use->observed_input_representation(it.index());
+    result = result.generalize(rep);
+
+    if (FLAG_trace_representation) {
+      PrintF("#%d %s is used by #%d %s as %s%s\n",
+             id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
+             (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
+    }
+  }
+  if (IsPhi()) {
+    result = result.generalize(
+        HPhi::cast(this)->representation_from_indirect_uses());
+  }
+
+  // External representations are dealt with separately.
+  return result.IsExternal() ? Representation::None() : result;
+}
+
+
+void HValue::UpdateRepresentation(Representation new_rep,
+                                  HInferRepresentationPhase* h_infer,
+                                  const char* reason) {
+  Representation r = representation();
+  if (new_rep.is_more_general_than(r)) {
+    if (CheckFlag(kCannotBeTagged) && new_rep.IsTagged()) return;
+    if (FLAG_trace_representation) {
+      PrintF("Changing #%d %s representation %s -> %s based on %s\n",
+             id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
+    }
+    ChangeRepresentation(new_rep);
+    AddDependantsToWorklist(h_infer);
+  }
+}
+
+
+void HValue::AddDependantsToWorklist(HInferRepresentationPhase* h_infer) {
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    h_infer->AddToWorklist(it.value());
+  }
+  for (int i = 0; i < OperandCount(); ++i) {
+    h_infer->AddToWorklist(OperandAt(i));
+  }
+}
+
+
+static int32_t ConvertAndSetOverflow(Representation r,
+                                     int64_t result,
+                                     bool* overflow) {
+  if (r.IsSmi()) {
+    if (result > Smi::kMaxValue) {
+      *overflow = true;
+      return Smi::kMaxValue;
+    }
+    if (result < Smi::kMinValue) {
+      *overflow = true;
+      return Smi::kMinValue;
+    }
+  } else {
+    if (result > kMaxInt) {
+      *overflow = true;
+      return kMaxInt;
+    }
+    if (result < kMinInt) {
+      *overflow = true;
+      return kMinInt;
+    }
+  }
+  return static_cast<int32_t>(result);
+}
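+
+// Illustrative values (a hedged sketch): for an Integer32 representation,
+// ConvertAndSetOverflow(r, int64_t{1} << 33, &overflow) returns kMaxInt and
+// sets *overflow; a Smi representation on 32-bit targets clamps the same
+// input to Smi::kMaxValue (2^30 - 1). In-range inputs are returned
+// unchanged and leave *overflow* untouched.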
+
+
+static int32_t AddWithoutOverflow(Representation r,
+                                  int32_t a,
+                                  int32_t b,
+                                  bool* overflow) {
+  int64_t result = static_cast<int64_t>(a) + static_cast<int64_t>(b);
+  return ConvertAndSetOverflow(r, result, overflow);
+}
+
+
+static int32_t SubWithoutOverflow(Representation r,
+                                  int32_t a,
+                                  int32_t b,
+                                  bool* overflow) {
+  int64_t result = static_cast<int64_t>(a) - static_cast<int64_t>(b);
+  return ConvertAndSetOverflow(r, result, overflow);
+}
+
+
+static int32_t MulWithoutOverflow(const Representation& r,
+                                  int32_t a,
+                                  int32_t b,
+                                  bool* overflow) {
+  int64_t result = static_cast<int64_t>(a) * static_cast<int64_t>(b);
+  return ConvertAndSetOverflow(r, result, overflow);
+}
+
+
+int32_t Range::Mask() const {
+  if (lower_ == upper_) return lower_;
+  if (lower_ >= 0) {
+    int32_t res = 1;
+    while (res < upper_) {
+      res = (res << 1) | 1;
+    }
+    return res;
+  }
+  return 0xffffffff;
+}
+
+
+void Range::AddConstant(int32_t value) {
+  if (value == 0) return;
+  bool may_overflow = false;  // Overflow is ignored here.
+  Representation r = Representation::Integer32();
+  lower_ = AddWithoutOverflow(r, lower_, value, &may_overflow);
+  upper_ = AddWithoutOverflow(r, upper_, value, &may_overflow);
+#ifdef DEBUG
+  Verify();
+#endif
+}
+
+
+void Range::Intersect(Range* other) {
+  upper_ = Min(upper_, other->upper_);
+  lower_ = Max(lower_, other->lower_);
+  bool b = CanBeMinusZero() && other->CanBeMinusZero();
+  set_can_be_minus_zero(b);
+}
+
+
+void Range::Union(Range* other) {
+  upper_ = Max(upper_, other->upper_);
+  lower_ = Min(lower_, other->lower_);
+  bool b = CanBeMinusZero() || other->CanBeMinusZero();
+  set_can_be_minus_zero(b);
+}
+
+
+void Range::CombinedMax(Range* other) {
+  upper_ = Max(upper_, other->upper_);
+  lower_ = Max(lower_, other->lower_);
+  set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
+}
+
+
+void Range::CombinedMin(Range* other) {
+  upper_ = Min(upper_, other->upper_);
+  lower_ = Min(lower_, other->lower_);
+  set_can_be_minus_zero(CanBeMinusZero() || other->CanBeMinusZero());
+}
+
+
+void Range::Sar(int32_t value) {
+  int32_t bits = value & 0x1F;
+  lower_ = lower_ >> bits;
+  upper_ = upper_ >> bits;
+  set_can_be_minus_zero(false);
+}
+
+
+void Range::Shl(int32_t value) {
+  int32_t bits = value & 0x1F;
+  int old_lower = lower_;
+  int old_upper = upper_;
+  lower_ = lower_ << bits;
+  upper_ = upper_ << bits;
+  if (old_lower != lower_ >> bits || old_upper != upper_ >> bits) {
+    upper_ = kMaxInt;
+    lower_ = kMinInt;
+  }
+  set_can_be_minus_zero(false);
+}
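+
+// Illustrative: shifting the range [1, 1 << 30] left by 2 wraps the upper
+// bound, so the shift-back comparison above no longer reproduces old_upper
+// and the range conservatively widens to [kMinInt, kMaxInt]. The check in
+// isolation (a sketch):
+//
+//   int32_t shifted = old_upper << bits;          // wraps on overflow
+//   bool lossy = (shifted >> bits) != old_upper;  // true for this range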
+
+
+bool Range::AddAndCheckOverflow(const Representation& r, Range* other) {
+  bool may_overflow = false;
+  lower_ = AddWithoutOverflow(r, lower_, other->lower(), &may_overflow);
+  upper_ = AddWithoutOverflow(r, upper_, other->upper(), &may_overflow);
+  KeepOrder();
+#ifdef DEBUG
+  Verify();
+#endif
+  return may_overflow;
+}
+
+
+bool Range::SubAndCheckOverflow(const Representation& r, Range* other) {
+  bool may_overflow = false;
+  lower_ = SubWithoutOverflow(r, lower_, other->upper(), &may_overflow);
+  upper_ = SubWithoutOverflow(r, upper_, other->lower(), &may_overflow);
+  KeepOrder();
+#ifdef DEBUG
+  Verify();
+#endif
+  return may_overflow;
+}
+
+
+void Range::KeepOrder() {
+  if (lower_ > upper_) {
+    int32_t tmp = lower_;
+    lower_ = upper_;
+    upper_ = tmp;
+  }
+}
+
+
+#ifdef DEBUG
+void Range::Verify() const {
+  DCHECK(lower_ <= upper_);
+}
+#endif
+
+
+bool Range::MulAndCheckOverflow(const Representation& r, Range* other) {
+  bool may_overflow = false;
+  int v1 = MulWithoutOverflow(r, lower_, other->lower(), &may_overflow);
+  int v2 = MulWithoutOverflow(r, lower_, other->upper(), &may_overflow);
+  int v3 = MulWithoutOverflow(r, upper_, other->lower(), &may_overflow);
+  int v4 = MulWithoutOverflow(r, upper_, other->upper(), &may_overflow);
+  lower_ = Min(Min(v1, v2), Min(v3, v4));
+  upper_ = Max(Max(v1, v2), Max(v3, v4));
+#ifdef DEBUG
+  Verify();
+#endif
+  return may_overflow;
+}
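+
+// Worked example (hedged): for [-2, 3] * [4, 5] the four corner products
+// v1..v4 are -8, -10, 12 and 15, so the result range is [-10, 15] and
+// may_overflow stays false because every product fits in Integer32.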
+
+
+bool HValue::IsDefinedAfter(HBasicBlock* other) const {
+  return block()->block_id() > other->block_id();
+}
+
+
+HUseListNode* HUseListNode::tail() {
+  // Skip and remove dead items in the use list.
+  while (tail_ != NULL && tail_->value()->CheckFlag(HValue::kIsDead)) {
+    tail_ = tail_->tail_;
+  }
+  return tail_;
+}
+
+
+bool HValue::CheckUsesForFlag(Flag f) const {
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    if (it.value()->IsSimulate()) continue;
+    if (!it.value()->CheckFlag(f)) return false;
+  }
+  return true;
+}
+
+
+bool HValue::CheckUsesForFlag(Flag f, HValue** value) const {
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    if (it.value()->IsSimulate()) continue;
+    if (!it.value()->CheckFlag(f)) {
+      *value = it.value();
+      return false;
+    }
+  }
+  return true;
+}
+
+
+bool HValue::HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const {
+  bool return_value = false;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    if (it.value()->IsSimulate()) continue;
+    if (!it.value()->CheckFlag(f)) return false;
+    return_value = true;
+  }
+  return return_value;
+}
+
+
+HUseIterator::HUseIterator(HUseListNode* head) : next_(head) {
+  Advance();
+}
+
+
+void HUseIterator::Advance() {
+  current_ = next_;
+  if (current_ != NULL) {
+    next_ = current_->tail();
+    value_ = current_->value();
+    index_ = current_->index();
+  }
+}
+
+
+int HValue::UseCount() const {
+  int count = 0;
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) ++count;
+  return count;
+}
+
+
+HUseListNode* HValue::RemoveUse(HValue* value, int index) {
+  HUseListNode* previous = NULL;
+  HUseListNode* current = use_list_;
+  while (current != NULL) {
+    if (current->value() == value && current->index() == index) {
+      if (previous == NULL) {
+        use_list_ = current->tail();
+      } else {
+        previous->set_tail(current->tail());
+      }
+      break;
+    }
+
+    previous = current;
+    current = current->tail();
+  }
+
+#ifdef DEBUG
+  // Do not reuse use list nodes in debug mode, zap them.
+  if (current != NULL) {
+    HUseListNode* temp =
+        new(block()->zone())
+        HUseListNode(current->value(), current->index(), NULL);
+    current->Zap();
+    current = temp;
+  }
+#endif
+  return current;
+}
+
+
+bool HValue::Equals(HValue* other) {
+  if (other->opcode() != opcode()) return false;
+  if (!other->representation().Equals(representation())) return false;
+  if (!other->type_.Equals(type_)) return false;
+  if (other->flags() != flags()) return false;
+  if (OperandCount() != other->OperandCount()) return false;
+  for (int i = 0; i < OperandCount(); ++i) {
+    if (OperandAt(i)->id() != other->OperandAt(i)->id()) return false;
+  }
+  bool result = DataEquals(other);
+  DCHECK(!result || Hashcode() == other->Hashcode());
+  return result;
+}
+
+
+intptr_t HValue::Hashcode() {
+  intptr_t result = opcode();
+  int count = OperandCount();
+  for (int i = 0; i < count; ++i) {
+    result = result * 19 + OperandAt(i)->id() + (result >> 7);
+  }
+  return result;
+}
+
+
+const char* HValue::Mnemonic() const {
+  switch (opcode()) {
+#define MAKE_CASE(type) case k##type: return #type;
+    HYDROGEN_CONCRETE_INSTRUCTION_LIST(MAKE_CASE)
+#undef MAKE_CASE
+    case kPhi: return "Phi";
+    default: return "";
+  }
+}
+
+
+bool HValue::CanReplaceWithDummyUses() {
+  return FLAG_unreachable_code_elimination &&
+      !(block()->IsReachable() ||
+        IsBlockEntry() ||
+        IsControlInstruction() ||
+        IsArgumentsObject() ||
+        IsCapturedObject() ||
+        IsSimulate() ||
+        IsEnterInlined() ||
+        IsLeaveInlined());
+}
+
+
+bool HValue::IsInteger32Constant() {
+  return IsConstant() && HConstant::cast(this)->HasInteger32Value();
+}
+
+
+int32_t HValue::GetInteger32Constant() {
+  return HConstant::cast(this)->Integer32Value();
+}
+
+
+bool HValue::EqualsInteger32Constant(int32_t value) {
+  return IsInteger32Constant() && GetInteger32Constant() == value;
+}
+
+
+void HValue::SetOperandAt(int index, HValue* value) {
+  RegisterUse(index, value);
+  InternalSetOperandAt(index, value);
+}
+
+
+void HValue::DeleteAndReplaceWith(HValue* other) {
+  // We replace all uses first, so Delete can assert that there are none.
+  if (other != NULL) ReplaceAllUsesWith(other);
+  Kill();
+  DeleteFromGraph();
+}
+
+
+void HValue::ReplaceAllUsesWith(HValue* other) {
+  while (use_list_ != NULL) {
+    HUseListNode* list_node = use_list_;
+    HValue* value = list_node->value();
+    DCHECK(!value->block()->IsStartBlock());
+    value->InternalSetOperandAt(list_node->index(), other);
+    use_list_ = list_node->tail();
+    list_node->set_tail(other->use_list_);
+    other->use_list_ = list_node;
+  }
+}
+
+
+void HValue::Kill() {
+  // Instead of going through the entire use list of each operand, we only
+  // check the first item in each use list and rely on the tail() method to
+  // skip dead items, removing them lazily next time we traverse the list.
+  SetFlag(kIsDead);
+  for (int i = 0; i < OperandCount(); ++i) {
+    HValue* operand = OperandAt(i);
+    if (operand == NULL) continue;
+    HUseListNode* first = operand->use_list_;
+    if (first != NULL && first->value()->CheckFlag(kIsDead)) {
+      operand->use_list_ = first->tail();
+    }
+  }
+}
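+
+// Illustration (hypothetical): after Kill() on v = HAdd(a, b), the use lists
+// of a and b may still contain nodes for v; HUseListNode::tail() skips and
+// unlinks such dead nodes lazily on the next traversal.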
+
+
+void HValue::SetBlock(HBasicBlock* block) {
+  DCHECK(block_ == NULL || block == NULL);
+  block_ = block;
+  if (id_ == kNoNumber && block != NULL) {
+    id_ = block->graph()->GetNextValueID(this);
+  }
+}
+
+
+std::ostream& operator<<(std::ostream& os, const HValue& v) {
+  return v.PrintTo(os);
+}
+
+
+std::ostream& operator<<(std::ostream& os, const TypeOf& t) {
+  if (t.value->representation().IsTagged() &&
+      !t.value->type().Equals(HType::Tagged()))
+    return os;
+  return os << " type:" << t.value->type();
+}
+
+
+std::ostream& operator<<(std::ostream& os, const ChangesOf& c) {
+  GVNFlagSet changes_flags = c.value->ChangesFlags();
+  if (changes_flags.IsEmpty()) return os;
+  os << " changes[";
+  if (changes_flags == c.value->AllSideEffectsFlagSet()) {
+    os << "*";
+  } else {
+    bool add_comma = false;
+#define PRINT_DO(Type)                   \
+  if (changes_flags.Contains(k##Type)) { \
+    if (add_comma) os << ",";            \
+    add_comma = true;                    \
+    os << #Type;                         \
+  }
+    GVN_TRACKED_FLAG_LIST(PRINT_DO);
+    GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
+#undef PRINT_DO
+  }
+  return os << "]";
+}
+
+
+bool HValue::HasMonomorphicJSObjectType() {
+  return !GetMonomorphicJSObjectMap().is_null();
+}
+
+
+bool HValue::UpdateInferredType() {
+  HType type = CalculateInferredType();
+  bool result = (!type.Equals(type_));
+  type_ = type;
+  return result;
+}
+
+
+void HValue::RegisterUse(int index, HValue* new_value) {
+  HValue* old_value = OperandAt(index);
+  if (old_value == new_value) return;
+
+  HUseListNode* removed = NULL;
+  if (old_value != NULL) {
+    removed = old_value->RemoveUse(this, index);
+  }
+
+  if (new_value != NULL) {
+    if (removed == NULL) {
+      new_value->use_list_ = new(new_value->block()->zone()) HUseListNode(
+          this, index, new_value->use_list_);
+    } else {
+      removed->set_tail(new_value->use_list_);
+      new_value->use_list_ = removed;
+    }
+  }
+}
+
+
+void HValue::AddNewRange(Range* r, Zone* zone) {
+  if (!HasRange()) ComputeInitialRange(zone);
+  if (!HasRange()) range_ = new(zone) Range();
+  DCHECK(HasRange());
+  r->StackUpon(range_);
+  range_ = r;
+}
+
+
+void HValue::RemoveLastAddedRange() {
+  DCHECK(HasRange());
+  DCHECK(range_->next() != NULL);
+  range_ = range_->next();
+}
+
+
+void HValue::ComputeInitialRange(Zone* zone) {
+  DCHECK(!HasRange());
+  range_ = InferRange(zone);
+  DCHECK(HasRange());
+}
+
+
+std::ostream& HInstruction::PrintTo(std::ostream& os) const {  // NOLINT
+  os << Mnemonic() << " ";
+  PrintDataTo(os) << ChangesOf(this) << TypeOf(this);
+  if (CheckFlag(HValue::kHasNoObservableSideEffects)) os << " [noOSE]";
+  if (CheckFlag(HValue::kIsDead)) os << " [dead]";
+  return os;
+}
+
+
+std::ostream& HInstruction::PrintDataTo(std::ostream& os) const {  // NOLINT
+  for (int i = 0; i < OperandCount(); ++i) {
+    if (i > 0) os << " ";
+    os << NameOf(OperandAt(i));
+  }
+  return os;
+}
+
+
+void HInstruction::Unlink() {
+  DCHECK(IsLinked());
+  DCHECK(!IsControlInstruction());  // Must never move control instructions.
+  DCHECK(!IsBlockEntry());  // Doesn't make sense to delete these.
+  DCHECK(previous_ != NULL);
+  previous_->next_ = next_;
+  if (next_ == NULL) {
+    DCHECK(block()->last() == this);
+    block()->set_last(previous_);
+  } else {
+    next_->previous_ = previous_;
+  }
+  clear_block();
+}
+
+
+void HInstruction::InsertBefore(HInstruction* next) {
+  DCHECK(!IsLinked());
+  DCHECK(!next->IsBlockEntry());
+  DCHECK(!IsControlInstruction());
+  DCHECK(!next->block()->IsStartBlock());
+  DCHECK(next->previous_ != NULL);
+  HInstruction* prev = next->previous();
+  prev->next_ = this;
+  next->previous_ = this;
+  next_ = next;
+  previous_ = prev;
+  SetBlock(next->block());
+  if (!has_position() && next->has_position()) {
+    set_position(next->position());
+  }
+}
+
+
+void HInstruction::InsertAfter(HInstruction* previous) {
+  DCHECK(!IsLinked());
+  DCHECK(!previous->IsControlInstruction());
+  DCHECK(!IsControlInstruction() || previous->next_ == NULL);
+  HBasicBlock* block = previous->block();
+  // Never insert anything except constants into the start block after
+  // finishing it.
+  if (block->IsStartBlock() && block->IsFinished() && !IsConstant()) {
+    DCHECK(block->end()->SecondSuccessor() == NULL);
+    InsertAfter(block->end()->FirstSuccessor()->first());
+    return;
+  }
+
+  // If we're inserting after an instruction with side-effects that is
+  // followed by a simulate instruction, we need to insert after the
+  // simulate instruction instead.
+  HInstruction* next = previous->next_;
+  if (previous->HasObservableSideEffects() && next != NULL) {
+    DCHECK(next->IsSimulate());
+    previous = next;
+    next = previous->next_;
+  }
+
+  previous_ = previous;
+  next_ = next;
+  SetBlock(block);
+  previous->next_ = this;
+  if (next != NULL) next->previous_ = this;
+  if (block->last() == previous) {
+    block->set_last(this);
+  }
+  if (!has_position() && previous->has_position()) {
+    set_position(previous->position());
+  }
+}
+
+
+bool HInstruction::Dominates(HInstruction* other) {
+  if (block() != other->block()) {
+    return block()->Dominates(other->block());
+  }
+  // Both instructions are in the same basic block. This instruction
+  // should precede the other one in order to dominate it.
+  for (HInstruction* instr = next(); instr != NULL; instr = instr->next()) {
+    if (instr == other) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+#ifdef DEBUG
+void HInstruction::Verify() {
+  // Verify that input operands are defined before use.
+  HBasicBlock* cur_block = block();
+  for (int i = 0; i < OperandCount(); ++i) {
+    HValue* other_operand = OperandAt(i);
+    if (other_operand == NULL) continue;
+    HBasicBlock* other_block = other_operand->block();
+    if (cur_block == other_block) {
+      if (!other_operand->IsPhi()) {
+        HInstruction* cur = this->previous();
+        while (cur != NULL) {
+          if (cur == other_operand) break;
+          cur = cur->previous();
+        }
+        // Must reach other operand in the same block!
+        DCHECK(cur == other_operand);
+      }
+    } else {
+      // If the following assert fires, you may have forgotten an
+      // AddInstruction.
+      DCHECK(other_block->Dominates(cur_block));
+    }
+  }
+
+  // Verify that instructions that may have side-effects are followed
+  // by a simulate instruction.
+  if (HasObservableSideEffects() && !IsOsrEntry()) {
+    DCHECK(next()->IsSimulate());
+  }
+
+  // Verify that instructions that can be eliminated by GVN have overridden
+  // HValue::DataEquals.  The default implementation is UNREACHABLE.  We
+  // don't actually care whether DataEquals returns true or false here.
+  if (CheckFlag(kUseGVN)) DataEquals(this);
+
+  // Verify that all uses are in the graph.
+  for (HUseIterator use = uses(); !use.Done(); use.Advance()) {
+    if (use.value()->IsInstruction()) {
+      DCHECK(HInstruction::cast(use.value())->IsLinked());
+    }
+  }
+}
+#endif
+
+
+bool HInstruction::CanDeoptimize() {
+  // TODO(titzer): make this a virtual method?
+  switch (opcode()) {
+    case HValue::kAbnormalExit:
+    case HValue::kAccessArgumentsAt:
+    case HValue::kAllocate:
+    case HValue::kArgumentsElements:
+    case HValue::kArgumentsLength:
+    case HValue::kArgumentsObject:
+    case HValue::kBlockEntry:
+    case HValue::kBoundsCheckBaseIndexInformation:
+    case HValue::kCallFunction:
+    case HValue::kCallNewArray:
+    case HValue::kCallStub:
+    case HValue::kCapturedObject:
+    case HValue::kClassOfTestAndBranch:
+    case HValue::kCompareGeneric:
+    case HValue::kCompareHoleAndBranch:
+    case HValue::kCompareMap:
+    case HValue::kCompareMinusZeroAndBranch:
+    case HValue::kCompareNumericAndBranch:
+    case HValue::kCompareObjectEqAndBranch:
+    case HValue::kConstant:
+    case HValue::kConstructDouble:
+    case HValue::kContext:
+    case HValue::kDebugBreak:
+    case HValue::kDeclareGlobals:
+    case HValue::kDoubleBits:
+    case HValue::kDummyUse:
+    case HValue::kEnterInlined:
+    case HValue::kEnvironmentMarker:
+    case HValue::kForceRepresentation:
+    case HValue::kGetCachedArrayIndex:
+    case HValue::kGoto:
+    case HValue::kHasCachedArrayIndexAndBranch:
+    case HValue::kHasInstanceTypeAndBranch:
+    case HValue::kInnerAllocatedObject:
+    case HValue::kInstanceOf:
+    case HValue::kIsSmiAndBranch:
+    case HValue::kIsStringAndBranch:
+    case HValue::kIsUndetectableAndBranch:
+    case HValue::kLeaveInlined:
+    case HValue::kLoadFieldByIndex:
+    case HValue::kLoadGlobalGeneric:
+    case HValue::kLoadNamedField:
+    case HValue::kLoadNamedGeneric:
+    case HValue::kLoadRoot:
+    case HValue::kMapEnumLength:
+    case HValue::kMathMinMax:
+    case HValue::kParameter:
+    case HValue::kPhi:
+    case HValue::kPushArguments:
+    case HValue::kReturn:
+    case HValue::kSeqStringGetChar:
+    case HValue::kStoreCodeEntry:
+    case HValue::kStoreFrameContext:
+    case HValue::kStoreKeyed:
+    case HValue::kStoreNamedField:
+    case HValue::kStoreNamedGeneric:
+    case HValue::kStringCharCodeAt:
+    case HValue::kStringCharFromCode:
+    case HValue::kThisFunction:
+    case HValue::kTypeofIsAndBranch:
+    case HValue::kUnknownOSRValue:
+    case HValue::kUseConst:
+      return false;
+
+    case HValue::kAdd:
+    case HValue::kAllocateBlockContext:
+    case HValue::kApplyArguments:
+    case HValue::kBitwise:
+    case HValue::kBoundsCheck:
+    case HValue::kBranch:
+    case HValue::kCallJSFunction:
+    case HValue::kCallRuntime:
+    case HValue::kCallWithDescriptor:
+    case HValue::kChange:
+    case HValue::kCheckArrayBufferNotNeutered:
+    case HValue::kCheckHeapObject:
+    case HValue::kCheckInstanceType:
+    case HValue::kCheckMapValue:
+    case HValue::kCheckMaps:
+    case HValue::kCheckSmi:
+    case HValue::kCheckValue:
+    case HValue::kClampToUint8:
+    case HValue::kDeoptimize:
+    case HValue::kDiv:
+    case HValue::kForInCacheArray:
+    case HValue::kForInPrepareMap:
+    case HValue::kHasInPrototypeChainAndBranch:
+    case HValue::kInvokeFunction:
+    case HValue::kLoadContextSlot:
+    case HValue::kLoadFunctionPrototype:
+    case HValue::kLoadKeyed:
+    case HValue::kLoadKeyedGeneric:
+    case HValue::kMathFloorOfDiv:
+    case HValue::kMaybeGrowElements:
+    case HValue::kMod:
+    case HValue::kMul:
+    case HValue::kOsrEntry:
+    case HValue::kPower:
+    case HValue::kPrologue:
+    case HValue::kRor:
+    case HValue::kSar:
+    case HValue::kSeqStringSetChar:
+    case HValue::kShl:
+    case HValue::kShr:
+    case HValue::kSimulate:
+    case HValue::kStackCheck:
+    case HValue::kStoreContextSlot:
+    case HValue::kStoreKeyedGeneric:
+    case HValue::kStringAdd:
+    case HValue::kStringCompareAndBranch:
+    case HValue::kSub:
+    case HValue::kToFastProperties:
+    case HValue::kTransitionElementsKind:
+    case HValue::kTrapAllocationMemento:
+    case HValue::kTypeof:
+    case HValue::kUnaryMathOperation:
+    case HValue::kWrapReceiver:
+      return true;
+  }
+  UNREACHABLE();
+  return true;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const NameOf& v) {
+  return os << v.value->representation().Mnemonic() << v.value->id();
+}
+
+std::ostream& HDummyUse::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(value());
+}
+
+
+std::ostream& HEnvironmentMarker::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  return os << (kind() == BIND ? "bind" : "lookup") << " var[" << index()
+            << "]";
+}
+
+
+std::ostream& HUnaryCall::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(value()) << " #" << argument_count();
+}
+
+
+std::ostream& HCallJSFunction::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(function()) << " #" << argument_count();
+}
+
+
+HCallJSFunction* HCallJSFunction::New(Isolate* isolate, Zone* zone,
+                                      HValue* context, HValue* function,
+                                      int argument_count) {
+  bool has_stack_check = false;
+  if (function->IsConstant()) {
+    HConstant* fun_const = HConstant::cast(function);
+    Handle<JSFunction> jsfun =
+        Handle<JSFunction>::cast(fun_const->handle(isolate));
+    has_stack_check = !jsfun.is_null() &&
+        (jsfun->code()->kind() == Code::FUNCTION ||
+         jsfun->code()->kind() == Code::OPTIMIZED_FUNCTION);
+  }
+
+  return new (zone) HCallJSFunction(function, argument_count, has_stack_check);
+}
+
+
+std::ostream& HBinaryCall::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(first()) << " " << NameOf(second()) << " #"
+            << argument_count();
+}
+
+
+std::ostream& HCallFunction::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << NameOf(context()) << " " << NameOf(function());
+  if (HasVectorAndSlot()) {
+    os << " (type-feedback-vector icslot " << slot().ToInt() << ")";
+  }
+  os << " (convert mode" << convert_mode() << ")";
+  return os;
+}
+
+
+void HBoundsCheck::ApplyIndexChange() {
+  if (skip_check()) return;
+
+  DecompositionResult decomposition;
+  bool index_is_decomposable = index()->TryDecompose(&decomposition);
+  if (index_is_decomposable) {
+    DCHECK(decomposition.base() == base());
+    if (decomposition.offset() == offset() &&
+        decomposition.scale() == scale()) return;
+  } else {
+    return;
+  }
+
+  ReplaceAllUsesWith(index());
+
+  HValue* current_index = decomposition.base();
+  int actual_offset = decomposition.offset() + offset();
+  int actual_scale = decomposition.scale() + scale();
+
+  HGraph* graph = block()->graph();
+  Isolate* isolate = graph->isolate();
+  Zone* zone = graph->zone();
+  HValue* context = graph->GetInvalidContext();
+  if (actual_offset != 0) {
+    HConstant* add_offset =
+        HConstant::New(isolate, zone, context, actual_offset);
+    add_offset->InsertBefore(this);
+    HInstruction* add =
+        HAdd::New(isolate, zone, context, current_index, add_offset);
+    add->InsertBefore(this);
+    add->AssumeRepresentation(index()->representation());
+    add->ClearFlag(kCanOverflow);
+    current_index = add;
+  }
+
+  if (actual_scale != 0) {
+    HConstant* sar_scale = HConstant::New(isolate, zone, context, actual_scale);
+    sar_scale->InsertBefore(this);
+    HInstruction* sar =
+        HSar::New(isolate, zone, context, current_index, sar_scale);
+    sar->InsertBefore(this);
+    sar->AssumeRepresentation(index()->representation());
+    current_index = sar;
+  }
+
+  SetOperandAt(0, current_index);
+
+  base_ = NULL;
+  offset_ = 0;
+  scale_ = 0;
+}
+
+
+std::ostream& HBoundsCheck::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << NameOf(index()) << " " << NameOf(length());
+  if (base() != NULL && (offset() != 0 || scale() != 0)) {
+    os << " base: ((";
+    if (base() != index()) {
+      os << NameOf(index());
+    } else {
+      os << "index";
+    }
+    os << " + " << offset() << ") >> " << scale() << ")";
+  }
+  if (skip_check()) os << " [DISABLED]";
+  return os;
+}
+
+
+void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) {
+  DCHECK(CheckFlag(kFlexibleRepresentation));
+  HValue* actual_index = index()->ActualValue();
+  HValue* actual_length = length()->ActualValue();
+  Representation index_rep = actual_index->representation();
+  Representation length_rep = actual_length->representation();
+  if (index_rep.IsTagged() && actual_index->type().IsSmi()) {
+    index_rep = Representation::Smi();
+  }
+  if (length_rep.IsTagged() && actual_length->type().IsSmi()) {
+    length_rep = Representation::Smi();
+  }
+  Representation r = index_rep.generalize(length_rep);
+  if (r.is_more_general_than(Representation::Integer32())) {
+    r = Representation::Integer32();
+  }
+  UpdateRepresentation(r, h_infer, "boundscheck");
+}
+
+
+Range* HBoundsCheck::InferRange(Zone* zone) {
+  Representation r = representation();
+  if (r.IsSmiOrInteger32() && length()->HasRange()) {
+    int upper = length()->range()->upper() - (allow_equality() ? 0 : 1);
+    int lower = 0;
+
+    Range* result = new(zone) Range(lower, upper);
+    if (index()->HasRange()) {
+      result->Intersect(index()->range());
+    }
+
+    // In case of Smi representation, clamp result to Smi::kMaxValue.
+    if (r.IsSmi()) result->ClampToSmi();
+    return result;
+  }
+  return HValue::InferRange(zone);
+}
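+
+// HBoundsCheck::InferRange() example (hypothetical values): for a length in
+// [0, 10] with allow_equality() false, the inferred index range is [0, 9],
+// further intersected with the index's own range when one is known.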
+
+
+std::ostream& HBoundsCheckBaseIndexInformation::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  // TODO(svenpanne) This 2nd base_index() looks wrong...
+  return os << "base: " << NameOf(base_index())
+            << ", check: " << NameOf(base_index());
+}
+
+
+std::ostream& HCallWithDescriptor::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  for (int i = 0; i < OperandCount(); i++) {
+    os << NameOf(OperandAt(i)) << " ";
+  }
+  return os << "#" << argument_count();
+}
+
+
+std::ostream& HCallNewArray::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << ElementsKindToString(elements_kind()) << " ";
+  return HBinaryCall::PrintDataTo(os);
+}
+
+
+std::ostream& HCallRuntime::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << function()->name << " ";
+  if (save_doubles() == kSaveFPRegs) os << "[save doubles] ";
+  return os << "#" << argument_count();
+}
+
+
+std::ostream& HClassOfTestAndBranch::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  return os << "class_of_test(" << NameOf(value()) << ", \""
+            << class_name()->ToCString().get() << "\")";
+}
+
+
+std::ostream& HWrapReceiver::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(receiver()) << " " << NameOf(function());
+}
+
+
+std::ostream& HAccessArgumentsAt::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  return os << NameOf(arguments()) << "[" << NameOf(index()) << "], length "
+            << NameOf(length());
+}
+
+
+std::ostream& HAllocateBlockContext::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  return os << NameOf(context()) << " " << NameOf(function());
+}
+
+
+std::ostream& HControlInstruction::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  os << " goto (";
+  bool first_block = true;
+  for (HSuccessorIterator it(this); !it.Done(); it.Advance()) {
+    if (!first_block) os << ", ";
+    os << *it.Current();
+    first_block = false;
+  }
+  return os << ")";
+}
+
+
+std::ostream& HUnaryControlInstruction::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  os << NameOf(value());
+  return HControlInstruction::PrintDataTo(os);
+}
+
+
+std::ostream& HReturn::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(value()) << " (pop " << NameOf(parameter_count())
+            << " values)";
+}
+
+
+Representation HBranch::observed_input_representation(int index) {
+  if (expected_input_types_.Contains(ToBooleanStub::NULL_TYPE) ||
+      expected_input_types_.Contains(ToBooleanStub::SPEC_OBJECT) ||
+      expected_input_types_.Contains(ToBooleanStub::STRING) ||
+      expected_input_types_.Contains(ToBooleanStub::SYMBOL) ||
+      expected_input_types_.Contains(ToBooleanStub::SIMD_VALUE)) {
+    return Representation::Tagged();
+  }
+  if (expected_input_types_.Contains(ToBooleanStub::UNDEFINED)) {
+    if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+      return Representation::Double();
+    }
+    return Representation::Tagged();
+  }
+  if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
+    return Representation::Double();
+  }
+  if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
+    return Representation::Smi();
+  }
+  return Representation::None();
+}
+
+
+bool HBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  HValue* value = this->value();
+  if (value->EmitAtUses()) {
+    DCHECK(value->IsConstant());
+    DCHECK(!value->representation().IsDouble());
+    *block = HConstant::cast(value)->BooleanValue()
+        ? FirstSuccessor()
+        : SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+std::ostream& HBranch::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return HUnaryControlInstruction::PrintDataTo(os) << " "
+                                                   << expected_input_types();
+}
+
+
+std::ostream& HCompareMap::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << NameOf(value()) << " (" << *map().handle() << ")";
+  HControlInstruction::PrintDataTo(os);
+  if (known_successor_index() == 0) {
+    os << " [true]";
+  } else if (known_successor_index() == 1) {
+    os << " [false]";
+  }
+  return os;
+}
+
+
+const char* HUnaryMathOperation::OpName() const {
+  switch (op()) {
+    case kMathFloor:
+      return "floor";
+    case kMathFround:
+      return "fround";
+    case kMathRound:
+      return "round";
+    case kMathAbs:
+      return "abs";
+    case kMathLog:
+      return "log";
+    case kMathExp:
+      return "exp";
+    case kMathSqrt:
+      return "sqrt";
+    case kMathPowHalf:
+      return "pow-half";
+    case kMathClz32:
+      return "clz32";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+Range* HUnaryMathOperation::InferRange(Zone* zone) {
+  Representation r = representation();
+  if (op() == kMathClz32) return new(zone) Range(0, 32);
+  if (r.IsSmiOrInteger32() && value()->HasRange()) {
+    if (op() == kMathAbs) {
+      int upper = value()->range()->upper();
+      int lower = value()->range()->lower();
+      bool spans_zero = value()->range()->CanBeZero();
+      // Math.abs(kMinInt) overflows its representation, in which case the
+      // instruction deopts. Hence clamp it to kMaxInt.
+      int abs_upper = upper == kMinInt ? kMaxInt : abs(upper);
+      int abs_lower = lower == kMinInt ? kMaxInt : abs(lower);
+      Range* result =
+          new(zone) Range(spans_zero ? 0 : Min(abs_lower, abs_upper),
+                          Max(abs_lower, abs_upper));
+      // In case of Smi representation, clamp Math.abs(Smi::kMinValue) to
+      // Smi::kMaxValue.
+      if (r.IsSmi()) result->ClampToSmi();
+      return result;
+    }
+  }
+  return HValue::InferRange(zone);
+}
+
+
+std::ostream& HUnaryMathOperation::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  return os << OpName() << " " << NameOf(value());
+}
+
+
+std::ostream& HUnaryOperation::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(value());
+}
+
+
+std::ostream& HHasInstanceTypeAndBranch::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  os << NameOf(value());
+  switch (from_) {
+    case FIRST_JS_RECEIVER_TYPE:
+      if (to_ == LAST_TYPE) os << " spec_object";
+      break;
+    case JS_REGEXP_TYPE:
+      if (to_ == JS_REGEXP_TYPE) os << " reg_exp";
+      break;
+    case JS_ARRAY_TYPE:
+      if (to_ == JS_ARRAY_TYPE) os << " array";
+      break;
+    case JS_FUNCTION_TYPE:
+      if (to_ == JS_FUNCTION_TYPE) os << " function";
+      break;
+    default:
+      break;
+  }
+  return os;
+}
+
+
+std::ostream& HTypeofIsAndBranch::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  os << NameOf(value()) << " == " << type_literal()->ToCString().get();
+  return HControlInstruction::PrintDataTo(os);
+}
+
+
+namespace {
+
+String* TypeOfString(HConstant* constant, Isolate* isolate) {
+  Heap* heap = isolate->heap();
+  if (constant->HasNumberValue()) return heap->number_string();
+  if (constant->IsUndetectable()) return heap->undefined_string();
+  if (constant->HasStringValue()) return heap->string_string();
+  switch (constant->GetInstanceType()) {
+    case ODDBALL_TYPE: {
+      Unique<Object> unique = constant->GetUnique();
+      if (unique.IsKnownGlobal(heap->true_value()) ||
+          unique.IsKnownGlobal(heap->false_value())) {
+        return heap->boolean_string();
+      }
+      if (unique.IsKnownGlobal(heap->null_value())) {
+        return heap->object_string();
+      }
+      DCHECK(unique.IsKnownGlobal(heap->undefined_value()));
+      return heap->undefined_string();
+    }
+    case SYMBOL_TYPE:
+      return heap->symbol_string();
+    case SIMD128_VALUE_TYPE: {
+      Unique<Map> map = constant->ObjectMap();
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
+  if (map.IsKnownGlobal(heap->type##_map())) {                \
+    return heap->type##_string();                             \
+  }
+      SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+      UNREACHABLE();
+      return nullptr;
+    }
+    default:
+      if (constant->IsCallable()) return heap->function_string();
+      return heap->object_string();
+  }
+}
+
+}  // namespace
+
+
+bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    HConstant* constant = HConstant::cast(value());
+    String* type_string = TypeOfString(constant, isolate());
+    bool same_type = type_literal_.IsKnownGlobal(type_string);
+    *block = same_type ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  } else if (value()->representation().IsSpecialization()) {
+    bool number_type =
+        type_literal_.IsKnownGlobal(isolate()->heap()->number_string());
+    *block = number_type ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
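+
+// Example (hypothetical): with --fold-constants, typeof "x" == "string"
+// selects FirstSuccessor() statically; a non-constant value in a specialized
+// (numeric) representation also folds, since its typeof must be "number".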
+
+
+std::ostream& HCheckMapValue::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(value()) << " " << NameOf(map());
+}
+
+
+HValue* HCheckMapValue::Canonicalize() {
+  if (map()->IsConstant()) {
+    HConstant* c_map = HConstant::cast(map());
+    return HCheckMaps::CreateAndInsertAfter(
+        block()->graph()->zone(), value(), c_map->MapValue(),
+        c_map->HasStableMapValue(), this);
+  }
+  return this;
+}
+
+
+std::ostream& HForInPrepareMap::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(enumerable());
+}
+
+
+std::ostream& HForInCacheArray::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(enumerable()) << " " << NameOf(map()) << "[" << idx_
+            << "]";
+}
+
+
+std::ostream& HLoadFieldByIndex::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  return os << NameOf(object()) << " " << NameOf(index());
+}
+
+
+static bool MatchLeftIsOnes(HValue* l, HValue* r, HValue** negated) {
+  if (!l->EqualsInteger32Constant(~0)) return false;
+  *negated = r;
+  return true;
+}
+
+
+static bool MatchNegationViaXor(HValue* instr, HValue** negated) {
+  if (!instr->IsBitwise()) return false;
+  HBitwise* b = HBitwise::cast(instr);
+  return (b->op() == Token::BIT_XOR) &&
+      (MatchLeftIsOnes(b->left(), b->right(), negated) ||
+       MatchLeftIsOnes(b->right(), b->left(), negated));
+}
+
+
+static bool MatchDoubleNegation(HValue* instr, HValue** arg) {
+  HValue* negated;
+  return MatchNegationViaXor(instr, &negated) &&
+      MatchNegationViaXor(negated, arg);
+}
+
+
+HValue* HBitwise::Canonicalize() {
+  if (!representation().IsSmiOrInteger32()) return this;
+  // If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
+  int32_t nop_constant = (op() == Token::BIT_AND) ? -1 : 0;
+  if (left()->EqualsInteger32Constant(nop_constant) &&
+      !right()->CheckFlag(kUint32)) {
+    return right();
+  }
+  if (right()->EqualsInteger32Constant(nop_constant) &&
+      !left()->CheckFlag(kUint32)) {
+    return left();
+  }
+  // Optimize double negation, a common pattern used for ToInt32(x).
+  HValue* arg;
+  if (MatchDoubleNegation(this, &arg) && !arg->CheckFlag(kUint32)) {
+    return arg;
+  }
+  return this;
+}
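+
+// Example (hypothetical): the JavaScript idiom ~~x lowers to (x ^ -1) ^ -1;
+// MatchDoubleNegation() recognizes the nested XOR-with-all-ones pattern, so
+// Canonicalize() folds the whole expression back to x.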
+
+
+// static
+HInstruction* HAdd::New(Isolate* isolate, Zone* zone, HValue* context,
+                        HValue* left, HValue* right, Strength strength,
+                        ExternalAddType external_add_type) {
+  // For everything else, you should use the other factory method without
+  // ExternalAddType.
+  DCHECK_EQ(external_add_type, AddOfExternalAndTagged);
+  return new (zone) HAdd(context, left, right, strength, external_add_type);
+}
+
+
+Representation HAdd::RepresentationFromInputs() {
+  Representation left_rep = left()->representation();
+  if (left_rep.IsExternal()) {
+    return Representation::External();
+  }
+  return HArithmeticBinaryOperation::RepresentationFromInputs();
+}
+
+
+Representation HAdd::RequiredInputRepresentation(int index) {
+  if (index == 2) {
+    Representation left_rep = left()->representation();
+    if (left_rep.IsExternal()) {
+      if (external_add_type_ == AddOfExternalAndTagged) {
+        return Representation::Tagged();
+      } else {
+        return Representation::Integer32();
+      }
+    }
+  }
+  return HArithmeticBinaryOperation::RequiredInputRepresentation(index);
+}
+
+
+static bool IsIdentityOperation(HValue* arg1, HValue* arg2, int32_t identity) {
+  return arg1->representation().IsSpecialization() &&
+    arg2->EqualsInteger32Constant(identity);
+}
+
+
+HValue* HAdd::Canonicalize() {
+  // Adding 0 is an identity operation except in case of -0: -0 + 0 = +0
+  if (IsIdentityOperation(left(), right(), 0) &&
+      !left()->representation().IsDouble()) {  // Left could be -0.
+    return left();
+  }
+  if (IsIdentityOperation(right(), left(), 0) &&
+      !left()->representation().IsDouble()) {  // Right could be -0.
+    return right();
+  }
+  return this;
+}
+
+
+HValue* HSub::Canonicalize() {
+  if (IsIdentityOperation(left(), right(), 0)) return left();
+  return this;
+}
+
+
+HValue* HMul::Canonicalize() {
+  if (IsIdentityOperation(left(), right(), 1)) return left();
+  if (IsIdentityOperation(right(), left(), 1)) return right();
+  return this;
+}
+
+
+bool HMul::MulMinusOne() {
+  if (left()->EqualsInteger32Constant(-1) ||
+      right()->EqualsInteger32Constant(-1)) {
+    return true;
+  }
+
+  return false;
+}
+
+
+HValue* HMod::Canonicalize() {
+  return this;
+}
+
+
+HValue* HDiv::Canonicalize() {
+  if (IsIdentityOperation(left(), right(), 1)) return left();
+  return this;
+}
+
+
+HValue* HChange::Canonicalize() {
+  return (from().Equals(to())) ? value() : this;
+}
+
+
+HValue* HWrapReceiver::Canonicalize() {
+  if (HasNoUses()) return NULL;
+  if (receiver()->type().IsJSReceiver()) {
+    return receiver();
+  }
+  return this;
+}
+
+
+std::ostream& HTypeof::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(value());
+}
+
+
+HInstruction* HForceRepresentation::New(Isolate* isolate, Zone* zone,
+                                        HValue* context, HValue* value,
+                                        Representation representation) {
+  if (FLAG_fold_constants && value->IsConstant()) {
+    HConstant* c = HConstant::cast(value);
+    c = c->CopyToRepresentation(representation, zone);
+    if (c != NULL) return c;
+  }
+  return new(zone) HForceRepresentation(value, representation);
+}
+
+
+std::ostream& HForceRepresentation::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  return os << representation().Mnemonic() << " " << NameOf(value());
+}
+
+
+std::ostream& HChange::PrintDataTo(std::ostream& os) const {  // NOLINT
+  HUnaryOperation::PrintDataTo(os);
+  os << " " << from().Mnemonic() << " to " << to().Mnemonic();
+
+  if (CanTruncateToSmi()) os << " truncating-smi";
+  if (CanTruncateToInt32()) os << " truncating-int32";
+  if (CheckFlag(kBailoutOnMinusZero)) os << " -0?";
+  if (CheckFlag(kAllowUndefinedAsNaN)) os << " allow-undefined-as-nan";
+  return os;
+}
+
+
+HValue* HUnaryMathOperation::Canonicalize() {
+  if (op() == kMathRound || op() == kMathFloor) {
+    HValue* val = value();
+    if (val->IsChange()) val = HChange::cast(val)->value();
+    if (val->representation().IsSmiOrInteger32()) {
+      if (val->representation().Equals(representation())) return val;
+      return Prepend(new(block()->zone()) HChange(
+          val, representation(), false, false));
+    }
+  }
+  if (op() == kMathFloor && value()->IsDiv() && value()->HasOneUse()) {
+    HDiv* hdiv = HDiv::cast(value());
+
+    HValue* left = hdiv->left();
+    if (left->representation().IsInteger32() && !left->CheckFlag(kUint32)) {
+      // A value with an integer representation does not need to be transformed.
+    } else if (left->IsChange() && HChange::cast(left)->from().IsInteger32() &&
+               !HChange::cast(left)->value()->CheckFlag(kUint32)) {
+      // A change from an integer32 can be replaced by the integer32 value.
+      left = HChange::cast(left)->value();
+    } else if (hdiv->observed_input_representation(1).IsSmiOrInteger32()) {
+      left = Prepend(new(block()->zone()) HChange(
+          left, Representation::Integer32(), false, false));
+    } else {
+      return this;
+    }
+
+    HValue* right = hdiv->right();
+    if (right->IsInteger32Constant()) {
+      right = Prepend(HConstant::cast(right)->CopyToRepresentation(
+          Representation::Integer32(), right->block()->zone()));
+    } else if (right->representation().IsInteger32() &&
+               !right->CheckFlag(kUint32)) {
+      // A value with an integer representation does not need to be transformed.
+    } else if (right->IsChange() &&
+               HChange::cast(right)->from().IsInteger32() &&
+               !HChange::cast(right)->value()->CheckFlag(kUint32)) {
+      // A change from an integer32 can be replaced by the integer32 value.
+      right = HChange::cast(right)->value();
+    } else if (hdiv->observed_input_representation(2).IsSmiOrInteger32()) {
+      right = Prepend(new(block()->zone()) HChange(
+          right, Representation::Integer32(), false, false));
+    } else {
+      return this;
+    }
+
+    return Prepend(HMathFloorOfDiv::New(
+        block()->graph()->isolate(), block()->zone(), context(), left, right));
+  }
+  return this;
+}
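+
+// Example (hypothetical): Math.floor(a / b) where both operands are (or can
+// be converted to) integer32 values canonicalizes into a single
+// HMathFloorOfDiv, with any intervening HChange instructions stripped.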
+
+
+HValue* HCheckInstanceType::Canonicalize() {
+  if ((check_ == IS_JS_RECEIVER && value()->type().IsJSReceiver()) ||
+      (check_ == IS_JS_ARRAY && value()->type().IsJSArray()) ||
+      (check_ == IS_STRING && value()->type().IsString())) {
+    return value();
+  }
+
+  if (check_ == IS_INTERNALIZED_STRING && value()->IsConstant()) {
+    if (HConstant::cast(value())->HasInternalizedStringValue()) {
+      return value();
+    }
+  }
+  return this;
+}
+
+
+void HCheckInstanceType::GetCheckInterval(InstanceType* first,
+                                          InstanceType* last) {
+  DCHECK(is_interval_check());
+  switch (check_) {
+    case IS_JS_RECEIVER:
+      *first = FIRST_JS_RECEIVER_TYPE;
+      *last = LAST_JS_RECEIVER_TYPE;
+      return;
+    case IS_JS_ARRAY:
+      *first = *last = JS_ARRAY_TYPE;
+      return;
+    case IS_JS_DATE:
+      *first = *last = JS_DATE_TYPE;
+      return;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
+  DCHECK(!is_interval_check());
+  switch (check_) {
+    case IS_STRING:
+      *mask = kIsNotStringMask;
+      *tag = kStringTag;
+      return;
+    case IS_INTERNALIZED_STRING:
+      *mask = kIsNotStringMask | kIsNotInternalizedMask;
+      *tag = kInternalizedTag;
+      return;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+std::ostream& HCheckMaps::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << NameOf(value()) << " [" << *maps()->at(0).handle();
+  for (int i = 1; i < maps()->size(); ++i) {
+    os << "," << *maps()->at(i).handle();
+  }
+  os << "]";
+  if (IsStabilityCheck()) os << "(stability-check)";
+  return os;
+}
+
+
+HValue* HCheckMaps::Canonicalize() {
+  if (!IsStabilityCheck() && maps_are_stable() && value()->IsConstant()) {
+    HConstant* c_value = HConstant::cast(value());
+    if (c_value->HasObjectMap()) {
+      for (int i = 0; i < maps()->size(); ++i) {
+        if (c_value->ObjectMap() == maps()->at(i)) {
+          if (maps()->size() > 1) {
+            set_maps(new(block()->graph()->zone()) UniqueSet<Map>(
+                    maps()->at(i), block()->graph()->zone()));
+          }
+          MarkAsStabilityCheck();
+          break;
+        }
+      }
+    }
+  }
+  return this;
+}
+
+
+std::ostream& HCheckValue::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(value()) << " " << Brief(*object().handle());
+}
+
+
+HValue* HCheckValue::Canonicalize() {
+  return (value()->IsConstant() &&
+          HConstant::cast(value())->EqualsUnique(object_)) ? NULL : this;
+}
+
+
+const char* HCheckInstanceType::GetCheckName() const {
+  switch (check_) {
+    case IS_JS_RECEIVER: return "object";
+    case IS_JS_ARRAY: return "array";
+    case IS_JS_DATE: return "date";
+    case IS_STRING: return "string";
+    case IS_INTERNALIZED_STRING: return "internalized_string";
+  }
+  UNREACHABLE();
+  return "";
+}
+
+
+std::ostream& HCheckInstanceType::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  os << GetCheckName() << " ";
+  return HUnaryOperation::PrintDataTo(os);
+}
+
+
+std::ostream& HCallStub::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << CodeStub::MajorName(major_key_) << " ";
+  return HUnaryCall::PrintDataTo(os);
+}
+
+
+std::ostream& HUnknownOSRValue::PrintDataTo(std::ostream& os) const {  // NOLINT
+  const char* type = "expression";
+  if (environment_->is_local_index(index_)) type = "local";
+  if (environment_->is_special_index(index_)) type = "special";
+  if (environment_->is_parameter_index(index_)) type = "parameter";
+  return os << type << " @ " << index_;
+}
+
+
+std::ostream& HInstanceOf::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(left()) << " " << NameOf(right()) << " "
+            << NameOf(context());
+}
+
+
+Range* HValue::InferRange(Zone* zone) {
+  Range* result;
+  if (representation().IsSmi() || type().IsSmi()) {
+    result = new(zone) Range(Smi::kMinValue, Smi::kMaxValue);
+    result->set_can_be_minus_zero(false);
+  } else {
+    result = new(zone) Range();
+    result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32));
+    // TODO(jkummerow): The range cannot be minus zero when the upper type
+    // bound is Integer32.
+  }
+  return result;
+}
+
+
+Range* HChange::InferRange(Zone* zone) {
+  Range* input_range = value()->range();
+  if (from().IsInteger32() && !value()->CheckFlag(HInstruction::kUint32) &&
+      (to().IsSmi() ||
+       (to().IsTagged() &&
+        input_range != NULL &&
+        input_range->IsInSmiRange()))) {
+    set_type(HType::Smi());
+    ClearChangesFlag(kNewSpacePromotion);
+  }
+  if (to().IsSmiOrTagged() &&
+      input_range != NULL &&
+      input_range->IsInSmiRange() &&
+      (!SmiValuesAre32Bits() ||
+       !value()->CheckFlag(HValue::kUint32) ||
+       input_range->upper() != kMaxInt)) {
+    // The Range class can't express upper bounds in the (kMaxInt, kMaxUint32]
+    // interval, so we treat kMaxInt as a sentinel for this entire interval.
+    ClearFlag(kCanOverflow);
+  }
+  Range* result = (input_range != NULL)
+      ? input_range->Copy(zone)
+      : HValue::InferRange(zone);
+  result->set_can_be_minus_zero(!to().IsSmiOrInteger32() ||
+                                !(CheckFlag(kAllUsesTruncatingToInt32) ||
+                                  CheckFlag(kAllUsesTruncatingToSmi)));
+  if (to().IsSmi()) result->ClampToSmi();
+  return result;
+}
+
+
+Range* HConstant::InferRange(Zone* zone) {
+  if (HasInteger32Value()) {
+    Range* result = new(zone) Range(int32_value_, int32_value_);
+    result->set_can_be_minus_zero(false);
+    return result;
+  }
+  return HValue::InferRange(zone);
+}
+
+
+SourcePosition HPhi::position() const { return block()->first()->position(); }
+
+
+Range* HPhi::InferRange(Zone* zone) {
+  Representation r = representation();
+  if (r.IsSmiOrInteger32()) {
+    if (block()->IsLoopHeader()) {
+      Range* range = r.IsSmi()
+          ? new(zone) Range(Smi::kMinValue, Smi::kMaxValue)
+          : new(zone) Range(kMinInt, kMaxInt);
+      return range;
+    } else {
+      Range* range = OperandAt(0)->range()->Copy(zone);
+      for (int i = 1; i < OperandCount(); ++i) {
+        range->Union(OperandAt(i)->range());
+      }
+      return range;
+    }
+  } else {
+    return HValue::InferRange(zone);
+  }
+}
+
+
+Range* HAdd::InferRange(Zone* zone) {
+  Representation r = representation();
+  if (r.IsSmiOrInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* res = a->Copy(zone);
+    if (!res->AddAndCheckOverflow(r, b) ||
+        (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
+        (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) {
+      ClearFlag(kCanOverflow);
+    }
+    res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
+                               !CheckFlag(kAllUsesTruncatingToInt32) &&
+                               a->CanBeMinusZero() && b->CanBeMinusZero());
+    return res;
+  } else {
+    return HValue::InferRange(zone);
+  }
+}
+
+
+Range* HSub::InferRange(Zone* zone) {
+  Representation r = representation();
+  if (r.IsSmiOrInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* res = a->Copy(zone);
+    if (!res->SubAndCheckOverflow(r, b) ||
+        (r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
+        (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) {
+      ClearFlag(kCanOverflow);
+    }
+    res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
+                               !CheckFlag(kAllUsesTruncatingToInt32) &&
+                               a->CanBeMinusZero() && b->CanBeZero());
+    return res;
+  } else {
+    return HValue::InferRange(zone);
+  }
+}
+
+
+Range* HMul::InferRange(Zone* zone) {
+  Representation r = representation();
+  if (r.IsSmiOrInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* res = a->Copy(zone);
+    if (!res->MulAndCheckOverflow(r, b) ||
+        (((r.IsInteger32() && CheckFlag(kAllUsesTruncatingToInt32)) ||
+         (r.IsSmi() && CheckFlag(kAllUsesTruncatingToSmi))) &&
+         MulMinusOne())) {
+      // Truncated int multiplication is too precise and therefore not the
+      // same as converting to Double and back.
+      // Handle truncated integer multiplication by -1 specially.
+      ClearFlag(kCanOverflow);
+    }
+    res->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToSmi) &&
+                               !CheckFlag(kAllUsesTruncatingToInt32) &&
+                               ((a->CanBeZero() && b->CanBeNegative()) ||
+                                (a->CanBeNegative() && b->CanBeZero())));
+    return res;
+  } else {
+    return HValue::InferRange(zone);
+  }
+}
+
+
+Range* HDiv::InferRange(Zone* zone) {
+  if (representation().IsInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* result = new(zone) Range();
+    result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+                                  (a->CanBeMinusZero() ||
+                                   (a->CanBeZero() && b->CanBeNegative())));
+    if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+      ClearFlag(kCanOverflow);
+    }
+
+    if (!b->CanBeZero()) {
+      ClearFlag(kCanBeDivByZero);
+    }
+    return result;
+  } else {
+    return HValue::InferRange(zone);
+  }
+}
+
+
+Range* HMathFloorOfDiv::InferRange(Zone* zone) {
+  if (representation().IsInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* result = new(zone) Range();
+    result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+                                  (a->CanBeMinusZero() ||
+                                   (a->CanBeZero() && b->CanBeNegative())));
+    if (!a->Includes(kMinInt)) {
+      ClearFlag(kLeftCanBeMinInt);
+    }
+
+    if (!a->CanBeNegative()) {
+      ClearFlag(HValue::kLeftCanBeNegative);
+    }
+
+    if (!a->CanBePositive()) {
+      ClearFlag(HValue::kLeftCanBePositive);
+    }
+
+    if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+      ClearFlag(kCanOverflow);
+    }
+
+    if (!b->CanBeZero()) {
+      ClearFlag(kCanBeDivByZero);
+    }
+    return result;
+  } else {
+    return HValue::InferRange(zone);
+  }
+}
+
+
+// Returns the absolute value of its argument minus one, avoiding undefined
+// behavior at kMinInt.
+static int32_t AbsMinus1(int32_t a) { return a < 0 ? -(a + 1) : (a - 1); }
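+
+// E.g. AbsMinus1(5) == 4, AbsMinus1(-5) == 4 and AbsMinus1(kMinInt) ==
+// kMaxInt, where plain abs(kMinInt) would be undefined behavior.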
+
+
+Range* HMod::InferRange(Zone* zone) {
+  if (representation().IsInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+
+    // The magnitude of the modulus is bounded by the right operand.
+    int32_t positive_bound = Max(AbsMinus1(b->lower()), AbsMinus1(b->upper()));
+
+    // The result of the modulo operation has the sign of its left operand.
+    bool left_can_be_negative = a->CanBeMinusZero() || a->CanBeNegative();
+    Range* result = new(zone) Range(left_can_be_negative ? -positive_bound : 0,
+                                    a->CanBePositive() ? positive_bound : 0);
+
+    result->set_can_be_minus_zero(!CheckFlag(kAllUsesTruncatingToInt32) &&
+                                  left_can_be_negative);
+
+    if (!a->CanBeNegative()) {
+      ClearFlag(HValue::kLeftCanBeNegative);
+    }
+
+    if (!a->Includes(kMinInt) || !b->Includes(-1)) {
+      ClearFlag(HValue::kCanOverflow);
+    }
+
+    if (!b->CanBeZero()) {
+      ClearFlag(HValue::kCanBeDivByZero);
+    }
+    return result;
+  } else {
+    return HValue::InferRange(zone);
+  }
+}
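+
+// HMod::InferRange() example (hypothetical values): for a == [-10, 7] and
+// b == [-4, 9], positive_bound == Max(3, 8) == 8 and the result range is
+// [-8, 8]; for a == [0, 7] (and no -0) it tightens to [0, 8].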
+
+
+InductionVariableData* InductionVariableData::ExaminePhi(HPhi* phi) {
+  if (phi->block()->loop_information() == NULL) return NULL;
+  if (phi->OperandCount() != 2) return NULL;
+  int32_t candidate_increment;
+
+  candidate_increment = ComputeIncrement(phi, phi->OperandAt(0));
+  if (candidate_increment != 0) {
+    return new(phi->block()->graph()->zone())
+        InductionVariableData(phi, phi->OperandAt(1), candidate_increment);
+  }
+
+  candidate_increment = ComputeIncrement(phi, phi->OperandAt(1));
+  if (candidate_increment != 0) {
+    return new(phi->block()->graph()->zone())
+        InductionVariableData(phi, phi->OperandAt(0), candidate_increment);
+  }
+
+  return NULL;
+}
+
+
+/*
+ * This function tries to match the following patterns (and all the relevant
+ * variants related to |, & and + being commutative):
+ * base | constant_or_mask
+ * base & constant_and_mask
+ * (base + constant_offset) & constant_and_mask
+ * (base - constant_offset) & constant_and_mask
+ */
+void InductionVariableData::DecomposeBitwise(
+    HValue* value,
+    BitwiseDecompositionResult* result) {
+  HValue* base = IgnoreOsrValue(value);
+  result->base = value;
+
+  if (!base->representation().IsInteger32()) return;
+
+  if (base->IsBitwise()) {
+    bool allow_offset = false;
+    int32_t mask = 0;
+
+    HBitwise* bitwise = HBitwise::cast(base);
+    if (bitwise->right()->IsInteger32Constant()) {
+      mask = bitwise->right()->GetInteger32Constant();
+      base = bitwise->left();
+    } else if (bitwise->left()->IsInteger32Constant()) {
+      mask = bitwise->left()->GetInteger32Constant();
+      base = bitwise->right();
+    } else {
+      return;
+    }
+    if (bitwise->op() == Token::BIT_AND) {
+      result->and_mask = mask;
+      allow_offset = true;
+    } else if (bitwise->op() == Token::BIT_OR) {
+      result->or_mask = mask;
+    } else {
+      return;
+    }
+
+    result->context = bitwise->context();
+
+    if (allow_offset) {
+      if (base->IsAdd()) {
+        HAdd* add = HAdd::cast(base);
+        if (add->right()->IsInteger32Constant()) {
+          base = add->left();
+        } else if (add->left()->IsInteger32Constant()) {
+          base = add->right();
+        }
+      } else if (base->IsSub()) {
+        HSub* sub = HSub::cast(base);
+        if (sub->right()->IsInteger32Constant()) {
+          base = sub->left();
+        }
+      }
+    }
+
+    result->base = base;
+  }
+}
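+
+// Example decompositions (hypothetical values):
+//   i & 0xff       -> base i, and_mask 0xff
+//   (i + 4) & 0xff -> base i, and_mask 0xff (an offset is allowed for AND)
+//   i | 3          -> base i, or_mask 3 (no offset stripping for OR)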
+
+
+void InductionVariableData::AddCheck(HBoundsCheck* check,
+                                     int32_t upper_limit) {
+  DCHECK(limit_validity() != NULL);
+  if (limit_validity() != check->block() &&
+      !limit_validity()->Dominates(check->block())) return;
+  if (!phi()->block()->current_loop()->IsNestedInThisLoop(
+      check->block()->current_loop())) return;
+
+  ChecksRelatedToLength* length_checks = checks();
+  while (length_checks != NULL) {
+    if (length_checks->length() == check->length()) break;
+    length_checks = length_checks->next();
+  }
+  if (length_checks == NULL) {
+    length_checks = new(check->block()->zone())
+        ChecksRelatedToLength(check->length(), checks());
+    checks_ = length_checks;
+  }
+
+  length_checks->AddCheck(check, upper_limit);
+}
+
+
+void InductionVariableData::ChecksRelatedToLength::CloseCurrentBlock() {
+  if (checks() != NULL) {
+    InductionVariableCheck* c = checks();
+    HBasicBlock* current_block = c->check()->block();
+    while (c != NULL && c->check()->block() == current_block) {
+      c->set_upper_limit(current_upper_limit_);
+      c = c->next();
+    }
+  }
+}
+
+
+void InductionVariableData::ChecksRelatedToLength::UseNewIndexInCurrentBlock(
+    Token::Value token,
+    int32_t mask,
+    HValue* index_base,
+    HValue* context) {
+  DCHECK(first_check_in_block() != NULL);
+  HValue* previous_index = first_check_in_block()->index();
+  DCHECK(context != NULL);
+
+  Zone* zone = index_base->block()->graph()->zone();
+  Isolate* isolate = index_base->block()->graph()->isolate();
+  set_added_constant(HConstant::New(isolate, zone, context, mask));
+  if (added_index() != NULL) {
+    added_constant()->InsertBefore(added_index());
+  } else {
+    added_constant()->InsertBefore(first_check_in_block());
+  }
+
+  if (added_index() == NULL) {
+    first_check_in_block()->ReplaceAllUsesWith(first_check_in_block()->index());
+    HInstruction* new_index = HBitwise::New(isolate, zone, context, token,
+                                            index_base, added_constant());
+    DCHECK(new_index->IsBitwise());
+    new_index->ClearAllSideEffects();
+    new_index->AssumeRepresentation(Representation::Integer32());
+    set_added_index(HBitwise::cast(new_index));
+    added_index()->InsertBefore(first_check_in_block());
+  }
+  DCHECK(added_index()->op() == token);
+
+  added_index()->SetOperandAt(1, index_base);
+  added_index()->SetOperandAt(2, added_constant());
+  first_check_in_block()->SetOperandAt(0, added_index());
+  if (previous_index->HasNoUses()) {
+    previous_index->DeleteAndReplaceWith(NULL);
+  }
+}
+
+void InductionVariableData::ChecksRelatedToLength::AddCheck(
+    HBoundsCheck* check,
+    int32_t upper_limit) {
+  BitwiseDecompositionResult decomposition;
+  InductionVariableData::DecomposeBitwise(check->index(), &decomposition);
+
+  if (first_check_in_block() == NULL ||
+      first_check_in_block()->block() != check->block()) {
+    CloseCurrentBlock();
+
+    first_check_in_block_ = check;
+    set_added_index(NULL);
+    set_added_constant(NULL);
+    current_and_mask_in_block_ = decomposition.and_mask;
+    current_or_mask_in_block_ = decomposition.or_mask;
+    current_upper_limit_ = upper_limit;
+
+    InductionVariableCheck* new_check = new(check->block()->graph()->zone())
+        InductionVariableCheck(check, checks_, upper_limit);
+    checks_ = new_check;
+    return;
+  }
+
+  if (upper_limit > current_upper_limit()) {
+    current_upper_limit_ = upper_limit;
+  }
+
+  if (decomposition.and_mask != 0 &&
+      current_or_mask_in_block() == 0) {
+    if (current_and_mask_in_block() == 0 ||
+        decomposition.and_mask > current_and_mask_in_block()) {
+      UseNewIndexInCurrentBlock(Token::BIT_AND,
+                                decomposition.and_mask,
+                                decomposition.base,
+                                decomposition.context);
+      current_and_mask_in_block_ = decomposition.and_mask;
+    }
+    check->set_skip_check();
+  }
+  if (current_and_mask_in_block() == 0) {
+    if (decomposition.or_mask > current_or_mask_in_block()) {
+      UseNewIndexInCurrentBlock(Token::BIT_OR,
+                                decomposition.or_mask,
+                                decomposition.base,
+                                decomposition.context);
+      current_or_mask_in_block_ = decomposition.or_mask;
+    }
+    check->set_skip_check();
+  }
+
+  if (!check->skip_check()) {
+    InductionVariableCheck* new_check = new(check->block()->graph()->zone())
+        InductionVariableCheck(check, checks_, upper_limit);
+    checks_ = new_check;
+  }
+}
+
+
+/*
+ * This method detects if phi is an induction variable, with phi_operand as
+ * its "incremented" value (the other operand would be the "base" value).
+ *
+ * It checks if phi_operand has the form "phi + constant".
+ * If yes, the constant is the increment that the induction variable gets at
+ * every loop iteration.
+ * Otherwise it returns 0.
+ */
+int32_t InductionVariableData::ComputeIncrement(HPhi* phi,
+                                                HValue* phi_operand) {
+  if (!phi_operand->representation().IsSmiOrInteger32()) return 0;
+
+  if (phi_operand->IsAdd()) {
+    HAdd* operation = HAdd::cast(phi_operand);
+    if (operation->left() == phi &&
+        operation->right()->IsInteger32Constant()) {
+      return operation->right()->GetInteger32Constant();
+    } else if (operation->right() == phi &&
+               operation->left()->IsInteger32Constant()) {
+      return operation->left()->GetInteger32Constant();
+    }
+  } else if (phi_operand->IsSub()) {
+    HSub* operation = HSub::cast(phi_operand);
+    if (operation->left() == phi &&
+        operation->right()->IsInteger32Constant()) {
+      int constant = operation->right()->GetInteger32Constant();
+      if (constant == kMinInt) return 0;
+      return -constant;
+    }
+  }
+
+  return 0;
+}
+
+
+/*
+ * Swaps the information in "update" with the one contained in "this".
+ * The swapping is important because this method is used while doing a
+ * dominator tree traversal, and "update" will retain the old data that
+ * will be restored while backtracking.
+ */
+void InductionVariableData::UpdateAdditionalLimit(
+    InductionVariableLimitUpdate* update) {
+  DCHECK(update->updated_variable == this);
+  if (update->limit_is_upper) {
+    swap(&additional_upper_limit_, &update->limit);
+    swap(&additional_upper_limit_is_included_, &update->limit_is_included);
+  } else {
+    swap(&additional_lower_limit_, &update->limit);
+    swap(&additional_lower_limit_is_included_, &update->limit_is_included);
+  }
+}
+
+
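+// For example, with a loop limit of "i < 100" and an index decomposed as
+// (i & 0x3f), the resulting upper limit is min(99, 0x3f) = 63. An OR mask
+// can only set bits, so it is folded in with "result |= or_mask" instead.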
+int32_t InductionVariableData::ComputeUpperLimit(int32_t and_mask,
+                                                 int32_t or_mask) {
+  // Should be Smi::kMaxValue but it must fit 32 bits; lower is safe anyway.
+  const int32_t MAX_LIMIT = 1 << 30;
+
+  int32_t result = MAX_LIMIT;
+
+  if (limit() != NULL &&
+      limit()->IsInteger32Constant()) {
+    int32_t limit_value = limit()->GetInteger32Constant();
+    if (!limit_included()) {
+      limit_value--;
+    }
+    if (limit_value < result) result = limit_value;
+  }
+
+  if (additional_upper_limit() != NULL &&
+      additional_upper_limit()->IsInteger32Constant()) {
+    int32_t limit_value = additional_upper_limit()->GetInteger32Constant();
+    if (!additional_upper_limit_is_included()) {
+      limit_value--;
+    }
+    if (limit_value < result) result = limit_value;
+  }
+
+  if (and_mask > 0 && and_mask < MAX_LIMIT) {
+    if (and_mask < result) result = and_mask;
+    return result;
+  }
+
+  // Add the effect of the or_mask.
+  result |= or_mask;
+
+  return result >= MAX_LIMIT ? kNoLimit : result;
+}
+
+
+HValue* InductionVariableData::IgnoreOsrValue(HValue* v) {
+  if (!v->IsPhi()) return v;
+  HPhi* phi = HPhi::cast(v);
+  if (phi->OperandCount() != 2) return v;
+  if (phi->OperandAt(0)->block()->is_osr_entry()) {
+    return phi->OperandAt(1);
+  } else if (phi->OperandAt(1)->block()->is_osr_entry()) {
+    return phi->OperandAt(0);
+  } else {
+    return v;
+  }
+}
+
+
+InductionVariableData* InductionVariableData::GetInductionVariableData(
+    HValue* v) {
+  v = IgnoreOsrValue(v);
+  if (v->IsPhi()) {
+    return HPhi::cast(v)->induction_variable_data();
+  }
+  return NULL;
+}
+
+
+/*
+ * Check if a conditional branch to "current_branch" with token "token" is
+ * the branch that keeps the induction loop running (and, conversely, will
+ * terminate it if the "other_branch" is taken).
+ *
+ * Three conditions must be met:
+ * - "current_branch" must be in the induction loop.
+ * - "other_branch" must be out of the induction loop.
+ * - "token" and the induction increment must be "compatible": the token should
+ *   be a condition that keeps the execution inside the loop until the limit is
+ *   reached.
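+ *
+ * For example, in "for (i = 0; i < n; i++)" the branch taken while "i < n"
+ * holds is the loop guard: increment() > 0 matches Token::LT, the taken
+ * target is inside the loop and the other target is outside it.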
+ */
+bool InductionVariableData::CheckIfBranchIsLoopGuard(
+    Token::Value token,
+    HBasicBlock* current_branch,
+    HBasicBlock* other_branch) {
+  if (!phi()->block()->current_loop()->IsNestedInThisLoop(
+      current_branch->current_loop())) {
+    return false;
+  }
+
+  if (phi()->block()->current_loop()->IsNestedInThisLoop(
+      other_branch->current_loop())) {
+    return false;
+  }
+
+  if (increment() > 0 && (token == Token::LT || token == Token::LTE)) {
+    return true;
+  }
+  if (increment() < 0 && (token == Token::GT || token == Token::GTE)) {
+    return true;
+  }
+  if (Token::IsInequalityOp(token) && (increment() == 1 || increment() == -1)) {
+    return true;
+  }
+
+  return false;
+}
+
+
+void InductionVariableData::ComputeLimitFromPredecessorBlock(
+    HBasicBlock* block,
+    LimitFromPredecessorBlock* result) {
+  if (block->predecessors()->length() != 1) return;
+  HBasicBlock* predecessor = block->predecessors()->at(0);
+  HInstruction* end = predecessor->last();
+
+  if (!end->IsCompareNumericAndBranch()) return;
+  HCompareNumericAndBranch* branch = HCompareNumericAndBranch::cast(end);
+
+  Token::Value token = branch->token();
+  if (!Token::IsArithmeticCompareOp(token)) return;
+
+  HBasicBlock* other_target;
+  if (block == branch->SuccessorAt(0)) {
+    other_target = branch->SuccessorAt(1);
+  } else {
+    other_target = branch->SuccessorAt(0);
+    token = Token::NegateCompareOp(token);
+    DCHECK(block == branch->SuccessorAt(1));
+  }
+
+  InductionVariableData* data;
+
+  data = GetInductionVariableData(branch->left());
+  HValue* limit = branch->right();
+  if (data == NULL) {
+    data = GetInductionVariableData(branch->right());
+    token = Token::ReverseCompareOp(token);
+    limit = branch->left();
+  }
+
+  if (data != NULL) {
+    result->variable = data;
+    result->token = token;
+    result->limit = limit;
+    result->other_target = other_target;
+  }
+}
+
+
+/*
+ * Compute the limit that is imposed on an induction variable when entering
+ * "block" (if any).
+ * If the limit is the "proper" induction limit (the one that makes the loop
+ * terminate when the induction variable reaches it) it is stored directly in
+ * the induction variable data.
+ * Otherwise the limit is written in "additional_limit" and the method
+ * returns true.
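+ *
+ * For example, the loop condition "i < n" produces the proper limit n,
+ * while a guard such as "if (i < 10)" inside the loop body only yields an
+ * additional limit, which is undone when the traversal backtracks.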
+ */
+bool InductionVariableData::ComputeInductionVariableLimit(
+    HBasicBlock* block,
+    InductionVariableLimitUpdate* additional_limit) {
+  LimitFromPredecessorBlock limit;
+  ComputeLimitFromPredecessorBlock(block, &limit);
+  if (!limit.LimitIsValid()) return false;
+
+  if (limit.variable->CheckIfBranchIsLoopGuard(limit.token,
+                                               block,
+                                               limit.other_target)) {
+    limit.variable->limit_ = limit.limit;
+    limit.variable->limit_included_ = limit.LimitIsIncluded();
+    limit.variable->limit_validity_ = block;
+    limit.variable->induction_exit_block_ = block->predecessors()->at(0);
+    limit.variable->induction_exit_target_ = limit.other_target;
+    return false;
+  } else {
+    additional_limit->updated_variable = limit.variable;
+    additional_limit->limit = limit.limit;
+    additional_limit->limit_is_upper = limit.LimitIsUpper();
+    additional_limit->limit_is_included = limit.LimitIsIncluded();
+    return true;
+  }
+}
+
+
+Range* HMathMinMax::InferRange(Zone* zone) {
+  if (representation().IsSmiOrInteger32()) {
+    Range* a = left()->range();
+    Range* b = right()->range();
+    Range* res = a->Copy(zone);
+    if (operation_ == kMathMax) {
+      res->CombinedMax(b);
+    } else {
+      DCHECK(operation_ == kMathMin);
+      res->CombinedMin(b);
+    }
+    return res;
+  } else {
+    return HValue::InferRange(zone);
+  }
+}
+
+
+void HPushArguments::AddInput(HValue* value) {
+  inputs_.Add(NULL, value->block()->zone());
+  SetOperandAt(OperandCount() - 1, value);
+}
+
+
+std::ostream& HPhi::PrintTo(std::ostream& os) const {  // NOLINT
+  os << "[";
+  for (int i = 0; i < OperandCount(); ++i) {
+    os << " " << NameOf(OperandAt(i)) << " ";
+  }
+  return os << " uses:" << UseCount() << "_"
+            << representation_from_indirect_uses().Mnemonic() << " "
+            << TypeOf(this) << "]";
+}
+
+
+void HPhi::AddInput(HValue* value) {
+  inputs_.Add(NULL, value->block()->zone());
+  SetOperandAt(OperandCount() - 1, value);
+  // Mark phis that may have 'arguments' directly or indirectly as an operand.
+  if (!CheckFlag(kIsArguments) && value->CheckFlag(kIsArguments)) {
+    SetFlag(kIsArguments);
+  }
+}
+
+
+bool HPhi::HasRealUses() {
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    if (!it.value()->IsPhi()) return true;
+  }
+  return false;
+}
+
+
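+// A phi is redundant when every operand is either the phi itself or one
+// single other value; that value can then replace the phi everywhere.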
+HValue* HPhi::GetRedundantReplacement() {
+  HValue* candidate = NULL;
+  int count = OperandCount();
+  int position = 0;
+  while (position < count && candidate == NULL) {
+    HValue* current = OperandAt(position++);
+    if (current != this) candidate = current;
+  }
+  while (position < count) {
+    HValue* current = OperandAt(position++);
+    if (current != this && current != candidate) return NULL;
+  }
+  DCHECK(candidate != this);
+  return candidate;
+}
+
+
+void HPhi::DeleteFromGraph() {
+  DCHECK(block() != NULL);
+  block()->RemovePhi(this);
+  DCHECK(block() == NULL);
+}
+
+
+void HPhi::InitRealUses(int phi_id) {
+  // Initialize real uses.
+  phi_id_ = phi_id;
+  // Compute a conservative approximation of truncating uses before inferring
+  // representations. The proper, exact computation will be done later, when
+  // inserting representation changes.
+  SetFlag(kTruncatingToSmi);
+  SetFlag(kTruncatingToInt32);
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* value = it.value();
+    if (!value->IsPhi()) {
+      Representation rep = value->observed_input_representation(it.index());
+      representation_from_non_phi_uses_ =
+          representation_from_non_phi_uses().generalize(rep);
+      if (rep.IsSmi() || rep.IsInteger32() || rep.IsDouble()) {
+        has_type_feedback_from_uses_ = true;
+      }
+
+      if (FLAG_trace_representation) {
+        PrintF("#%d Phi is used by real #%d %s as %s\n",
+               id(), value->id(), value->Mnemonic(), rep.Mnemonic());
+      }
+      if (!value->IsSimulate()) {
+        if (!value->CheckFlag(kTruncatingToSmi)) {
+          ClearFlag(kTruncatingToSmi);
+        }
+        if (!value->CheckFlag(kTruncatingToInt32)) {
+          ClearFlag(kTruncatingToInt32);
+        }
+      }
+    }
+  }
+}
+
+
+void HPhi::AddNonPhiUsesFrom(HPhi* other) {
+  if (FLAG_trace_representation) {
+    PrintF(
+        "generalizing use representation '%s' of #%d Phi "
+        "with uses of #%d Phi '%s'\n",
+        representation_from_indirect_uses().Mnemonic(), id(), other->id(),
+        other->representation_from_non_phi_uses().Mnemonic());
+  }
+
+  representation_from_indirect_uses_ =
+      representation_from_indirect_uses().generalize(
+          other->representation_from_non_phi_uses());
+}
+
+
+void HSimulate::MergeWith(ZoneList<HSimulate*>* list) {
+  while (!list->is_empty()) {
+    HSimulate* from = list->RemoveLast();
+    ZoneList<HValue*>* from_values = &from->values_;
+    for (int i = 0; i < from_values->length(); ++i) {
+      if (from->HasAssignedIndexAt(i)) {
+        int index = from->GetAssignedIndexAt(i);
+        if (HasValueForIndex(index)) continue;
+        AddAssignedValue(index, from_values->at(i));
+      } else {
+        if (pop_count_ > 0) {
+          pop_count_--;
+        } else {
+          AddPushedValue(from_values->at(i));
+        }
+      }
+    }
+    pop_count_ += from->pop_count_;
+    from->DeleteAndReplaceWith(NULL);
+  }
+}
+
+
+std::ostream& HSimulate::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << "id=" << ast_id().ToInt();
+  if (pop_count_ > 0) os << " pop " << pop_count_;
+  if (values_.length() > 0) {
+    if (pop_count_ > 0) os << " /";
+    for (int i = values_.length() - 1; i >= 0; --i) {
+      if (HasAssignedIndexAt(i)) {
+        os << " var[" << GetAssignedIndexAt(i) << "] = ";
+      } else {
+        os << " push ";
+      }
+      os << NameOf(values_[i]);
+      if (i > 0) os << ",";
+    }
+  }
+  return os;
+}
+
+
+void HSimulate::ReplayEnvironment(HEnvironment* env) {
+  if (is_done_with_replay()) return;
+  DCHECK(env != NULL);
+  env->set_ast_id(ast_id());
+  env->Drop(pop_count());
+  for (int i = values()->length() - 1; i >= 0; --i) {
+    HValue* value = values()->at(i);
+    if (HasAssignedIndexAt(i)) {
+      env->Bind(GetAssignedIndexAt(i), value);
+    } else {
+      env->Push(value);
+    }
+  }
+  set_done_with_replay();
+}
+
+
+static void ReplayEnvironmentNested(const ZoneList<HValue*>* values,
+                                    HCapturedObject* other) {
+  for (int i = 0; i < values->length(); ++i) {
+    HValue* value = values->at(i);
+    if (value->IsCapturedObject()) {
+      if (HCapturedObject::cast(value)->capture_id() == other->capture_id()) {
+        values->at(i) = other;
+      } else {
+        ReplayEnvironmentNested(HCapturedObject::cast(value)->values(), other);
+      }
+    }
+  }
+}
+
+
+// Replay captured objects by replacing all captured objects with the
+// same capture id in the current and all outer environments.
+void HCapturedObject::ReplayEnvironment(HEnvironment* env) {
+  DCHECK(env != NULL);
+  while (env != NULL) {
+    ReplayEnvironmentNested(env->values(), this);
+    env = env->outer();
+  }
+}
+
+
+std::ostream& HCapturedObject::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << "#" << capture_id() << " ";
+  return HDematerializedObject::PrintDataTo(os);
+}
+
+
+void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
+                                         Zone* zone) {
+  DCHECK(return_target->IsInlineReturnTarget());
+  return_targets_.Add(return_target, zone);
+}
+
+
+std::ostream& HEnterInlined::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << function()->debug_name()->ToCString().get();
+}
+
+
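+// Note that the bit_cast comparison deliberately rejects -0.0: its int32
+// round trip yields +0.0, which has a different bit pattern.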
+static bool IsInteger32(double value) {
+  if (value >= std::numeric_limits<int32_t>::min() &&
+      value <= std::numeric_limits<int32_t>::max()) {
+    double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
+    return bit_cast<int64_t>(roundtrip_value) == bit_cast<int64_t>(value);
+  }
+  return false;
+}
+
+
+HConstant::HConstant(Special special)
+    : HTemplateInstruction<0>(HType::TaggedNumber()),
+      object_(Handle<Object>::null()),
+      object_map_(Handle<Map>::null()),
+      bit_field_(HasDoubleValueField::encode(true) |
+                 InstanceTypeField::encode(kUnknownInstanceType)),
+      int32_value_(0) {
+  DCHECK_EQ(kHoleNaN, special);
+  std::memcpy(&double_value_, &kHoleNanInt64, sizeof(double_value_));
+  Initialize(Representation::Double());
+}
+
+
+HConstant::HConstant(Handle<Object> object, Representation r)
+    : HTemplateInstruction<0>(HType::FromValue(object)),
+      object_(Unique<Object>::CreateUninitialized(object)),
+      object_map_(Handle<Map>::null()),
+      bit_field_(
+          HasStableMapValueField::encode(false) |
+          HasSmiValueField::encode(false) | HasInt32ValueField::encode(false) |
+          HasDoubleValueField::encode(false) |
+          HasExternalReferenceValueField::encode(false) |
+          IsNotInNewSpaceField::encode(true) |
+          BooleanValueField::encode(object->BooleanValue()) |
+          IsUndetectableField::encode(false) | IsCallableField::encode(false) |
+          InstanceTypeField::encode(kUnknownInstanceType)) {
+  if (object->IsHeapObject()) {
+    Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+    Isolate* isolate = heap_object->GetIsolate();
+    Handle<Map> map(heap_object->map(), isolate);
+    bit_field_ = IsNotInNewSpaceField::update(
+        bit_field_, !isolate->heap()->InNewSpace(*object));
+    bit_field_ = InstanceTypeField::update(bit_field_, map->instance_type());
+    bit_field_ =
+        IsUndetectableField::update(bit_field_, map->is_undetectable());
+    bit_field_ = IsCallableField::update(bit_field_, map->is_callable());
+    if (map->is_stable()) object_map_ = Unique<Map>::CreateImmovable(map);
+    bit_field_ = HasStableMapValueField::update(
+        bit_field_,
+        HasMapValue() && Handle<Map>::cast(heap_object)->is_stable());
+  }
+  if (object->IsNumber()) {
+    double n = object->Number();
+    bool has_int32_value = IsInteger32(n);
+    bit_field_ = HasInt32ValueField::update(bit_field_, has_int32_value);
+    int32_value_ = DoubleToInt32(n);
+    bit_field_ = HasSmiValueField::update(
+        bit_field_, has_int32_value && Smi::IsValid(int32_value_));
+    double_value_ = n;
+    bit_field_ = HasDoubleValueField::update(bit_field_, true);
+    // TODO(titzer): if this heap number is new space, tenure a new one.
+  }
+
+  Initialize(r);
+}
+
+
+HConstant::HConstant(Unique<Object> object, Unique<Map> object_map,
+                     bool has_stable_map_value, Representation r, HType type,
+                     bool is_not_in_new_space, bool boolean_value,
+                     bool is_undetectable, InstanceType instance_type)
+    : HTemplateInstruction<0>(type),
+      object_(object),
+      object_map_(object_map),
+      bit_field_(HasStableMapValueField::encode(has_stable_map_value) |
+                 HasSmiValueField::encode(false) |
+                 HasInt32ValueField::encode(false) |
+                 HasDoubleValueField::encode(false) |
+                 HasExternalReferenceValueField::encode(false) |
+                 IsNotInNewSpaceField::encode(is_not_in_new_space) |
+                 BooleanValueField::encode(boolean_value) |
+                 IsUndetectableField::encode(is_undetectable) |
+                 InstanceTypeField::encode(instance_type)) {
+  DCHECK(!object.handle().is_null());
+  DCHECK(!type.IsTaggedNumber() || type.IsNone());
+  Initialize(r);
+}
+
+
+HConstant::HConstant(int32_t integer_value, Representation r,
+                     bool is_not_in_new_space, Unique<Object> object)
+    : object_(object),
+      object_map_(Handle<Map>::null()),
+      bit_field_(HasStableMapValueField::encode(false) |
+                 HasSmiValueField::encode(Smi::IsValid(integer_value)) |
+                 HasInt32ValueField::encode(true) |
+                 HasDoubleValueField::encode(true) |
+                 HasExternalReferenceValueField::encode(false) |
+                 IsNotInNewSpaceField::encode(is_not_in_new_space) |
+                 BooleanValueField::encode(integer_value != 0) |
+                 IsUndetectableField::encode(false) |
+                 InstanceTypeField::encode(kUnknownInstanceType)),
+      int32_value_(integer_value),
+      double_value_(FastI2D(integer_value)) {
+  // It's possible to create a constant with a value in Smi-range but stored
+  // in a (pre-existing) HeapNumber. See crbug.com/349878.
+  bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
+  bool is_smi = HasSmiValue() && !could_be_heapobject;
+  set_type(is_smi ? HType::Smi() : HType::TaggedNumber());
+  Initialize(r);
+}
+
+
+HConstant::HConstant(double double_value, Representation r,
+                     bool is_not_in_new_space, Unique<Object> object)
+    : object_(object),
+      object_map_(Handle<Map>::null()),
+      bit_field_(HasStableMapValueField::encode(false) |
+                 HasInt32ValueField::encode(IsInteger32(double_value)) |
+                 HasDoubleValueField::encode(true) |
+                 HasExternalReferenceValueField::encode(false) |
+                 IsNotInNewSpaceField::encode(is_not_in_new_space) |
+                 BooleanValueField::encode(double_value != 0 &&
+                                           !std::isnan(double_value)) |
+                 IsUndetectableField::encode(false) |
+                 InstanceTypeField::encode(kUnknownInstanceType)),
+      int32_value_(DoubleToInt32(double_value)),
+      double_value_(double_value) {
+  bit_field_ = HasSmiValueField::update(
+      bit_field_, HasInteger32Value() && Smi::IsValid(int32_value_));
+  // It's possible to create a constant with a value in Smi-range but stored
+  // in a (pre-existing) HeapNumber. See crbug.com/349878.
+  bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
+  bool is_smi = HasSmiValue() && !could_be_heapobject;
+  set_type(is_smi ? HType::Smi() : HType::TaggedNumber());
+  Initialize(r);
+}
+
+
+HConstant::HConstant(ExternalReference reference)
+    : HTemplateInstruction<0>(HType::Any()),
+      object_(Unique<Object>(Handle<Object>::null())),
+      object_map_(Handle<Map>::null()),
+      bit_field_(
+          HasStableMapValueField::encode(false) |
+          HasSmiValueField::encode(false) | HasInt32ValueField::encode(false) |
+          HasDoubleValueField::encode(false) |
+          HasExternalReferenceValueField::encode(true) |
+          IsNotInNewSpaceField::encode(true) | BooleanValueField::encode(true) |
+          IsUndetectableField::encode(false) |
+          InstanceTypeField::encode(kUnknownInstanceType)),
+      external_reference_value_(reference) {
+  Initialize(Representation::External());
+}
+
+
+void HConstant::Initialize(Representation r) {
+  if (r.IsNone()) {
+    if (HasSmiValue() && SmiValuesAre31Bits()) {
+      r = Representation::Smi();
+    } else if (HasInteger32Value()) {
+      r = Representation::Integer32();
+    } else if (HasDoubleValue()) {
+      r = Representation::Double();
+    } else if (HasExternalReferenceValue()) {
+      r = Representation::External();
+    } else {
+      Handle<Object> object = object_.handle();
+      if (object->IsJSObject()) {
+        // Try to eagerly migrate JSObjects that have deprecated maps.
+        Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+        if (js_object->map()->is_deprecated()) {
+          JSObject::TryMigrateInstance(js_object);
+        }
+      }
+      r = Representation::Tagged();
+    }
+  }
+  if (r.IsSmi()) {
+    // If we have an existing handle, zap it, because it might be a heap
+    // number which we must not re-use when copying this HConstant to
+    // Tagged representation later: having Smi representation now could
+    // cause heap object checks not to get emitted.
+    object_ = Unique<Object>(Handle<Object>::null());
+  }
+  if (r.IsSmiOrInteger32() && object_.handle().is_null()) {
+    // If it's not a heap object, it can't be in new space.
+    bit_field_ = IsNotInNewSpaceField::update(bit_field_, true);
+  }
+  set_representation(r);
+  SetFlag(kUseGVN);
+}
+
+
+bool HConstant::ImmortalImmovable() const {
+  if (HasInteger32Value()) {
+    return false;
+  }
+  if (HasDoubleValue()) {
+    if (IsSpecialDouble()) {
+      return true;
+    }
+    return false;
+  }
+  if (HasExternalReferenceValue()) {
+    return false;
+  }
+
+  DCHECK(!object_.handle().is_null());
+  Heap* heap = isolate()->heap();
+  DCHECK(!object_.IsKnownGlobal(heap->minus_zero_value()));
+  DCHECK(!object_.IsKnownGlobal(heap->nan_value()));
+  return
+#define IMMORTAL_IMMOVABLE_ROOT(name) \
+  object_.IsKnownGlobal(heap->root(Heap::k##name##RootIndex)) ||
+      IMMORTAL_IMMOVABLE_ROOT_LIST(IMMORTAL_IMMOVABLE_ROOT)
+#undef IMMORTAL_IMMOVABLE_ROOT
+#define INTERNALIZED_STRING(name, value) \
+      object_.IsKnownGlobal(heap->name()) ||
+      INTERNALIZED_STRING_LIST(INTERNALIZED_STRING)
+#undef INTERNALIZED_STRING
+#define STRING_TYPE(NAME, size, name, Name) \
+      object_.IsKnownGlobal(heap->name##_map()) ||
+      STRING_TYPE_LIST(STRING_TYPE)
+#undef STRING_TYPE
+      false;
+}
+
+
+bool HConstant::EmitAtUses() {
+  DCHECK(IsLinked());
+  if (block()->graph()->has_osr() &&
+      block()->graph()->IsStandardConstant(this)) {
+    // TODO(titzer): this seems like a hack that should be fixed by custom OSR.
+    return true;
+  }
+  if (HasNoUses()) return true;
+  if (IsCell()) return false;
+  if (representation().IsDouble()) return false;
+  if (representation().IsExternal()) return false;
+  return true;
+}
+
+
+HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
+  if (r.IsSmi() && !HasSmiValue()) return NULL;
+  if (r.IsInteger32() && !HasInteger32Value()) return NULL;
+  if (r.IsDouble() && !HasDoubleValue()) return NULL;
+  if (r.IsExternal() && !HasExternalReferenceValue()) return NULL;
+  if (HasInteger32Value()) {
+    return new (zone) HConstant(int32_value_, r, NotInNewSpace(), object_);
+  }
+  if (HasDoubleValue()) {
+    return new (zone) HConstant(double_value_, r, NotInNewSpace(), object_);
+  }
+  if (HasExternalReferenceValue()) {
+    return new(zone) HConstant(external_reference_value_);
+  }
+  DCHECK(!object_.handle().is_null());
+  return new (zone) HConstant(object_, object_map_, HasStableMapValue(), r,
+                              type_, NotInNewSpace(), BooleanValue(),
+                              IsUndetectable(), GetInstanceType());
+}
+
+
+Maybe<HConstant*> HConstant::CopyToTruncatedInt32(Zone* zone) {
+  HConstant* res = NULL;
+  if (HasInteger32Value()) {
+    res = new (zone) HConstant(int32_value_, Representation::Integer32(),
+                               NotInNewSpace(), object_);
+  } else if (HasDoubleValue()) {
+    res = new (zone)
+        HConstant(DoubleToInt32(double_value_), Representation::Integer32(),
+                  NotInNewSpace(), object_);
+  }
+  return res != NULL ? Just(res) : Nothing<HConstant*>();
+}
+
+
+Maybe<HConstant*> HConstant::CopyToTruncatedNumber(Isolate* isolate,
+                                                   Zone* zone) {
+  HConstant* res = NULL;
+  Handle<Object> handle = this->handle(isolate);
+  if (handle->IsBoolean()) {
+    res = handle->BooleanValue() ?
+      new(zone) HConstant(1) : new(zone) HConstant(0);
+  } else if (handle->IsUndefined()) {
+    res = new (zone) HConstant(std::numeric_limits<double>::quiet_NaN());
+  } else if (handle->IsNull()) {
+    res = new(zone) HConstant(0);
+  }
+  return res != NULL ? Just(res) : Nothing<HConstant*>();
+}
+
+
+std::ostream& HConstant::PrintDataTo(std::ostream& os) const {  // NOLINT
+  if (HasInteger32Value()) {
+    os << int32_value_ << " ";
+  } else if (HasDoubleValue()) {
+    os << double_value_ << " ";
+  } else if (HasExternalReferenceValue()) {
+    os << reinterpret_cast<void*>(external_reference_value_.address()) << " ";
+  } else {
+    // The handle() method is silently and lazily mutating the object.
+    Handle<Object> h = const_cast<HConstant*>(this)->handle(isolate());
+    os << Brief(*h) << " ";
+    if (HasStableMapValue()) os << "[stable-map] ";
+    if (HasObjectMap()) os << "[map " << *ObjectMap().handle() << "] ";
+  }
+  if (!NotInNewSpace()) os << "[new space] ";
+  return os;
+}
+
+
+std::ostream& HBinaryOperation::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << NameOf(left()) << " " << NameOf(right());
+  if (CheckFlag(kCanOverflow)) os << " !";
+  if (CheckFlag(kBailoutOnMinusZero)) os << " -0?";
+  return os;
+}
+
+
+void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
+  DCHECK(CheckFlag(kFlexibleRepresentation));
+  Representation new_rep = RepresentationFromInputs();
+  UpdateRepresentation(new_rep, h_infer, "inputs");
+
+  if (representation().IsSmi() && HasNonSmiUse()) {
+    UpdateRepresentation(
+        Representation::Integer32(), h_infer, "use requirements");
+  }
+
+  if (observed_output_representation_.IsNone()) {
+    new_rep = RepresentationFromUses();
+    UpdateRepresentation(new_rep, h_infer, "uses");
+  } else {
+    new_rep = RepresentationFromOutput();
+    UpdateRepresentation(new_rep, h_infer, "output");
+  }
+}
+
+
+Representation HBinaryOperation::RepresentationFromInputs() {
+  // Determine the worst case of observed input representations and
+  // the currently assumed output representation.
+  Representation rep = representation();
+  for (int i = 1; i <= 2; ++i) {
+    rep = rep.generalize(observed_input_representation(i));
+  }
+  // If any of the actual input representations is more general than what we
+  // have so far but not Tagged, use that representation instead.
+  Representation left_rep = left()->representation();
+  Representation right_rep = right()->representation();
+  if (!left_rep.IsTagged()) rep = rep.generalize(left_rep);
+  if (!right_rep.IsTagged()) rep = rep.generalize(right_rep);
+
+  return rep;
+}
+
+
+bool HBinaryOperation::IgnoreObservedOutputRepresentation(
+    Representation current_rep) {
+  return ((current_rep.IsInteger32() && CheckUsesForFlag(kTruncatingToInt32)) ||
+          (current_rep.IsSmi() && CheckUsesForFlag(kTruncatingToSmi))) &&
+         // Mul in Integer32 mode would be too precise.
+         (!this->IsMul() || HMul::cast(this)->MulMinusOne());
+}
+
+
+Representation HBinaryOperation::RepresentationFromOutput() {
+  Representation rep = representation();
+  // Consider observed output representation, but ignore it if it's Double,
+  // this instruction is not a division, and all its uses are truncating
+  // to Integer32.
+  if (observed_output_representation_.is_more_general_than(rep) &&
+      !IgnoreObservedOutputRepresentation(rep)) {
+    return observed_output_representation_;
+  }
+  return Representation::None();
+}
+
+
+void HBinaryOperation::AssumeRepresentation(Representation r) {
+  set_observed_input_representation(1, r);
+  set_observed_input_representation(2, r);
+  HValue::AssumeRepresentation(r);
+}
+
+
+void HMathMinMax::InferRepresentation(HInferRepresentationPhase* h_infer) {
+  DCHECK(CheckFlag(kFlexibleRepresentation));
+  Representation new_rep = RepresentationFromInputs();
+  UpdateRepresentation(new_rep, h_infer, "inputs");
+  // Do not care about uses.
+}
+
+
+Range* HBitwise::InferRange(Zone* zone) {
+  if (op() == Token::BIT_XOR) {
+    if (left()->HasRange() && right()->HasRange()) {
+      // The maximum value has the high bit, and all bits below, set:
+      // (1 << high) - 1.
+      // If the range can be negative, the minimum int is a negative number with
+      // the high bit, and all bits below, unset:
+      // -(1 << high).
+      // If it cannot be negative, conservatively choose 0 as minimum int.
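+      // For example, with left in [0, 5] and right in [0, 3] the OR of all
+      // bounds is 7, so high is 3, the limit is 1 << 3 = 8 and the result
+      // range is [0, 7].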
+      int64_t left_upper = left()->range()->upper();
+      int64_t left_lower = left()->range()->lower();
+      int64_t right_upper = right()->range()->upper();
+      int64_t right_lower = right()->range()->lower();
+
+      if (left_upper < 0) left_upper = ~left_upper;
+      if (left_lower < 0) left_lower = ~left_lower;
+      if (right_upper < 0) right_upper = ~right_upper;
+      if (right_lower < 0) right_lower = ~right_lower;
+
+      int high = MostSignificantBit(
+          static_cast<uint32_t>(
+              left_upper | left_lower | right_upper | right_lower));
+
+      int64_t limit = 1;
+      limit <<= high;
+      int32_t min = (left()->range()->CanBeNegative() ||
+                     right()->range()->CanBeNegative())
+                    ? static_cast<int32_t>(-limit) : 0;
+      return new(zone) Range(min, static_cast<int32_t>(limit - 1));
+    }
+    Range* result = HValue::InferRange(zone);
+    result->set_can_be_minus_zero(false);
+    return result;
+  }
+  const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
+  int32_t left_mask = (left()->range() != NULL)
+      ? left()->range()->Mask()
+      : kDefaultMask;
+  int32_t right_mask = (right()->range() != NULL)
+      ? right()->range()->Mask()
+      : kDefaultMask;
+  int32_t result_mask = (op() == Token::BIT_AND)
+      ? left_mask & right_mask
+      : left_mask | right_mask;
+  if (result_mask >= 0) return new(zone) Range(0, result_mask);
+
+  Range* result = HValue::InferRange(zone);
+  result->set_can_be_minus_zero(false);
+  return result;
+}
+
+
+Range* HSar::InferRange(Zone* zone) {
+  if (right()->IsConstant()) {
+    HConstant* c = HConstant::cast(right());
+    if (c->HasInteger32Value()) {
+      Range* result = (left()->range() != NULL)
+          ? left()->range()->Copy(zone)
+          : new(zone) Range();
+      result->Sar(c->Integer32Value());
+      return result;
+    }
+  }
+  return HValue::InferRange(zone);
+}
+
+
+Range* HShr::InferRange(Zone* zone) {
+  if (right()->IsConstant()) {
+    HConstant* c = HConstant::cast(right());
+    if (c->HasInteger32Value()) {
+      int shift_count = c->Integer32Value() & 0x1f;
+      if (left()->range()->CanBeNegative()) {
+        // Only compute bounds if the result always fits into an int32.
+        return (shift_count >= 1)
+            ? new(zone) Range(0,
+                              static_cast<uint32_t>(0xffffffff) >> shift_count)
+            : new(zone) Range();
+      } else {
+        // For positive inputs we can use the >> operator.
+        Range* result = (left()->range() != NULL)
+            ? left()->range()->Copy(zone)
+            : new(zone) Range();
+        result->Sar(c->Integer32Value());
+        return result;
+      }
+    }
+  }
+  return HValue::InferRange(zone);
+}
+
+
+Range* HShl::InferRange(Zone* zone) {
+  if (right()->IsConstant()) {
+    HConstant* c = HConstant::cast(right());
+    if (c->HasInteger32Value()) {
+      Range* result = (left()->range() != NULL)
+          ? left()->range()->Copy(zone)
+          : new(zone) Range();
+      result->Shl(c->Integer32Value());
+      return result;
+    }
+  }
+  return HValue::InferRange(zone);
+}
+
+
+Range* HLoadNamedField::InferRange(Zone* zone) {
+  if (access().representation().IsInteger8()) {
+    return new(zone) Range(kMinInt8, kMaxInt8);
+  }
+  if (access().representation().IsUInteger8()) {
+    return new(zone) Range(kMinUInt8, kMaxUInt8);
+  }
+  if (access().representation().IsInteger16()) {
+    return new(zone) Range(kMinInt16, kMaxInt16);
+  }
+  if (access().representation().IsUInteger16()) {
+    return new(zone) Range(kMinUInt16, kMaxUInt16);
+  }
+  if (access().IsStringLength()) {
+    return new(zone) Range(0, String::kMaxLength);
+  }
+  return HValue::InferRange(zone);
+}
+
+
+Range* HLoadKeyed::InferRange(Zone* zone) {
+  switch (elements_kind()) {
+    case INT8_ELEMENTS:
+      return new(zone) Range(kMinInt8, kMaxInt8);
+    case UINT8_ELEMENTS:
+    case UINT8_CLAMPED_ELEMENTS:
+      return new(zone) Range(kMinUInt8, kMaxUInt8);
+    case INT16_ELEMENTS:
+      return new(zone) Range(kMinInt16, kMaxInt16);
+    case UINT16_ELEMENTS:
+      return new(zone) Range(kMinUInt16, kMaxUInt16);
+    default:
+      return HValue::InferRange(zone);
+  }
+}
+
+
+std::ostream& HCompareGeneric::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << Token::Name(token()) << " ";
+  return HBinaryOperation::PrintDataTo(os);
+}
+
+
+std::ostream& HStringCompareAndBranch::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  os << Token::Name(token()) << " ";
+  return HControlInstruction::PrintDataTo(os);
+}
+
+
+std::ostream& HCompareNumericAndBranch::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  os << Token::Name(token()) << " " << NameOf(left()) << " " << NameOf(right());
+  return HControlInstruction::PrintDataTo(os);
+}
+
+
+std::ostream& HCompareObjectEqAndBranch::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  os << NameOf(left()) << " " << NameOf(right());
+  return HControlInstruction::PrintDataTo(os);
+}
+
+
+bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (known_successor_index() != kNoKnownSuccessorIndex) {
+    *block = SuccessorAt(known_successor_index());
+    return true;
+  }
+  if (FLAG_fold_constants && left()->IsConstant() && right()->IsConstant()) {
+    *block = HConstant::cast(left())->DataEquals(HConstant::cast(right()))
+        ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (known_successor_index() != kNoKnownSuccessorIndex) {
+    *block = SuccessorAt(known_successor_index());
+    return true;
+  }
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    *block = HConstant::cast(value())->HasStringValue()
+        ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  }
+  if (value()->type().IsString()) {
+    *block = FirstSuccessor();
+    return true;
+  }
+  if (value()->type().IsSmi() ||
+      value()->type().IsNull() ||
+      value()->type().IsBoolean() ||
+      value()->type().IsUndefined() ||
+      value()->type().IsJSReceiver()) {
+    *block = SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+bool HIsUndetectableAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    *block = HConstant::cast(value())->IsUndetectable()
+        ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+bool HHasInstanceTypeAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    InstanceType type = HConstant::cast(value())->GetInstanceType();
+    *block = (from_ <= type) && (type <= to_)
+        ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+void HCompareHoleAndBranch::InferRepresentation(
+    HInferRepresentationPhase* h_infer) {
+  ChangeRepresentation(value()->representation());
+}
+
+
+bool HCompareNumericAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (left() == right() &&
+      left()->representation().IsSmiOrInteger32()) {
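+    // Smi and Integer32 values cannot be NaN, so comparing a value against
+    // itself is statically decided: ==, ===, <= and >= hold, while <, >
+    // and != do not.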
+    *block = (token() == Token::EQ ||
+              token() == Token::EQ_STRICT ||
+              token() == Token::LTE ||
+              token() == Token::GTE)
+        ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+bool HCompareMinusZeroAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    HConstant* constant = HConstant::cast(value());
+    if (constant->HasDoubleValue()) {
+      *block = IsMinusZero(constant->DoubleValue())
+          ? FirstSuccessor() : SecondSuccessor();
+      return true;
+    }
+  }
+  if (value()->representation().IsSmiOrInteger32()) {
+    // A Smi or Integer32 cannot contain minus zero.
+    *block = SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+void HCompareMinusZeroAndBranch::InferRepresentation(
+    HInferRepresentationPhase* h_infer) {
+  ChangeRepresentation(value()->representation());
+}
+
+
+std::ostream& HGoto::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << *SuccessorAt(0);
+}
+
+
+void HCompareNumericAndBranch::InferRepresentation(
+    HInferRepresentationPhase* h_infer) {
+  Representation left_rep = left()->representation();
+  Representation right_rep = right()->representation();
+  Representation observed_left = observed_input_representation(0);
+  Representation observed_right = observed_input_representation(1);
+
+  Representation rep = Representation::None();
+  rep = rep.generalize(observed_left);
+  rep = rep.generalize(observed_right);
+  if (rep.IsNone() || rep.IsSmiOrInteger32()) {
+    if (!left_rep.IsTagged()) rep = rep.generalize(left_rep);
+    if (!right_rep.IsTagged()) rep = rep.generalize(right_rep);
+  } else {
+    rep = Representation::Double();
+  }
+
+  if (rep.IsDouble()) {
+    // According to the ES5 spec (11.9.3, 11.8.5), equality comparisons (==,
+    // === and !=) have special handling of undefined, e.g. undefined ==
+    // undefined is 'true'. Relational comparisons have different semantics,
+    // first calling ToPrimitive() on their arguments. The standard Crankshaft
+    // tagged-to-double conversion to ensure the HCompareNumericAndBranch's
+    // inputs are doubles caused 'undefined' to be converted to NaN. That's
+    // compatible out of the box with ordered relational comparisons (<, >, <=,
+    // >=). However, for equality comparisons (and for 'in' and 'instanceof'),
+    // it is not consistent with the spec. For example, it would cause undefined
+    // == undefined (should be true) to be evaluated as NaN == NaN
+    // (false). Therefore, any comparisons other than ordered relational
+    // comparisons must cause a deopt when one of their arguments is undefined.
+    // See also v8:1434
+    if (Token::IsOrderedRelationalCompareOp(token_) && !is_strong(strength())) {
+      SetFlag(kAllowUndefinedAsNaN);
+    }
+  }
+  ChangeRepresentation(rep);
+}
+
+
+std::ostream& HParameter::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << index();
+}
+
+
+std::ostream& HLoadNamedField::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << NameOf(object()) << access_;
+
+  if (maps() != NULL) {
+    os << " [" << *maps()->at(0).handle();
+    for (int i = 1; i < maps()->size(); ++i) {
+      os << "," << *maps()->at(i).handle();
+    }
+    os << "]";
+  }
+
+  if (HasDependency()) os << " " << NameOf(dependency());
+  return os;
+}
+
+
+std::ostream& HLoadNamedGeneric::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  Handle<String> n = Handle<String>::cast(name());
+  return os << NameOf(object()) << "." << n->ToCString().get();
+}
+
+
+std::ostream& HLoadKeyed::PrintDataTo(std::ostream& os) const {  // NOLINT
+  if (!is_fixed_typed_array()) {
+    os << NameOf(elements());
+  } else {
+    DCHECK(elements_kind() >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
+           elements_kind() <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+    os << NameOf(elements()) << "." << ElementsKindToString(elements_kind());
+  }
+
+  os << "[" << NameOf(key());
+  if (IsDehoisted()) os << " + " << base_offset();
+  os << "]";
+
+  if (HasDependency()) os << " " << NameOf(dependency());
+  if (RequiresHoleCheck()) os << " check_hole";
+  return os;
+}
+
+
+bool HLoadKeyed::TryIncreaseBaseOffset(uint32_t increase_by_value) {
+  // The base offset is usually simply the size of the array header, except
+  // that dehoisting adds an additional offset due to an array index key
+  // manipulation, in which case it becomes (array header size +
+  // constant-offset-from-key * kPointerSize).
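+  // For example, dehoisting a[i + 4] moves the constant 4 out of the key
+  // and grows the base offset here by 4 * kPointerSize.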
+  uint32_t base_offset = BaseOffsetField::decode(bit_field_);
+  v8::base::internal::CheckedNumeric<uint32_t> addition_result = base_offset;
+  addition_result += increase_by_value;
+  if (!addition_result.IsValid()) return false;
+  base_offset = addition_result.ValueOrDie();
+  if (!BaseOffsetField::is_valid(base_offset)) return false;
+  bit_field_ = BaseOffsetField::update(bit_field_, base_offset);
+  return true;
+}
+
+
+bool HLoadKeyed::UsesMustHandleHole() const {
+  if (IsFastPackedElementsKind(elements_kind())) {
+    return false;
+  }
+
+  if (IsFixedTypedArrayElementsKind(elements_kind())) {
+    return false;
+  }
+
+  if (hole_mode() == ALLOW_RETURN_HOLE) {
+    if (IsFastDoubleElementsKind(elements_kind())) {
+      return AllUsesCanTreatHoleAsNaN();
+    }
+    return true;
+  }
+
+  if (IsFastDoubleElementsKind(elements_kind())) {
+    return false;
+  }
+
+  // Holes are only returned as tagged values.
+  if (!representation().IsTagged()) {
+    return false;
+  }
+
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    if (!use->IsChange()) return false;
+  }
+
+  return true;
+}
+
+
+bool HLoadKeyed::AllUsesCanTreatHoleAsNaN() const {
+  return IsFastDoubleElementsKind(elements_kind()) &&
+      CheckUsesForFlag(HValue::kAllowUndefinedAsNaN);
+}
+
+
+bool HLoadKeyed::RequiresHoleCheck() const {
+  if (IsFastPackedElementsKind(elements_kind())) {
+    return false;
+  }
+
+  if (IsFixedTypedArrayElementsKind(elements_kind())) {
+    return false;
+  }
+
+  if (hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+    return false;
+  }
+
+  return !UsesMustHandleHole();
+}
+
+
+std::ostream& HLoadKeyedGeneric::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  return os << NameOf(object()) << "[" << NameOf(key()) << "]";
+}
+
+
+HValue* HLoadKeyedGeneric::Canonicalize() {
+  // Recognize generic keyed loads that use property name generated
+  // by for-in statement as a key and rewrite them into fast property load
+  // by index.
+  if (key()->IsLoadKeyed()) {
+    HLoadKeyed* key_load = HLoadKeyed::cast(key());
+    if (key_load->elements()->IsForInCacheArray()) {
+      HForInCacheArray* names_cache =
+          HForInCacheArray::cast(key_load->elements());
+
+      if (names_cache->enumerable() == object()) {
+        HForInCacheArray* index_cache =
+            names_cache->index_cache();
+        HCheckMapValue* map_check = HCheckMapValue::New(
+            block()->graph()->isolate(), block()->graph()->zone(),
+            block()->graph()->GetInvalidContext(), object(),
+            names_cache->map());
+        HInstruction* index = HLoadKeyed::New(
+            block()->graph()->isolate(), block()->graph()->zone(),
+            block()->graph()->GetInvalidContext(), index_cache, key_load->key(),
+            key_load->key(), nullptr, key_load->elements_kind());
+        map_check->InsertBefore(this);
+        index->InsertBefore(this);
+        return Prepend(new(block()->zone()) HLoadFieldByIndex(
+            object(), index));
+      }
+    }
+  }
+
+  return this;
+}
+
+
+std::ostream& HStoreNamedGeneric::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  Handle<String> n = Handle<String>::cast(name());
+  return os << NameOf(object()) << "." << n->ToCString().get() << " = "
+            << NameOf(value());
+}
+
+
+std::ostream& HStoreNamedField::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << NameOf(object()) << access_ << " = " << NameOf(value());
+  if (NeedsWriteBarrier()) os << " (write-barrier)";
+  if (has_transition()) os << " (transition map " << *transition_map() << ")";
+  return os;
+}
+
+
+std::ostream& HStoreKeyed::PrintDataTo(std::ostream& os) const {  // NOLINT
+  if (!is_fixed_typed_array()) {
+    os << NameOf(elements());
+  } else {
+    DCHECK(elements_kind() >= FIRST_FIXED_TYPED_ARRAY_ELEMENTS_KIND &&
+           elements_kind() <= LAST_FIXED_TYPED_ARRAY_ELEMENTS_KIND);
+    os << NameOf(elements()) << "." << ElementsKindToString(elements_kind());
+  }
+
+  os << "[" << NameOf(key());
+  if (IsDehoisted()) os << " + " << base_offset();
+  return os << "] = " << NameOf(value());
+}
+
+
+std::ostream& HStoreKeyedGeneric::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  return os << NameOf(object()) << "[" << NameOf(key())
+            << "] = " << NameOf(value());
+}
+
+
+std::ostream& HTransitionElementsKind::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  os << NameOf(object());
+  ElementsKind from_kind = original_map().handle()->elements_kind();
+  ElementsKind to_kind = transitioned_map().handle()->elements_kind();
+  os << " " << *original_map().handle() << " ["
+     << ElementsAccessor::ForKind(from_kind)->name() << "] -> "
+     << *transitioned_map().handle() << " ["
+     << ElementsAccessor::ForKind(to_kind)->name() << "]";
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) os << " (simple)";
+  return os;
+}
+
+
+std::ostream& HLoadGlobalGeneric::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  return os << name()->ToCString().get() << " ";
+}
+
+
+std::ostream& HInnerAllocatedObject::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  os << NameOf(base_object()) << " offset ";
+  return offset()->PrintTo(os);
+}
+
+
+std::ostream& HLoadContextSlot::PrintDataTo(std::ostream& os) const {  // NOLINT
+  return os << NameOf(value()) << "[" << slot_index() << "]";
+}
+
+
+std::ostream& HStoreContextSlot::PrintDataTo(
+    std::ostream& os) const {  // NOLINT
+  return os << NameOf(context()) << "[" << slot_index()
+            << "] = " << NameOf(value());
+}
+
+
+// Implementation of type inference and type conversions. Calculates
+// the inferred type of this instruction based on the input operands.
+
+HType HValue::CalculateInferredType() {
+  return type_;
+}
+
+
+HType HPhi::CalculateInferredType() {
+  if (OperandCount() == 0) return HType::Tagged();
+  HType result = OperandAt(0)->type();
+  for (int i = 1; i < OperandCount(); ++i) {
+    HType current = OperandAt(i)->type();
+    result = result.Combine(current);
+  }
+  return result;
+}
+
+
+HType HChange::CalculateInferredType() {
+  if (from().IsDouble() && to().IsTagged()) return HType::HeapNumber();
+  return type();
+}
+
+
+Representation HUnaryMathOperation::RepresentationFromInputs() {
+  if (SupportsFlexibleFloorAndRound() &&
+      (op_ == kMathFloor || op_ == kMathRound)) {
+    // Floor and Round always take a double input. The integral result can be
+    // used as an integer or a double. Infer the representation from the uses.
+    return Representation::None();
+  }
+  Representation rep = representation();
+  // If any of the actual input representations is more general than what we
+  // have so far but not Tagged, use that representation instead.
+  Representation input_rep = value()->representation();
+  if (!input_rep.IsTagged()) {
+    rep = rep.generalize(input_rep);
+  }
+  return rep;
+}
+
+
+bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
+                                          HValue* dominator) {
+  DCHECK(side_effect == kNewSpacePromotion);
+  Zone* zone = block()->zone();
+  Isolate* isolate = block()->isolate();
+  if (!FLAG_use_allocation_folding) return false;
+
+  // Try to fold allocations together with their dominating allocations.
+  if (!dominator->IsAllocate()) {
+    if (FLAG_trace_allocation_folding) {
+      PrintF("#%d (%s) cannot fold into #%d (%s)\n",
+          id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+    }
+    return false;
+  }
+
+  // Check whether we are folding within the same block for local folding.
+  if (FLAG_use_local_allocation_folding && dominator->block() != block()) {
+    if (FLAG_trace_allocation_folding) {
+      PrintF("#%d (%s) cannot fold into #%d (%s), crosses basic blocks\n",
+          id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+    }
+    return false;
+  }
+
+  HAllocate* dominator_allocate = HAllocate::cast(dominator);
+  HValue* dominator_size = dominator_allocate->size();
+  HValue* current_size = size();
+
+  // TODO(hpayer): Add support for non-constant allocation in dominator.
+  if (!dominator_size->IsInteger32Constant()) {
+    if (FLAG_trace_allocation_folding) {
+      PrintF("#%d (%s) cannot fold into #%d (%s), "
+             "dynamic allocation size in dominator\n",
+          id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+    }
+    return false;
+  }
+
+
+  if (!IsFoldable(dominator_allocate)) {
+    if (FLAG_trace_allocation_folding) {
+      PrintF("#%d (%s) cannot fold into #%d (%s), different spaces\n", id(),
+             Mnemonic(), dominator->id(), dominator->Mnemonic());
+    }
+    return false;
+  }
+
+  if (!has_size_upper_bound()) {
+    if (FLAG_trace_allocation_folding) {
+      PrintF("#%d (%s) cannot fold into #%d (%s), "
+             "can't estimate total allocation size\n",
+          id(), Mnemonic(), dominator->id(), dominator->Mnemonic());
+    }
+    return false;
+  }
+
+  if (!current_size->IsInteger32Constant()) {
+    // If it's not constant then it is a size_in_bytes calculation graph
+    // like this: (const_header_size + const_element_size * size).
+    DCHECK(current_size->IsInstruction());
+
+    HInstruction* current_instr = HInstruction::cast(current_size);
+    if (!current_instr->Dominates(dominator_allocate)) {
+      if (FLAG_trace_allocation_folding) {
+        PrintF("#%d (%s) cannot fold into #%d (%s), dynamic size "
+               "value does not dominate target allocation\n",
+            id(), Mnemonic(), dominator_allocate->id(),
+            dominator_allocate->Mnemonic());
+      }
+      return false;
+    }
+  }
+
+  DCHECK(
+      (IsNewSpaceAllocation() && dominator_allocate->IsNewSpaceAllocation()) ||
+      (IsOldSpaceAllocation() && dominator_allocate->IsOldSpaceAllocation()));
+
+  // First update the size of the dominator allocate instruction.
+  dominator_size = dominator_allocate->size();
+  int32_t original_object_size =
+      HConstant::cast(dominator_size)->GetInteger32Constant();
+  int32_t dominator_size_constant = original_object_size;
+
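+  // If this allocation must be double aligned, pad the dominator's size so
+  // that the folded object starts on a double-aligned boundary.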
+  if (MustAllocateDoubleAligned()) {
+    if ((dominator_size_constant & kDoubleAlignmentMask) != 0) {
+      dominator_size_constant += kDoubleSize / 2;
+    }
+  }
+
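+  // For example, folding a 24-byte allocation into a 32-byte dominator
+  // yields a single 56-byte allocation, with the folded object placed at
+  // inner offset 32.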
+  int32_t current_size_max_value = size_upper_bound()->GetInteger32Constant();
+  int32_t new_dominator_size = dominator_size_constant + current_size_max_value;
+
+  // Since we clear the first word after the folded memory, we cannot use
+  // the whole Page::kMaxRegularHeapObjectSize.
+  if (new_dominator_size > Page::kMaxRegularHeapObjectSize - kPointerSize) {
+    if (FLAG_trace_allocation_folding) {
+      PrintF("#%d (%s) cannot fold into #%d (%s) due to size: %d\n",
+          id(), Mnemonic(), dominator_allocate->id(),
+          dominator_allocate->Mnemonic(), new_dominator_size);
+    }
+    return false;
+  }
+
+  HInstruction* new_dominator_size_value;
+
+  if (current_size->IsInteger32Constant()) {
+    new_dominator_size_value = HConstant::CreateAndInsertBefore(
+        isolate, zone, context(), new_dominator_size, Representation::None(),
+        dominator_allocate);
+  } else {
+    HValue* new_dominator_size_constant = HConstant::CreateAndInsertBefore(
+        isolate, zone, context(), dominator_size_constant,
+        Representation::Integer32(), dominator_allocate);
+
+    // Add old and new size together and insert.
+    current_size->ChangeRepresentation(Representation::Integer32());
+
+    new_dominator_size_value = HAdd::New(
+        isolate, zone, context(), new_dominator_size_constant, current_size);
+    new_dominator_size_value->ClearFlag(HValue::kCanOverflow);
+    new_dominator_size_value->ChangeRepresentation(Representation::Integer32());
+
+    new_dominator_size_value->InsertBefore(dominator_allocate);
+  }
+
+  dominator_allocate->UpdateSize(new_dominator_size_value);
+
+  if (MustAllocateDoubleAligned()) {
+    if (!dominator_allocate->MustAllocateDoubleAligned()) {
+      dominator_allocate->MakeDoubleAligned();
+    }
+  }
+
+  bool keep_new_space_iterable = FLAG_log_gc || FLAG_heap_stats;
+#ifdef VERIFY_HEAP
+  keep_new_space_iterable = keep_new_space_iterable || FLAG_verify_heap;
+#endif
+
+  if (keep_new_space_iterable && dominator_allocate->IsNewSpaceAllocation()) {
+    dominator_allocate->MakePrefillWithFiller();
+  } else {
+    // TODO(hpayer): This is a short-term hack to make allocation mementos
+    // work again in new space.
+    dominator_allocate->ClearNextMapWord(original_object_size);
+  }
+
+  dominator_allocate->UpdateClearNextMapWord(MustClearNextMapWord());
+
+  // After that replace the dominated allocate instruction.
+  HInstruction* inner_offset = HConstant::CreateAndInsertBefore(
+      isolate, zone, context(), dominator_size_constant, Representation::None(),
+      this);
+
+  HInstruction* dominated_allocate_instr = HInnerAllocatedObject::New(
+      isolate, zone, context(), dominator_allocate, inner_offset, type());
+  dominated_allocate_instr->InsertBefore(this);
+  DeleteAndReplaceWith(dominated_allocate_instr);
+  if (FLAG_trace_allocation_folding) {
+    PrintF("#%d (%s) folded into #%d (%s)\n",
+        id(), Mnemonic(), dominator_allocate->id(),
+        dominator_allocate->Mnemonic());
+  }
+  return true;
+}
+
+
+void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) {
+  DCHECK(filler_free_space_size_ != NULL);
+  Zone* zone = block()->zone();
+  // We must explicitly force Smi representation here because on x64 we
+  // would otherwise automatically choose int32, but the actual store
+  // requires a Smi-tagged value.
+  HConstant* new_free_space_size = HConstant::CreateAndInsertBefore(
+      block()->isolate(), zone, context(),
+      filler_free_space_size_->value()->GetInteger32Constant() +
+          free_space_size,
+      Representation::Smi(), filler_free_space_size_);
+  filler_free_space_size_->UpdateValue(new_free_space_size);
+}
+
+
+void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
+  DCHECK(filler_free_space_size_ == NULL);
+  Isolate* isolate = block()->isolate();
+  Zone* zone = block()->zone();
+  HInstruction* free_space_instr =
+      HInnerAllocatedObject::New(isolate, zone, context(), dominating_allocate_,
+                                 dominating_allocate_->size(), type());
+  free_space_instr->InsertBefore(this);
+  HConstant* filler_map = HConstant::CreateAndInsertAfter(
+      zone, Unique<Map>::CreateImmovable(isolate->factory()->free_space_map()),
+      true, free_space_instr);
+  HInstruction* store_map =
+      HStoreNamedField::New(isolate, zone, context(), free_space_instr,
+                            HObjectAccess::ForMap(), filler_map);
+  store_map->SetFlag(HValue::kHasNoObservableSideEffects);
+  store_map->InsertAfter(filler_map);
+
+  // We must explicitly force Smi representation here because on x64 we
+  // would otherwise automatically choose int32, but the actual store
+  // requires a Smi-tagged value.
+  HConstant* filler_size =
+      HConstant::CreateAndInsertAfter(isolate, zone, context(), free_space_size,
+                                      Representation::Smi(), store_map);
+  // Must force Smi representation for x64 (see comment above).
+  HObjectAccess access = HObjectAccess::ForMapAndOffset(
+      isolate->factory()->free_space_map(), FreeSpace::kSizeOffset,
+      Representation::Smi());
+  HStoreNamedField* store_size = HStoreNamedField::New(
+      isolate, zone, context(), free_space_instr, access, filler_size);
+  store_size->SetFlag(HValue::kHasNoObservableSideEffects);
+  store_size->InsertAfter(filler_size);
+  filler_free_space_size_ = store_size;
+}
+
+
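+// Stores zero where the next object's map word would be, so stale data past
+// this object is not misread as a map (cf. the allocation-memento TODO in
+// the folding code above).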
+void HAllocate::ClearNextMapWord(int offset) {
+  if (MustClearNextMapWord()) {
+    Zone* zone = block()->zone();
+    HObjectAccess access =
+        HObjectAccess::ForObservableJSObjectOffset(offset);
+    HStoreNamedField* clear_next_map =
+        HStoreNamedField::New(block()->isolate(), zone, context(), this, access,
+                              block()->graph()->GetConstant0());
+    clear_next_map->ClearAllSideEffects();
+    clear_next_map->InsertAfter(this);
+  }
+}
+
+
+std::ostream& HAllocate::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << NameOf(size()) << " (";
+  if (IsNewSpaceAllocation()) os << "N";
+  if (IsOldSpaceAllocation()) os << "P";
+  if (MustAllocateDoubleAligned()) os << "A";
+  if (MustPrefillWithFiller()) os << "F";
+  return os << ")";
+}
+
+
+bool HStoreKeyed::TryIncreaseBaseOffset(uint32_t increase_by_value) {
+  // The base offset is usually just the size of the array header, except
+  // when dehoisting adds an additional offset due to array index key
+  // manipulation, in which case it becomes (array header size +
+  // constant-offset-from-key * kPointerSize).
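+  // For example, a dehoisted constant key offset of 2 on a FixedArray-backed
+  // store adds 2 * kPointerSize on top of FixedArray::kHeaderSize.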
+  v8::base::internal::CheckedNumeric<uint32_t> addition_result = base_offset_;
+  addition_result += increase_by_value;
+  if (!addition_result.IsValid()) return false;
+  base_offset_ = addition_result.ValueOrDie();
+  return true;
+}
+
+
+bool HStoreKeyed::NeedsCanonicalization() {
+  switch (value()->opcode()) {
+    case kLoadKeyed: {
+      ElementsKind load_kind = HLoadKeyed::cast(value())->elements_kind();
+      return IsFixedFloatElementsKind(load_kind);
+    }
+    case kChange: {
+      Representation from = HChange::cast(value())->from();
+      return from.IsTagged() || from.IsHeapObject();
+    }
+    case kLoadNamedField:
+    case kPhi: {
+      // Better safe than sorry...
+      return true;
+    }
+    default:
+      return false;
+  }
+}
+
+
+#define H_CONSTANT_INT(val) \
+  HConstant::New(isolate, zone, context, static_cast<int32_t>(val))
+#define H_CONSTANT_DOUBLE(val) \
+  HConstant::New(isolate, zone, context, static_cast<double>(val))
+
+#define DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HInstr, op)                      \
+  HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context,    \
+                            HValue* left, HValue* right, Strength strength) { \
+    if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {   \
+      HConstant* c_left = HConstant::cast(left);                              \
+      HConstant* c_right = HConstant::cast(right);                            \
+      if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {          \
+        double double_res = c_left->DoubleValue() op c_right->DoubleValue();  \
+        if (IsInt32Double(double_res)) {                                      \
+          return H_CONSTANT_INT(double_res);                                  \
+        }                                                                     \
+        return H_CONSTANT_DOUBLE(double_res);                                 \
+      }                                                                       \
+    }                                                                         \
+    return new (zone) HInstr(context, left, right, strength);                 \
+  }
+
+
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HAdd, +)
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HMul, *)
+DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR(HSub, -)
+
+#undef DEFINE_NEW_H_SIMPLE_ARITHMETIC_INSTR
+
+
+HInstruction* HStringAdd::New(Isolate* isolate, Zone* zone, HValue* context,
+                              HValue* left, HValue* right,
+                              PretenureFlag pretenure_flag,
+                              StringAddFlags flags,
+                              Handle<AllocationSite> allocation_site) {
+  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+    HConstant* c_right = HConstant::cast(right);
+    HConstant* c_left = HConstant::cast(left);
+    if (c_left->HasStringValue() && c_right->HasStringValue()) {
+      Handle<String> left_string = c_left->StringValue();
+      Handle<String> right_string = c_right->StringValue();
+      // Prevent a possible exception due to an invalid string length.
+      if (left_string->length() + right_string->length() < String::kMaxLength) {
+        MaybeHandle<String> concat = isolate->factory()->NewConsString(
+            c_left->StringValue(), c_right->StringValue());
+        return HConstant::New(isolate, zone, context, concat.ToHandleChecked());
+      }
+    }
+  }
+  return new (zone)
+      HStringAdd(context, left, right, pretenure_flag, flags, allocation_site);
+}
+
+
+std::ostream& HStringAdd::PrintDataTo(std::ostream& os) const {  // NOLINT
+  if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
+    os << "_CheckBoth";
+  } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_LEFT) {
+    os << "_CheckLeft";
+  } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_RIGHT) {
+    os << "_CheckRight";
+  }
+  HBinaryOperation::PrintDataTo(os);
+  os << " (";
+  if (pretenure_flag() == NOT_TENURED)
+    os << "N";
+  else if (pretenure_flag() == TENURED)
+    os << "D";
+  return os << ")";
+}
+
+
+HInstruction* HStringCharFromCode::New(Isolate* isolate, Zone* zone,
+                                       HValue* context, HValue* char_code) {
+  if (FLAG_fold_constants && char_code->IsConstant()) {
+    HConstant* c_code = HConstant::cast(char_code);
+    if (c_code->HasNumberValue()) {
+      if (std::isfinite(c_code->DoubleValue())) {
+        uint32_t code = c_code->NumberValueAsInteger32() & 0xffff;
+        return HConstant::New(
+            isolate, zone, context,
+            isolate->factory()->LookupSingleCharacterStringFromCode(code));
+      }
+      return HConstant::New(isolate, zone, context,
+                            isolate->factory()->empty_string());
+    }
+  }
+  return new(zone) HStringCharFromCode(context, char_code);
+}
+
+
+HInstruction* HUnaryMathOperation::New(Isolate* isolate, Zone* zone,
+                                       HValue* context, HValue* value,
+                                       BuiltinFunctionId op) {
+  do {
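+    // The do { ... } while (false) wrapper lets each constant-folding path
+    // below bail out with "break" and fall through to the generic instruction
+    // construction at the end.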
+    if (!FLAG_fold_constants) break;
+    if (!value->IsConstant()) break;
+    HConstant* constant = HConstant::cast(value);
+    if (!constant->HasNumberValue()) break;
+    double d = constant->DoubleValue();
+    if (std::isnan(d)) {  // NaN poisons everything.
+      return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
+    }
+    if (std::isinf(d)) {  // +Infinity and -Infinity.
+      switch (op) {
+        case kMathExp:
+          return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
+        case kMathLog:
+        case kMathSqrt:
+          return H_CONSTANT_DOUBLE(
+              (d > 0.0) ? d : std::numeric_limits<double>::quiet_NaN());
+        case kMathPowHalf:
+        case kMathAbs:
+          return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d);
+        case kMathRound:
+        case kMathFround:
+        case kMathFloor:
+          return H_CONSTANT_DOUBLE(d);
+        case kMathClz32:
+          return H_CONSTANT_INT(32);
+        default:
+          UNREACHABLE();
+          break;
+      }
+    }
+    switch (op) {
+      case kMathExp:
+        lazily_initialize_fast_exp(isolate);
+        return H_CONSTANT_DOUBLE(fast_exp(d, isolate));
+      case kMathLog:
+        return H_CONSTANT_DOUBLE(std::log(d));
+      case kMathSqrt:
+        lazily_initialize_fast_sqrt(isolate);
+        return H_CONSTANT_DOUBLE(fast_sqrt(d, isolate));
+      case kMathPowHalf:
+        return H_CONSTANT_DOUBLE(power_double_double(d, 0.5));
+      case kMathAbs:
+        return H_CONSTANT_DOUBLE((d >= 0.0) ? d + 0.0 : -d);
+      case kMathRound:
+        // -0.5 .. -0.0 round to -0.0.
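+        // For example, Math.round(-0.3) is -0 in JavaScript.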
+        if ((d >= -0.5 && Double(d).Sign() < 0)) return H_CONSTANT_DOUBLE(-0.0);
+        // Doubles are represented as Significand * 2 ^ Exponent. If the
+        // Exponent is not negative, the double value is already an integer.
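+        // (Exponent() >= 0 holds exactly when |d| >= 2^52, at which point the
+        // 52-bit significand carries no fractional bits.)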
+        if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d);
+        return H_CONSTANT_DOUBLE(Floor(d + 0.5));
+      case kMathFround:
+        return H_CONSTANT_DOUBLE(static_cast<double>(static_cast<float>(d)));
+      case kMathFloor:
+        return H_CONSTANT_DOUBLE(Floor(d));
+      case kMathClz32: {
+        uint32_t i = DoubleToUint32(d);
+        return H_CONSTANT_INT(base::bits::CountLeadingZeros32(i));
+      }
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } while (false);
+  return new(zone) HUnaryMathOperation(context, value, op);
+}
+
+
+Representation HUnaryMathOperation::RepresentationFromUses() {
+  if (op_ != kMathFloor && op_ != kMathRound) {
+    return HValue::RepresentationFromUses();
+  }
+
+  // The instruction can have an int32 or double output. Prefer a double
+  // representation if there are double uses.
+  bool use_double = false;
+
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    int use_index = it.index();
+    Representation rep_observed = use->observed_input_representation(use_index);
+    Representation rep_required = use->RequiredInputRepresentation(use_index);
+    use_double |= (rep_observed.IsDouble() || rep_required.IsDouble());
+    if (use_double && !FLAG_trace_representation) {
+      // Having seen one double is enough.
+      break;
+    }
+    if (FLAG_trace_representation) {
+      if (!rep_required.IsDouble() || rep_observed.IsDouble()) {
+        PrintF("#%d %s is used by #%d %s as %s%s\n",
+               id(), Mnemonic(), use->id(),
+               use->Mnemonic(), rep_observed.Mnemonic(),
+               (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
+      } else {
+        PrintF("#%d %s is required by #%d %s as %s%s\n",
+               id(), Mnemonic(), use->id(),
+               use->Mnemonic(), rep_required.Mnemonic(),
+               (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
+      }
+    }
+  }
+  return use_double ? Representation::Double() : Representation::Integer32();
+}
+
+
+HInstruction* HPower::New(Isolate* isolate, Zone* zone, HValue* context,
+                          HValue* left, HValue* right) {
+  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
+      double result =
+          power_helper(isolate, c_left->DoubleValue(), c_right->DoubleValue());
+      return H_CONSTANT_DOUBLE(std::isnan(result)
+                                   ? std::numeric_limits<double>::quiet_NaN()
+                                   : result);
+    }
+  }
+  return new(zone) HPower(left, right);
+}
+
+
+HInstruction* HMathMinMax::New(Isolate* isolate, Zone* zone, HValue* context,
+                               HValue* left, HValue* right, Operation op) {
+  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
+      double d_left = c_left->DoubleValue();
+      double d_right = c_right->DoubleValue();
+      if (op == kMathMin) {
+        if (d_left > d_right) return H_CONSTANT_DOUBLE(d_right);
+        if (d_left < d_right) return H_CONSTANT_DOUBLE(d_left);
+        if (d_left == d_right) {
+          // Handle +0 and -0.
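+          // For example, Math.min(+0, -0) is -0, so return the operand whose
+          // sign bit is set.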
+          return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_left
+                                                                 : d_right);
+        }
+      } else {
+        if (d_left < d_right) return H_CONSTANT_DOUBLE(d_right);
+        if (d_left > d_right) return H_CONSTANT_DOUBLE(d_left);
+        if (d_left == d_right) {
+          // Handle +0 and -0.
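+          // For example, Math.max(+0, -0) is +0.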
+          return H_CONSTANT_DOUBLE((Double(d_left).Sign() == -1) ? d_right
+                                                                 : d_left);
+        }
+      }
+      // All comparisons failed, must be NaN.
+      return H_CONSTANT_DOUBLE(std::numeric_limits<double>::quiet_NaN());
+    }
+  }
+  return new(zone) HMathMinMax(context, left, right, op);
+}
+
+
+HInstruction* HMod::New(Isolate* isolate, Zone* zone, HValue* context,
+                        HValue* left, HValue* right, Strength strength) {
+  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if (c_left->HasInteger32Value() && c_right->HasInteger32Value()) {
+      int32_t dividend = c_left->Integer32Value();
+      int32_t divisor = c_right->Integer32Value();
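+      // kMinInt % -1 is undefined behavior in C++, and JavaScript requires
+      // -0 here: the result is zero and takes the dividend's sign.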
+      if (dividend == kMinInt && divisor == -1) {
+        return H_CONSTANT_DOUBLE(-0.0);
+      }
+      if (divisor != 0) {
+        int32_t res = dividend % divisor;
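+        // A zero result with a negative dividend must be -0 in JavaScript,
+        // e.g. -4 % 2 is -0.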
+        if ((res == 0) && (dividend < 0)) {
+          return H_CONSTANT_DOUBLE(-0.0);
+        }
+        return H_CONSTANT_INT(res);
+      }
+    }
+  }
+  return new (zone) HMod(context, left, right, strength);
+}
+
+
+HInstruction* HDiv::New(Isolate* isolate, Zone* zone, HValue* context,
+                        HValue* left, HValue* right, Strength strength) {
+  // If left and right are constant values, try to return a constant value.
+  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      if (c_right->DoubleValue() != 0) {
+        double double_res = c_left->DoubleValue() / c_right->DoubleValue();
+        if (IsInt32Double(double_res)) {
+          return H_CONSTANT_INT(double_res);
+        }
+        return H_CONSTANT_DOUBLE(double_res);
+      } else {
+        int sign = Double(c_left->DoubleValue()).Sign() *
+                   Double(c_right->DoubleValue()).Sign();  // Right could be -0.
+        return H_CONSTANT_DOUBLE(sign * V8_INFINITY);
+      }
+    }
+  }
+  return new (zone) HDiv(context, left, right, strength);
+}
+
+
+HInstruction* HBitwise::New(Isolate* isolate, Zone* zone, HValue* context,
+                            Token::Value op, HValue* left, HValue* right,
+                            Strength strength) {
+  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      int32_t result;
+      int32_t v_left = c_left->NumberValueAsInteger32();
+      int32_t v_right = c_right->NumberValueAsInteger32();
+      switch (op) {
+        case Token::BIT_XOR:
+          result = v_left ^ v_right;
+          break;
+        case Token::BIT_AND:
+          result = v_left & v_right;
+          break;
+        case Token::BIT_OR:
+          result = v_left | v_right;
+          break;
+        default:
+          result = 0;  // Please the compiler.
+          UNREACHABLE();
+      }
+      return H_CONSTANT_INT(result);
+    }
+  }
+  return new (zone) HBitwise(context, op, left, right, strength);
+}
+
+
+#define DEFINE_NEW_H_BITWISE_INSTR(HInstr, result)                            \
+  HInstruction* HInstr::New(Isolate* isolate, Zone* zone, HValue* context,    \
+                            HValue* left, HValue* right, Strength strength) { \
+    if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {   \
+      HConstant* c_left = HConstant::cast(left);                              \
+      HConstant* c_right = HConstant::cast(right);                            \
+      if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {          \
+        return H_CONSTANT_INT(result);                                        \
+      }                                                                       \
+    }                                                                         \
+    return new (zone) HInstr(context, left, right, strength);                 \
+  }
+
+
+DEFINE_NEW_H_BITWISE_INSTR(HSar,
+                           c_left->NumberValueAsInteger32() >>
+                               (c_right->NumberValueAsInteger32() & 0x1f))
+DEFINE_NEW_H_BITWISE_INSTR(HShl,
+                           c_left->NumberValueAsInteger32()
+                               << (c_right->NumberValueAsInteger32() & 0x1f))
+
+#undef DEFINE_NEW_H_BITWISE_INSTR
+
+
+HInstruction* HShr::New(Isolate* isolate, Zone* zone, HValue* context,
+                        HValue* left, HValue* right, Strength strength) {
+  if (FLAG_fold_constants && left->IsConstant() && right->IsConstant()) {
+    HConstant* c_left = HConstant::cast(left);
+    HConstant* c_right = HConstant::cast(right);
+    if ((c_left->HasNumberValue() && c_right->HasNumberValue())) {
+      int32_t left_val = c_left->NumberValueAsInteger32();
+      int32_t right_val = c_right->NumberValueAsInteger32() & 0x1f;
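+      // A zero shift of a negative value yields a uint32 that does not fit in
+      // an int32, e.g. (-1) >>> 0 is 4294967295, hence the double constant.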
+      if ((right_val == 0) && (left_val < 0)) {
+        return H_CONSTANT_DOUBLE(static_cast<uint32_t>(left_val));
+      }
+      return H_CONSTANT_INT(static_cast<uint32_t>(left_val) >> right_val);
+    }
+  }
+  return new (zone) HShr(context, left, right, strength);
+}
+
+
+HInstruction* HSeqStringGetChar::New(Isolate* isolate, Zone* zone,
+                                     HValue* context, String::Encoding encoding,
+                                     HValue* string, HValue* index) {
+  if (FLAG_fold_constants && string->IsConstant() && index->IsConstant()) {
+    HConstant* c_string = HConstant::cast(string);
+    HConstant* c_index = HConstant::cast(index);
+    if (c_string->HasStringValue() && c_index->HasInteger32Value()) {
+      Handle<String> s = c_string->StringValue();
+      int32_t i = c_index->Integer32Value();
+      DCHECK_LE(0, i);
+      DCHECK_LT(i, s->length());
+      return H_CONSTANT_INT(s->Get(i));
+    }
+  }
+  return new(zone) HSeqStringGetChar(encoding, string, index);
+}
+
+
+#undef H_CONSTANT_INT
+#undef H_CONSTANT_DOUBLE
+
+
+std::ostream& HBitwise::PrintDataTo(std::ostream& os) const {  // NOLINT
+  os << Token::Name(op_) << " ";
+  return HBitwiseBinaryOperation::PrintDataTo(os);
+}
+
+
+void HPhi::SimplifyConstantInputs() {
+  // Convert constant inputs to integers when all uses are truncating.
+  // This must happen before representation inference takes place.
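+  // For example, a phi of the constants 1.5 and true whose uses all truncate
+  // to int32 is rewritten to a phi of 1 and 1 (DoubleToInt32(1.5) == 1).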
+  if (!CheckUsesForFlag(kTruncatingToInt32)) return;
+  for (int i = 0; i < OperandCount(); ++i) {
+    if (!OperandAt(i)->IsConstant()) return;
+  }
+  HGraph* graph = block()->graph();
+  for (int i = 0; i < OperandCount(); ++i) {
+    HConstant* operand = HConstant::cast(OperandAt(i));
+    if (operand->HasInteger32Value()) {
+      continue;
+    } else if (operand->HasDoubleValue()) {
+      HConstant* integer_input = HConstant::New(
+          graph->isolate(), graph->zone(), graph->GetInvalidContext(),
+          DoubleToInt32(operand->DoubleValue()));
+      integer_input->InsertAfter(operand);
+      SetOperandAt(i, integer_input);
+    } else if (operand->HasBooleanValue()) {
+      SetOperandAt(i, operand->BooleanValue() ? graph->GetConstant1()
+                                              : graph->GetConstant0());
+    } else if (operand->ImmortalImmovable()) {
+      SetOperandAt(i, graph->GetConstant0());
+    }
+  }
+  // Overwrite observed input representations because they are likely Tagged.
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+    if (use->IsBinaryOperation()) {
+      HBinaryOperation::cast(use)->set_observed_input_representation(
+          it.index(), Representation::Smi());
+    }
+  }
+}
+
+
+void HPhi::InferRepresentation(HInferRepresentationPhase* h_infer) {
+  DCHECK(CheckFlag(kFlexibleRepresentation));
+  Representation new_rep = RepresentationFromUses();
+  UpdateRepresentation(new_rep, h_infer, "uses");
+  new_rep = RepresentationFromInputs();
+  UpdateRepresentation(new_rep, h_infer, "inputs");
+  new_rep = RepresentationFromUseRequirements();
+  UpdateRepresentation(new_rep, h_infer, "use requirements");
+}
+
+
+Representation HPhi::RepresentationFromInputs() {
+  Representation r = representation();
+  for (int i = 0; i < OperandCount(); ++i) {
+    // Ignore the conservative Tagged assumption for parameters when we have
+    // reason to believe it is too conservative.
+    if (has_type_feedback_from_uses() && OperandAt(i)->IsParameter()) {
+      continue;
+    }
+
+    r = r.generalize(OperandAt(i)->KnownOptimalRepresentation());
+  }
+  return r;
+}
+
+
+// Returns a representation if all uses agree on the same representation.
+// Integer32 is also returned when some uses are Smi but others are Integer32.
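+// For example, a use requiring Smi and another requiring Integer32
+// generalize to Integer32, while a Double requirement alongside an
+// Integer32 one makes this return None.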
+Representation HValue::RepresentationFromUseRequirements() {
+  Representation rep = Representation::None();
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    // Ignore the use requirement from code that never runs.
+    if (it.value()->block()->IsUnreachable()) continue;
+
+    // We check for observed_input_representation elsewhere.
+    Representation use_rep =
+        it.value()->RequiredInputRepresentation(it.index());
+    if (rep.IsNone()) {
+      rep = use_rep;
+      continue;
+    }
+    if (use_rep.IsNone() || rep.Equals(use_rep)) continue;
+    if (rep.generalize(use_rep).IsInteger32()) {
+      rep = Representation::Integer32();
+      continue;
+    }
+    return Representation::None();
+  }
+  return rep;
+}
+
+
+bool HValue::HasNonSmiUse() {
+  for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
+    // We check for observed_input_representation elsewhere.
+    Representation use_rep =
+        it.value()->RequiredInputRepresentation(it.index());
+    if (!use_rep.IsNone() &&
+        !use_rep.IsSmi() &&
+        !use_rep.IsTagged()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+// Node-specific verification code is only included in debug mode.
+#ifdef DEBUG
+
+void HPhi::Verify() {
+  DCHECK(OperandCount() == block()->predecessors()->length());
+  for (int i = 0; i < OperandCount(); ++i) {
+    HValue* value = OperandAt(i);
+    HBasicBlock* defining_block = value->block();
+    HBasicBlock* predecessor_block = block()->predecessors()->at(i);
+    DCHECK(defining_block == predecessor_block ||
+           defining_block->Dominates(predecessor_block));
+  }
+}
+
+
+void HSimulate::Verify() {
+  HInstruction::Verify();
+  DCHECK(HasAstId() || next()->IsEnterInlined());
+}
+
+
+void HCheckHeapObject::Verify() {
+  HInstruction::Verify();
+  DCHECK(HasNoUses());
+}
+
+
+void HCheckValue::Verify() {
+  HInstruction::Verify();
+  DCHECK(HasNoUses());
+}
+
+#endif
+
+
+HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) {
+  DCHECK(offset >= 0);
+  DCHECK(offset < FixedArray::kHeaderSize);
+  if (offset == FixedArray::kLengthOffset) return ForFixedArrayLength();
+  return HObjectAccess(kInobject, offset);
+}
+
+
+HObjectAccess HObjectAccess::ForMapAndOffset(Handle<Map> map, int offset,
+    Representation representation) {
+  DCHECK(offset >= 0);
+  Portion portion = kInobject;
+
+  if (offset == JSObject::kElementsOffset) {
+    portion = kElementsPointer;
+  } else if (offset == JSObject::kMapOffset) {
+    portion = kMaps;
+  }
+  bool existing_inobject_property = true;
+  if (!map.is_null()) {
+    existing_inobject_property = (offset <
+        map->instance_size() - map->unused_property_fields() * kPointerSize);
+  }
+  return HObjectAccess(portion, offset, representation, Handle<String>::null(),
+                       false, existing_inobject_property);
+}
+
+
+HObjectAccess HObjectAccess::ForAllocationSiteOffset(int offset) {
+  switch (offset) {
+    case AllocationSite::kTransitionInfoOffset:
+      return HObjectAccess(kInobject, offset, Representation::Tagged());
+    case AllocationSite::kNestedSiteOffset:
+      return HObjectAccess(kInobject, offset, Representation::Tagged());
+    case AllocationSite::kPretenureDataOffset:
+      return HObjectAccess(kInobject, offset, Representation::Smi());
+    case AllocationSite::kPretenureCreateCountOffset:
+      return HObjectAccess(kInobject, offset, Representation::Smi());
+    case AllocationSite::kDependentCodeOffset:
+      return HObjectAccess(kInobject, offset, Representation::Tagged());
+    case AllocationSite::kWeakNextOffset:
+      return HObjectAccess(kInobject, offset, Representation::Tagged());
+    default:
+      UNREACHABLE();
+  }
+  return HObjectAccess(kInobject, offset);
+}
+
+
+HObjectAccess HObjectAccess::ForContextSlot(int index) {
+  DCHECK(index >= 0);
+  Portion portion = kInobject;
+  int offset = Context::kHeaderSize + index * kPointerSize;
+  DCHECK_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag);
+  return HObjectAccess(portion, offset, Representation::Tagged());
+}
+
+
+HObjectAccess HObjectAccess::ForScriptContext(int index) {
+  DCHECK(index >= 0);
+  Portion portion = kInobject;
+  int offset = ScriptContextTable::GetContextOffset(index);
+  return HObjectAccess(portion, offset, Representation::Tagged());
+}
+
+
+HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
+  DCHECK(offset >= 0);
+  Portion portion = kInobject;
+
+  if (offset == JSObject::kElementsOffset) {
+    portion = kElementsPointer;
+  } else if (offset == JSArray::kLengthOffset) {
+    portion = kArrayLengths;
+  } else if (offset == JSObject::kMapOffset) {
+    portion = kMaps;
+  }
+  return HObjectAccess(portion, offset);
+}
+
+
+HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset,
+    Representation representation) {
+  DCHECK(offset >= 0);
+  return HObjectAccess(kBackingStore, offset, representation,
+                       Handle<String>::null(), false, false);
+}
+
+
+HObjectAccess HObjectAccess::ForField(Handle<Map> map, int index,
+                                      Representation representation,
+                                      Handle<Name> name) {
+  if (index < 0) {
+    // Negative property indices are in-object properties, indexed
+    // from the end of the fixed part of the object.
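+    // For example, index -1 yields offset map->instance_size() - kPointerSize,
+    // i.e. the last in-object slot.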
+    int offset = (index * kPointerSize) + map->instance_size();
+    return HObjectAccess(kInobject, offset, representation, name, false, true);
+  } else {
+    // Non-negative property indices are in the properties array.
+    int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
+    return HObjectAccess(kBackingStore, offset, representation, name,
+                         false, false);
+  }
+}
+
+
+void HObjectAccess::SetGVNFlags(HValue *instr, PropertyAccessType access_type) {
+  // Set the appropriate GVN flags for a given load or store instruction.
+  if (access_type == STORE) {
+    // Track dominating allocations in order to eliminate write barriers.
+    instr->SetDependsOnFlag(::v8::internal::kNewSpacePromotion);
+    instr->SetFlag(HValue::kTrackSideEffectDominators);
+  } else {
+    // Try to GVN loads, but don't hoist them above map changes.
+    instr->SetFlag(HValue::kUseGVN);
+    instr->SetDependsOnFlag(::v8::internal::kMaps);
+  }
+
+  switch (portion()) {
+    case kArrayLengths:
+      if (access_type == STORE) {
+        instr->SetChangesFlag(::v8::internal::kArrayLengths);
+      } else {
+        instr->SetDependsOnFlag(::v8::internal::kArrayLengths);
+      }
+      break;
+    case kStringLengths:
+      if (access_type == STORE) {
+        instr->SetChangesFlag(::v8::internal::kStringLengths);
+      } else {
+        instr->SetDependsOnFlag(::v8::internal::kStringLengths);
+      }
+      break;
+    case kInobject:
+      if (access_type == STORE) {
+        instr->SetChangesFlag(::v8::internal::kInobjectFields);
+      } else {
+        instr->SetDependsOnFlag(::v8::internal::kInobjectFields);
+      }
+      break;
+    case kDouble:
+      if (access_type == STORE) {
+        instr->SetChangesFlag(::v8::internal::kDoubleFields);
+      } else {
+        instr->SetDependsOnFlag(::v8::internal::kDoubleFields);
+      }
+      break;
+    case kBackingStore:
+      if (access_type == STORE) {
+        instr->SetChangesFlag(::v8::internal::kBackingStoreFields);
+      } else {
+        instr->SetDependsOnFlag(::v8::internal::kBackingStoreFields);
+      }
+      break;
+    case kElementsPointer:
+      if (access_type == STORE) {
+        instr->SetChangesFlag(::v8::internal::kElementsPointer);
+      } else {
+        instr->SetDependsOnFlag(::v8::internal::kElementsPointer);
+      }
+      break;
+    case kMaps:
+      if (access_type == STORE) {
+        instr->SetChangesFlag(::v8::internal::kMaps);
+      } else {
+        instr->SetDependsOnFlag(::v8::internal::kMaps);
+      }
+      break;
+    case kExternalMemory:
+      if (access_type == STORE) {
+        instr->SetChangesFlag(::v8::internal::kExternalMemory);
+      } else {
+        instr->SetDependsOnFlag(::v8::internal::kExternalMemory);
+      }
+      break;
+  }
+}
+
+
+std::ostream& operator<<(std::ostream& os, const HObjectAccess& access) {
+  os << ".";
+
+  switch (access.portion()) {
+    case HObjectAccess::kArrayLengths:
+    case HObjectAccess::kStringLengths:
+      os << "%length";
+      break;
+    case HObjectAccess::kElementsPointer:
+      os << "%elements";
+      break;
+    case HObjectAccess::kMaps:
+      os << "%map";
+      break;
+    case HObjectAccess::kDouble:  // fall through
+    case HObjectAccess::kInobject:
+      if (!access.name().is_null() && access.name()->IsString()) {
+        os << Handle<String>::cast(access.name())->ToCString().get();
+      }
+      os << "[in-object]";
+      break;
+    case HObjectAccess::kBackingStore:
+      if (!access.name().is_null() && access.name()->IsString()) {
+        os << Handle<String>::cast(access.name())->ToCString().get();
+      }
+      os << "[backing-store]";
+      break;
+    case HObjectAccess::kExternalMemory:
+      os << "[external-memory]";
+      break;
+  }
+
+  return os << "@" << access.offset();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-instructions.h b/src/crankshaft/hydrogen-instructions.h
new file mode 100644
index 0000000..13ada8c
--- /dev/null
+++ b/src/crankshaft/hydrogen-instructions.h
@@ -0,0 +1,7818 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
+#define V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
+
+#include <cstring>
+#include <iosfwd>
+
+#include "src/allocation.h"
+#include "src/base/bits.h"
+#include "src/bit-vector.h"
+#include "src/code-stubs.h"
+#include "src/conversions.h"
+#include "src/crankshaft/hydrogen-types.h"
+#include "src/crankshaft/unique.h"
+#include "src/deoptimizer.h"
+#include "src/small-pointer-list.h"
+#include "src/utils.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+struct ChangesOf;
+class HBasicBlock;
+class HDiv;
+class HEnvironment;
+class HInferRepresentationPhase;
+class HInstruction;
+class HLoopInformation;
+class HStoreNamedField;
+class HValue;
+class LInstruction;
+class LChunkBuilder;
+
+#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
+  V(ArithmeticBinaryOperation)                \
+  V(BinaryOperation)                          \
+  V(BitwiseBinaryOperation)                   \
+  V(ControlInstruction)                       \
+  V(Instruction)
+
+
+#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AbnormalExit)                             \
+  V(AccessArgumentsAt)                        \
+  V(Add)                                      \
+  V(AllocateBlockContext)                     \
+  V(Allocate)                                 \
+  V(ApplyArguments)                           \
+  V(ArgumentsElements)                        \
+  V(ArgumentsLength)                          \
+  V(ArgumentsObject)                          \
+  V(Bitwise)                                  \
+  V(BlockEntry)                               \
+  V(BoundsCheck)                              \
+  V(BoundsCheckBaseIndexInformation)          \
+  V(Branch)                                   \
+  V(CallWithDescriptor)                       \
+  V(CallJSFunction)                           \
+  V(CallFunction)                             \
+  V(CallNewArray)                             \
+  V(CallRuntime)                              \
+  V(CallStub)                                 \
+  V(CapturedObject)                           \
+  V(Change)                                   \
+  V(CheckArrayBufferNotNeutered)              \
+  V(CheckHeapObject)                          \
+  V(CheckInstanceType)                        \
+  V(CheckMaps)                                \
+  V(CheckMapValue)                            \
+  V(CheckSmi)                                 \
+  V(CheckValue)                               \
+  V(ClampToUint8)                             \
+  V(ClassOfTestAndBranch)                     \
+  V(CompareNumericAndBranch)                  \
+  V(CompareHoleAndBranch)                     \
+  V(CompareGeneric)                           \
+  V(CompareMinusZeroAndBranch)                \
+  V(CompareObjectEqAndBranch)                 \
+  V(CompareMap)                               \
+  V(Constant)                                 \
+  V(ConstructDouble)                          \
+  V(Context)                                  \
+  V(DebugBreak)                               \
+  V(DeclareGlobals)                           \
+  V(Deoptimize)                               \
+  V(Div)                                      \
+  V(DoubleBits)                               \
+  V(DummyUse)                                 \
+  V(EnterInlined)                             \
+  V(EnvironmentMarker)                        \
+  V(ForceRepresentation)                      \
+  V(ForInCacheArray)                          \
+  V(ForInPrepareMap)                          \
+  V(GetCachedArrayIndex)                      \
+  V(Goto)                                     \
+  V(HasCachedArrayIndexAndBranch)             \
+  V(HasInstanceTypeAndBranch)                 \
+  V(InnerAllocatedObject)                     \
+  V(InstanceOf)                               \
+  V(InvokeFunction)                           \
+  V(HasInPrototypeChainAndBranch)             \
+  V(IsStringAndBranch)                        \
+  V(IsSmiAndBranch)                           \
+  V(IsUndetectableAndBranch)                  \
+  V(LeaveInlined)                             \
+  V(LoadContextSlot)                          \
+  V(LoadFieldByIndex)                         \
+  V(LoadFunctionPrototype)                    \
+  V(LoadGlobalGeneric)                        \
+  V(LoadKeyed)                                \
+  V(LoadKeyedGeneric)                         \
+  V(LoadNamedField)                           \
+  V(LoadNamedGeneric)                         \
+  V(LoadRoot)                                 \
+  V(MapEnumLength)                            \
+  V(MathFloorOfDiv)                           \
+  V(MathMinMax)                               \
+  V(MaybeGrowElements)                        \
+  V(Mod)                                      \
+  V(Mul)                                      \
+  V(OsrEntry)                                 \
+  V(Parameter)                                \
+  V(Power)                                    \
+  V(Prologue)                                 \
+  V(PushArguments)                            \
+  V(Return)                                   \
+  V(Ror)                                      \
+  V(Sar)                                      \
+  V(SeqStringGetChar)                         \
+  V(SeqStringSetChar)                         \
+  V(Shl)                                      \
+  V(Shr)                                      \
+  V(Simulate)                                 \
+  V(StackCheck)                               \
+  V(StoreCodeEntry)                           \
+  V(StoreContextSlot)                         \
+  V(StoreFrameContext)                        \
+  V(StoreKeyed)                               \
+  V(StoreKeyedGeneric)                        \
+  V(StoreNamedField)                          \
+  V(StoreNamedGeneric)                        \
+  V(StringAdd)                                \
+  V(StringCharCodeAt)                         \
+  V(StringCharFromCode)                       \
+  V(StringCompareAndBranch)                   \
+  V(Sub)                                      \
+  V(ThisFunction)                             \
+  V(ToFastProperties)                         \
+  V(TransitionElementsKind)                   \
+  V(TrapAllocationMemento)                    \
+  V(Typeof)                                   \
+  V(TypeofIsAndBranch)                        \
+  V(UnaryMathOperation)                       \
+  V(UnknownOSRValue)                          \
+  V(UseConst)                                 \
+  V(WrapReceiver)
+
+#define GVN_TRACKED_FLAG_LIST(V)               \
+  V(NewSpacePromotion)
+
+#define GVN_UNTRACKED_FLAG_LIST(V)             \
+  V(ArrayElements)                             \
+  V(ArrayLengths)                              \
+  V(StringLengths)                             \
+  V(BackingStoreFields)                        \
+  V(Calls)                                     \
+  V(ContextSlots)                              \
+  V(DoubleArrayElements)                       \
+  V(DoubleFields)                              \
+  V(ElementsKind)                              \
+  V(ElementsPointer)                           \
+  V(GlobalVars)                                \
+  V(InobjectFields)                            \
+  V(Maps)                                      \
+  V(OsrEntries)                                \
+  V(ExternalMemory)                            \
+  V(StringChars)                               \
+  V(TypedArrayElements)
+
+
+#define DECLARE_ABSTRACT_INSTRUCTION(type)     \
+  bool Is##type() const final { return true; } \
+  static H##type* cast(HValue* value) {        \
+    DCHECK(value->Is##type());                 \
+    return reinterpret_cast<H##type*>(value);  \
+  }
+
+
+#define DECLARE_CONCRETE_INSTRUCTION(type)                      \
+  LInstruction* CompileToLithium(LChunkBuilder* builder) final; \
+  static H##type* cast(HValue* value) {                         \
+    DCHECK(value->Is##type());                                  \
+    return reinterpret_cast<H##type*>(value);                   \
+  }                                                             \
+  Opcode opcode() const final { return HValue::k##type; }
+
+
+enum PropertyAccessType { LOAD, STORE };
+
+
+class Range final : public ZoneObject {
+ public:
+  Range()
+      : lower_(kMinInt),
+        upper_(kMaxInt),
+        next_(NULL),
+        can_be_minus_zero_(false) { }
+
+  Range(int32_t lower, int32_t upper)
+      : lower_(lower),
+        upper_(upper),
+        next_(NULL),
+        can_be_minus_zero_(false) { }
+
+  int32_t upper() const { return upper_; }
+  int32_t lower() const { return lower_; }
+  Range* next() const { return next_; }
+  Range* CopyClearLower(Zone* zone) const {
+    return new(zone) Range(kMinInt, upper_);
+  }
+  Range* CopyClearUpper(Zone* zone) const {
+    return new(zone) Range(lower_, kMaxInt);
+  }
+  Range* Copy(Zone* zone) const {
+    Range* result = new(zone) Range(lower_, upper_);
+    result->set_can_be_minus_zero(CanBeMinusZero());
+    return result;
+  }
+  int32_t Mask() const;
+  void set_can_be_minus_zero(bool b) { can_be_minus_zero_ = b; }
+  bool CanBeMinusZero() const { return CanBeZero() && can_be_minus_zero_; }
+  bool CanBeZero() const { return upper_ >= 0 && lower_ <= 0; }
+  bool CanBeNegative() const { return lower_ < 0; }
+  bool CanBePositive() const { return upper_ > 0; }
+  bool Includes(int value) const { return lower_ <= value && upper_ >= value; }
+  bool IsMostGeneric() const {
+    return lower_ == kMinInt && upper_ == kMaxInt && CanBeMinusZero();
+  }
+  bool IsInSmiRange() const {
+    return lower_ >= Smi::kMinValue && upper_ <= Smi::kMaxValue;
+  }
+  void ClampToSmi() {
+    lower_ = Max(lower_, Smi::kMinValue);
+    upper_ = Min(upper_, Smi::kMaxValue);
+  }
+  void KeepOrder();
+#ifdef DEBUG
+  void Verify() const;
+#endif
+
+  void StackUpon(Range* other) {
+    Intersect(other);
+    next_ = other;
+  }
+
+  void Intersect(Range* other);
+  void Union(Range* other);
+  void CombinedMax(Range* other);
+  void CombinedMin(Range* other);
+
+  void AddConstant(int32_t value);
+  void Sar(int32_t value);
+  void Shl(int32_t value);
+  bool AddAndCheckOverflow(const Representation& r, Range* other);
+  bool SubAndCheckOverflow(const Representation& r, Range* other);
+  bool MulAndCheckOverflow(const Representation& r, Range* other);
+
+ private:
+  int32_t lower_;
+  int32_t upper_;
+  Range* next_;
+  bool can_be_minus_zero_;
+};
+
+
+class HUseListNode: public ZoneObject {
+ public:
+  HUseListNode(HValue* value, int index, HUseListNode* tail)
+      : tail_(tail), value_(value), index_(index) {
+  }
+
+  HUseListNode* tail();
+  HValue* value() const { return value_; }
+  int index() const { return index_; }
+
+  void set_tail(HUseListNode* list) { tail_ = list; }
+
+#ifdef DEBUG
+  void Zap() {
+    tail_ = reinterpret_cast<HUseListNode*>(1);
+    value_ = NULL;
+    index_ = -1;
+  }
+#endif
+
+ private:
+  HUseListNode* tail_;
+  HValue* value_;
+  int index_;
+};
+
+
+// We reuse use list nodes behind the scenes as uses are added and deleted.
+// This class is the safe way to iterate uses while deleting them.
+class HUseIterator final BASE_EMBEDDED {
+ public:
+  bool Done() { return current_ == NULL; }
+  void Advance();
+
+  HValue* value() {
+    DCHECK(!Done());
+    return value_;
+  }
+
+  int index() {
+    DCHECK(!Done());
+    return index_;
+  }
+
+ private:
+  explicit HUseIterator(HUseListNode* head);
+
+  HUseListNode* current_;
+  HUseListNode* next_;
+  HValue* value_;
+  int index_;
+
+  friend class HValue;
+};
+
+
+// All tracked flags should appear before untracked ones.
+enum GVNFlag {
+  // Declare global value numbering flags.
+#define DECLARE_FLAG(Type) k##Type,
+  GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
+  GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
+#undef DECLARE_FLAG
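+  // COUNT_FLAG expands to "+ 1" once per list entry; with the single tracked
+  // flag above (NewSpacePromotion), kNumberOfTrackedSideEffects evaluates to
+  // 0 + 1 = 1.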
+#define COUNT_FLAG(Type) + 1
+  kNumberOfTrackedSideEffects = 0 GVN_TRACKED_FLAG_LIST(COUNT_FLAG),
+  kNumberOfUntrackedSideEffects = 0 GVN_UNTRACKED_FLAG_LIST(COUNT_FLAG),
+#undef COUNT_FLAG
+  kNumberOfFlags = kNumberOfTrackedSideEffects + kNumberOfUntrackedSideEffects
+};
+
+
+static inline GVNFlag GVNFlagFromInt(int i) {
+  DCHECK(i >= 0);
+  DCHECK(i < kNumberOfFlags);
+  return static_cast<GVNFlag>(i);
+}
+
+
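+// Holds an index expression decomposed into a base value, a constant offset
+// and a shift scale (roughly (base << scale) + offset), as consumed by
+// bounds-check elimination via HValue::TryDecompose().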
+class DecompositionResult final BASE_EMBEDDED {
+ public:
+  DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
+
+  HValue* base() { return base_; }
+  int offset() { return offset_; }
+  int scale() { return scale_; }
+
+  bool Apply(HValue* other_base, int other_offset, int other_scale = 0) {
+    if (base_ == NULL) {
+      base_ = other_base;
+      offset_ = other_offset;
+      scale_ = other_scale;
+      return true;
+    } else {
+      if (scale_ == 0) {
+        base_ = other_base;
+        offset_ += other_offset;
+        scale_ = other_scale;
+        return true;
+      } else {
+        return false;
+      }
+    }
+  }
+
+  void SwapValues(HValue** other_base, int* other_offset, int* other_scale) {
+    swap(&base_, other_base);
+    swap(&offset_, other_offset);
+    swap(&scale_, other_scale);
+  }
+
+ private:
+  template <class T> void swap(T* a, T* b) {
+    T c(*a);
+    *a = *b;
+    *b = c;
+  }
+
+  HValue* base_;
+  int offset_;
+  int scale_;
+};
+
+
+typedef EnumSet<GVNFlag, int32_t> GVNFlagSet;
+
+
+class HValue : public ZoneObject {
+ public:
+  static const int kNoNumber = -1;
+
+  enum Flag {
+    kFlexibleRepresentation,
+    kCannotBeTagged,
+    // Participate in Global Value Numbering, i.e. elimination of
+    // unnecessary recomputations. If an instruction sets this flag, it must
+    // implement DataEquals(), which will be used to determine if other
+    // occurrences of the instruction are indeed the same.
+    kUseGVN,
+    // Track instructions that are dominating side effects. If an instruction
+    // sets this flag, it must implement HandleSideEffectDominator() and should
+    // indicate which side effects to track by setting GVN flags.
+    kTrackSideEffectDominators,
+    kCanOverflow,
+    kBailoutOnMinusZero,
+    kCanBeDivByZero,
+    kLeftCanBeMinInt,
+    kLeftCanBeNegative,
+    kLeftCanBePositive,
+    kAllowUndefinedAsNaN,
+    kIsArguments,
+    kTruncatingToInt32,
+    kAllUsesTruncatingToInt32,
+    kTruncatingToSmi,
+    kAllUsesTruncatingToSmi,
+    // Set after an instruction is killed.
+    kIsDead,
+    // Instructions that are allowed to produce full-range unsigned integer
+    // values are marked with the kUint32 flag. If an arithmetic shift or a
+    // load from an EXTERNAL_UINT32_ELEMENTS array is not marked with this
+    // flag, it will deoptimize if the result does not fit into the signed
+    // integer range. HGraph::ComputeSafeUint32Operations is responsible for
+    // setting this flag.
+    kUint32,
+    kHasNoObservableSideEffects,
+    // Indicates an instruction shouldn't be replaced by optimization; this
+    // flag is useful to set in cases where recomputing a value is cheaper
+    // than extending the value's live range and spilling it.
+    kCantBeReplaced,
+    // Indicates the instruction is live during dead code elimination.
+    kIsLive,
+
+    // HEnvironmentMarkers are deleted before dead code
+    // elimination takes place, so they can repurpose the kIsLive flag:
+    kEndsLiveRange = kIsLive,
+
+    // TODO(everyone): Don't forget to update this!
+    kLastFlag = kIsLive
+  };
+
+  STATIC_ASSERT(kLastFlag < kBitsPerInt);
+
+  static HValue* cast(HValue* value) { return value; }
+
+  enum Opcode {
+    // Declare a unique enum value for each hydrogen instruction.
+  #define DECLARE_OPCODE(type) k##type,
+    HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kPhi
+  #undef DECLARE_OPCODE
+  };
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual predicates for each concrete HInstruction or HValue.
+  #define DECLARE_PREDICATE(type) \
+    bool Is##type() const { return opcode() == k##type; }
+    HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+  #undef DECLARE_PREDICATE
+    bool IsPhi() const { return opcode() == kPhi; }
+
+  // Declare virtual predicates for abstract HInstruction or HValue.
+  #define DECLARE_PREDICATE(type) \
+    virtual bool Is##type() const { return false; }
+    HYDROGEN_ABSTRACT_INSTRUCTION_LIST(DECLARE_PREDICATE)
+  #undef DECLARE_PREDICATE
+
+  bool IsBitwiseBinaryShift() {
+    return IsShl() || IsShr() || IsSar();
+  }
+
+  explicit HValue(HType type = HType::Tagged())
+      : block_(NULL),
+        id_(kNoNumber),
+        type_(type),
+        use_list_(NULL),
+        range_(NULL),
+#ifdef DEBUG
+        range_poisoned_(false),
+#endif
+        flags_(0) {}
+  virtual ~HValue() {}
+
+  virtual SourcePosition position() const { return SourcePosition::Unknown(); }
+  virtual SourcePosition operand_position(int index) const {
+    return position();
+  }
+
+  HBasicBlock* block() const { return block_; }
+  void SetBlock(HBasicBlock* block);
+
+  // Note: Never call this method for an unlinked value.
+  Isolate* isolate() const;
+
+  int id() const { return id_; }
+  void set_id(int id) { id_ = id; }
+
+  HUseIterator uses() const { return HUseIterator(use_list_); }
+
+  virtual bool EmitAtUses() { return false; }
+
+  Representation representation() const { return representation_; }
+  void ChangeRepresentation(Representation r) {
+    DCHECK(CheckFlag(kFlexibleRepresentation));
+    DCHECK(!CheckFlag(kCannotBeTagged) || !r.IsTagged());
+    RepresentationChanged(r);
+    representation_ = r;
+    if (r.IsTagged()) {
+      // Tagged is the bottom of the lattice, don't go any further.
+      ClearFlag(kFlexibleRepresentation);
+    }
+  }
+  virtual void AssumeRepresentation(Representation r);
+
+  virtual Representation KnownOptimalRepresentation() {
+    Representation r = representation();
+    if (r.IsTagged()) {
+      HType t = type();
+      if (t.IsSmi()) return Representation::Smi();
+      if (t.IsHeapNumber()) return Representation::Double();
+      if (t.IsHeapObject()) return r;
+      return Representation::None();
+    }
+    return r;
+  }
+
+  HType type() const { return type_; }
+  void set_type(HType new_type) {
+    DCHECK(new_type.IsSubtypeOf(type_));
+    type_ = new_type;
+  }
+
+  // There are HInstructions that do not really change a value; they only
+  // add pieces of information to it (like bounds checks, map checks, smi
+  // checks...). We call these instructions "informative definitions", or
+  // "iDefs". One of an iDef's operands is special because it is the value
+  // that is "transferred" to the output; we call it the "redefined operand".
+  // If an HValue is an iDef, it must override RedefinedOperandIndex() so
+  // that it does not return kNoRedefinedOperand.
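+  // For example, a bounds check redefines the index it checks, so
+  // ActualValue() on the check's result walks back to the original index.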
+  static const int kNoRedefinedOperand = -1;
+  virtual int RedefinedOperandIndex() { return kNoRedefinedOperand; }
+  bool IsInformativeDefinition() {
+    return RedefinedOperandIndex() != kNoRedefinedOperand;
+  }
+  HValue* RedefinedOperand() {
+    int index = RedefinedOperandIndex();
+    return index == kNoRedefinedOperand ? NULL : OperandAt(index);
+  }
+
+  bool CanReplaceWithDummyUses();
+
+  virtual int argument_delta() const { return 0; }
+
+  // A purely informative definition is an iDef that will not emit code and
+  // should therefore be removed from the graph in the RestoreActualValues
+  // phase (so that live ranges will be shorter).
+  virtual bool IsPurelyInformativeDefinition() { return false; }
+
+  // This method must always return the original HValue SSA definition,
+  // regardless of any chain of iDefs of this value.
+  HValue* ActualValue() {
+    HValue* value = this;
+    int index;
+    while ((index = value->RedefinedOperandIndex()) != kNoRedefinedOperand) {
+      value = value->OperandAt(index);
+    }
+    return value;
+  }
+
+  bool IsInteger32Constant();
+  int32_t GetInteger32Constant();
+  bool EqualsInteger32Constant(int32_t value);
+
+  bool IsDefinedAfter(HBasicBlock* other) const;
+
+  // Operands.
+  virtual int OperandCount() const = 0;
+  virtual HValue* OperandAt(int index) const = 0;
+  void SetOperandAt(int index, HValue* value);
+
+  void DeleteAndReplaceWith(HValue* other);
+  void ReplaceAllUsesWith(HValue* other);
+  bool HasNoUses() const { return use_list_ == NULL; }
+  bool HasOneUse() const {
+    return use_list_ != NULL && use_list_->tail() == NULL;
+  }
+  bool HasMultipleUses() const {
+    return use_list_ != NULL && use_list_->tail() != NULL;
+  }
+  int UseCount() const;
+
+  // Mark this HValue as dead and to be removed from other HValues' use lists.
+  void Kill();
+
+  int flags() const { return flags_; }
+  void SetFlag(Flag f) { flags_ |= (1 << f); }
+  void ClearFlag(Flag f) { flags_ &= ~(1 << f); }
+  bool CheckFlag(Flag f) const { return (flags_ & (1 << f)) != 0; }
+  void CopyFlag(Flag f, HValue* other) {
+    if (other->CheckFlag(f)) SetFlag(f);
+  }
+
+  // Returns true if the flag specified is set for all uses, false otherwise.
+  bool CheckUsesForFlag(Flag f) const;
+  // Same as above, but the first use without the flag is returned in *value.
+  bool CheckUsesForFlag(Flag f, HValue** value) const;
+  // Returns true if the flag specified is set for all uses, and this set
+  // of uses is non-empty.
+  bool HasAtLeastOneUseWithFlagAndNoneWithout(Flag f) const;
+
+  GVNFlagSet ChangesFlags() const { return changes_flags_; }
+  GVNFlagSet DependsOnFlags() const { return depends_on_flags_; }
+  void SetChangesFlag(GVNFlag f) { changes_flags_.Add(f); }
+  void SetDependsOnFlag(GVNFlag f) { depends_on_flags_.Add(f); }
+  void ClearChangesFlag(GVNFlag f) { changes_flags_.Remove(f); }
+  void ClearDependsOnFlag(GVNFlag f) { depends_on_flags_.Remove(f); }
+  bool CheckChangesFlag(GVNFlag f) const {
+    return changes_flags_.Contains(f);
+  }
+  bool CheckDependsOnFlag(GVNFlag f) const {
+    return depends_on_flags_.Contains(f);
+  }
+  void SetAllSideEffects() { changes_flags_.Add(AllSideEffectsFlagSet()); }
+  void ClearAllSideEffects() {
+    changes_flags_.Remove(AllSideEffectsFlagSet());
+  }
+  bool HasSideEffects() const {
+    return changes_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
+  }
+  bool HasObservableSideEffects() const {
+    return !CheckFlag(kHasNoObservableSideEffects) &&
+        changes_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
+  }
+
+  GVNFlagSet SideEffectFlags() const {
+    GVNFlagSet result = ChangesFlags();
+    result.Intersect(AllSideEffectsFlagSet());
+    return result;
+  }
+
+  GVNFlagSet ObservableChangesFlags() const {
+    GVNFlagSet result = ChangesFlags();
+    result.Intersect(AllObservableSideEffectsFlagSet());
+    return result;
+  }
+
+  Range* range() const {
+    DCHECK(!range_poisoned_);
+    return range_;
+  }
+  bool HasRange() const {
+    DCHECK(!range_poisoned_);
+    return range_ != NULL;
+  }
+#ifdef DEBUG
+  void PoisonRange() { range_poisoned_ = true; }
+#endif
+  void AddNewRange(Range* r, Zone* zone);
+  void RemoveLastAddedRange();
+  void ComputeInitialRange(Zone* zone);
+
+  // Escape analysis helpers.
+  virtual bool HasEscapingOperandAt(int index) { return true; }
+  virtual bool HasOutOfBoundsAccess(int size) { return false; }
+
+  // Representation helpers.
+  virtual Representation observed_input_representation(int index) {
+    return Representation::None();
+  }
+  virtual Representation RequiredInputRepresentation(int index) = 0;
+  virtual void InferRepresentation(HInferRepresentationPhase* h_infer);
+
+  // This gives the instruction an opportunity to replace itself with an
+  // instruction that does the same in some better way.  To replace an
+  // instruction with a new one, first add the new instruction to the graph,
+  // then return it.  Return NULL to have the instruction deleted.
+  virtual HValue* Canonicalize() { return this; }
+
+  bool Equals(HValue* other);
+  virtual intptr_t Hashcode();
+
+  // Compute unique ids upfront so that they are safe wrt GC and concurrent
+  // compilation.
+  virtual void FinalizeUniqueness() { }
+
+  // Printing support.
+  virtual std::ostream& PrintTo(std::ostream& os) const = 0;  // NOLINT
+
+  const char* Mnemonic() const;
+
+  // Type information helpers.
+  bool HasMonomorphicJSObjectType();
+
+  // TODO(mstarzinger): For now instructions can override this function to
+  // specify statically known types; once HType can convey more information,
+  // it should be based on the HType.
+  virtual Handle<Map> GetMonomorphicJSObjectMap() { return Handle<Map>(); }
+
+  // Updates the inferred type of this instruction and returns true if
+  // it has changed.
+  bool UpdateInferredType();
+
+  virtual HType CalculateInferredType();
+
+  // This function must be overridden for instructions which have the
+  // kTrackSideEffectDominators flag set, to track instructions that are
+  // dominating side effects.
+  // It returns true if it removed an instruction which had side effects.
+  virtual bool HandleSideEffectDominator(GVNFlag side_effect,
+                                         HValue* dominator) {
+    UNREACHABLE();
+    return false;
+  }
+
+  // Check if this instruction has some reason that prevents elimination.
+  bool CannotBeEliminated() const {
+    return HasObservableSideEffects() || !IsDeletable();
+  }
+
+#ifdef DEBUG
+  virtual void Verify() = 0;
+#endif
+
+  virtual bool TryDecompose(DecompositionResult* decomposition) {
+    if (RedefinedOperand() != NULL) {
+      return RedefinedOperand()->TryDecompose(decomposition);
+    } else {
+      return false;
+    }
+  }
+
+  // Returns true conservatively if the program might be able to observe a
+  // ToString() operation on this value.
+  bool ToStringCanBeObserved() const {
+    return ToStringOrToNumberCanBeObserved();
+  }
+
+  // Conservatively returns true if the program might be able to observe a
+  // ToNumber() operation on this value.
+  bool ToNumberCanBeObserved() const {
+    return ToStringOrToNumberCanBeObserved();
+  }
+
+  MinusZeroMode GetMinusZeroMode() {
+    return CheckFlag(kBailoutOnMinusZero)
+        ? FAIL_ON_MINUS_ZERO : TREAT_MINUS_ZERO_AS_ZERO;
+  }
+
+ protected:
+  // This function must be overridden for instructions with flag kUseGVN, to
+  // compare the non-Operand parts of the instruction.
+  virtual bool DataEquals(HValue* other) {
+    UNREACHABLE();
+    return false;
+  }
+
+  bool ToStringOrToNumberCanBeObserved() const {
+    if (type().IsTaggedPrimitive()) return false;
+    if (type().IsJSReceiver()) return true;
+    return !representation().IsSmiOrInteger32() && !representation().IsDouble();
+  }
+
+  virtual Representation RepresentationFromInputs() {
+    return representation();
+  }
+  virtual Representation RepresentationFromUses();
+  Representation RepresentationFromUseRequirements();
+  bool HasNonSmiUse();
+  virtual void UpdateRepresentation(Representation new_rep,
+                                    HInferRepresentationPhase* h_infer,
+                                    const char* reason);
+  void AddDependantsToWorklist(HInferRepresentationPhase* h_infer);
+
+  virtual void RepresentationChanged(Representation to) { }
+
+  virtual Range* InferRange(Zone* zone);
+  virtual void DeleteFromGraph() = 0;
+  virtual void InternalSetOperandAt(int index, HValue* value) = 0;
+  void clear_block() {
+    DCHECK(block_ != NULL);
+    block_ = NULL;
+  }
+
+  void set_representation(Representation r) {
+    DCHECK(representation_.IsNone() && !r.IsNone());
+    representation_ = r;
+  }
+
+  static GVNFlagSet AllFlagSet() {
+    GVNFlagSet result;
+#define ADD_FLAG(Type) result.Add(k##Type);
+    GVN_TRACKED_FLAG_LIST(ADD_FLAG)
+    GVN_UNTRACKED_FLAG_LIST(ADD_FLAG)
+#undef ADD_FLAG
+    return result;
+  }
+
+  // A flag mask to mark an instruction as having arbitrary side effects.
+  static GVNFlagSet AllSideEffectsFlagSet() {
+    GVNFlagSet result = AllFlagSet();
+    result.Remove(kOsrEntries);
+    return result;
+  }
+  friend std::ostream& operator<<(std::ostream& os, const ChangesOf& v);
+
+  // A flag mask of all side effects that can make observable changes in
+  // an executing program (i.e. are not safe to repeat, move or remove).
+  static GVNFlagSet AllObservableSideEffectsFlagSet() {
+    GVNFlagSet result = AllFlagSet();
+    result.Remove(kNewSpacePromotion);
+    result.Remove(kElementsKind);
+    result.Remove(kElementsPointer);
+    result.Remove(kMaps);
+    return result;
+  }
+
+  // Remove the matching use from the use list if present.  Returns the
+  // removed list node or NULL.
+  HUseListNode* RemoveUse(HValue* value, int index);
+
+  void RegisterUse(int index, HValue* new_value);
+
+  HBasicBlock* block_;
+
+  // The id of this instruction in the hydrogen graph, assigned when first
+  // added to the graph. Reflects creation order.
+  int id_;
+
+  Representation representation_;
+  HType type_;
+  HUseListNode* use_list_;
+  Range* range_;
+#ifdef DEBUG
+  bool range_poisoned_;
+#endif
+  int flags_;
+  GVNFlagSet changes_flags_;
+  GVNFlagSet depends_on_flags_;
+
+ private:
+  virtual bool IsDeletable() const { return false; }
+
+  DISALLOW_COPY_AND_ASSIGN(HValue);
+};
+
+// Support for printing various aspects of an HValue.
+struct NameOf {
+  explicit NameOf(const HValue* const v) : value(v) {}
+  const HValue* value;
+};
+
+
+struct TypeOf {
+  explicit TypeOf(const HValue* const v) : value(v) {}
+  const HValue* value;
+};
+
+
+struct ChangesOf {
+  explicit ChangesOf(const HValue* const v) : value(v) {}
+  const HValue* value;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const HValue& v);
+std::ostream& operator<<(std::ostream& os, const NameOf& v);
+std::ostream& operator<<(std::ostream& os, const TypeOf& v);
+std::ostream& operator<<(std::ostream& os, const ChangesOf& v);
+
+
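+// Helper macros that declare a zone-allocated static New() factory on an
+// instruction class; the _WITH_CONTEXT_ variants additionally forward the
+// context as the first constructor argument.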
+#define DECLARE_INSTRUCTION_FACTORY_P0(I)                        \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context) { \
+    return new (zone) I();                                       \
+  }
+
+#define DECLARE_INSTRUCTION_FACTORY_P1(I, P1)                           \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1) { \
+    return new (zone) I(p1);                                            \
+  }
+
+#define DECLARE_INSTRUCTION_FACTORY_P2(I, P1, P2)                              \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2) { \
+    return new (zone) I(p1, p2);                                               \
+  }
+
+#define DECLARE_INSTRUCTION_FACTORY_P3(I, P1, P2, P3)                        \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+                P3 p3) {                                                     \
+    return new (zone) I(p1, p2, p3);                                         \
+  }
+
+#define DECLARE_INSTRUCTION_FACTORY_P4(I, P1, P2, P3, P4)                    \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+                P3 p3, P4 p4) {                                              \
+    return new (zone) I(p1, p2, p3, p4);                                     \
+  }
+
+#define DECLARE_INSTRUCTION_FACTORY_P5(I, P1, P2, P3, P4, P5)                \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+                P3 p3, P4 p4, P5 p5) {                                       \
+    return new (zone) I(p1, p2, p3, p4, p5);                                 \
+  }
+
+#define DECLARE_INSTRUCTION_FACTORY_P6(I, P1, P2, P3, P4, P5, P6)            \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+                P3 p3, P4 p4, P5 p5, P6 p6) {                                \
+    return new (zone) I(p1, p2, p3, p4, p5, p6);                             \
+  }
+
+#define DECLARE_INSTRUCTION_FACTORY_P7(I, P1, P2, P3, P4, P5, P6, P7)        \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+                P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {                         \
+    return new (zone) I(p1, p2, p3, p4, p5, p6, p7);                         \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P0(I)           \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context) { \
+    return new (zone) I(context);                                \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(I, P1)              \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1) { \
+    return new (zone) I(context, p1);                                   \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(I, P1, P2)                 \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2) { \
+    return new (zone) I(context, p1, p2);                                      \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(I, P1, P2, P3)           \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+                P3 p3) {                                                     \
+    return new (zone) I(context, p1, p2, p3);                                \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(I, P1, P2, P3, P4)       \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+                P3 p3, P4 p4) {                                              \
+    return new (zone) I(context, p1, p2, p3, p4);                            \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(I, P1, P2, P3, P4, P5)   \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2, \
+                P3 p3, P4 p4, P5 p5) {                                       \
+    return new (zone) I(context, p1, p2, p3, p4, p5);                        \
+  }
+
+#define DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(I, P1, P2, P3, P4, P5, P6) \
+  static I* New(Isolate* isolate, Zone* zone, HValue* context, P1 p1, P2 p2,   \
+                P3 p3, P4 p4, P5 p5, P6 p6) {                                  \
+    return new (zone) I(context, p1, p2, p3, p4, p5, p6);                      \
+  }
+
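+// As a minimal illustration (HFoo and its HValue* parameter are
+// hypothetical), DECLARE_INSTRUCTION_FACTORY_P1(HFoo, HValue*) expands to:
+//
+//   static HFoo* New(Isolate* isolate, Zone* zone, HValue* context,
+//                    HValue* p1) {
+//     return new (zone) HFoo(p1);  // Zone-allocated; freed with the zone.
+//   }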
+
+// A helper class to represent per-operand position information attached to
+// an HInstruction in a compact form. Uses tagging to distinguish between the
+// case when only the instruction's position is available and the case when
+// the operands' positions are also available.
+// In the first case it contains the instruction's position as a tagged value.
+// In the second case it points to an array which contains the instruction's
+// position and the operands' positions.
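+// For example, a raw position of 42 is stored as the tagged value
+// (42 << 1) | 1 == 85; pointers are at least 2-byte aligned, so an even
+// data_ value can safely be reinterpreted as the SourcePosition array.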
+class HPositionInfo {
+ public:
+  explicit HPositionInfo(int pos) : data_(TagPosition(pos)) { }
+
+  SourcePosition position() const {
+    if (has_operand_positions()) {
+      return operand_positions()[kInstructionPosIndex];
+    }
+    return SourcePosition::FromRaw(static_cast<int>(UntagPosition(data_)));
+  }
+
+  void set_position(SourcePosition pos) {
+    if (has_operand_positions()) {
+      operand_positions()[kInstructionPosIndex] = pos;
+    } else {
+      data_ = TagPosition(pos.raw());
+    }
+  }
+
+  void ensure_storage_for_operand_positions(Zone* zone, int operand_count) {
+    if (has_operand_positions()) {
+      return;
+    }
+
+    const int length = kFirstOperandPosIndex + operand_count;
+    SourcePosition* positions = zone->NewArray<SourcePosition>(length);
+    for (int i = 0; i < length; i++) {
+      positions[i] = SourcePosition::Unknown();
+    }
+
+    const SourcePosition pos = position();
+    data_ = reinterpret_cast<intptr_t>(positions);
+    set_position(pos);
+
+    DCHECK(has_operand_positions());
+  }
+
+  SourcePosition operand_position(int idx) const {
+    if (!has_operand_positions()) {
+      return position();
+    }
+    return *operand_position_slot(idx);
+  }
+
+  void set_operand_position(int idx, SourcePosition pos) {
+    *operand_position_slot(idx) = pos;
+  }
+
+ private:
+  static const intptr_t kInstructionPosIndex = 0;
+  static const intptr_t kFirstOperandPosIndex = 1;
+
+  SourcePosition* operand_position_slot(int idx) const {
+    DCHECK(has_operand_positions());
+    return &(operand_positions()[kFirstOperandPosIndex + idx]);
+  }
+
+  bool has_operand_positions() const {
+    return !IsTaggedPosition(data_);
+  }
+
+  SourcePosition* operand_positions() const {
+    DCHECK(has_operand_positions());
+    return reinterpret_cast<SourcePosition*>(data_);
+  }
+
+  static const intptr_t kPositionTag = 1;
+  static const intptr_t kPositionShift = 1;
+  static bool IsTaggedPosition(intptr_t val) {
+    return (val & kPositionTag) != 0;
+  }
+  static intptr_t UntagPosition(intptr_t val) {
+    DCHECK(IsTaggedPosition(val));
+    return val >> kPositionShift;
+  }
+  static intptr_t TagPosition(intptr_t val) {
+    const intptr_t result = (val << kPositionShift) | kPositionTag;
+    DCHECK(UntagPosition(result) == val);
+    return result;
+  }
+
+  intptr_t data_;
+};
+
+
+class HInstruction : public HValue {
+ public:
+  HInstruction* next() const { return next_; }
+  HInstruction* previous() const { return previous_; }
+
+  std::ostream& PrintTo(std::ostream& os) const override;          // NOLINT
+  virtual std::ostream& PrintDataTo(std::ostream& os) const;       // NOLINT
+
+  bool IsLinked() const { return block() != NULL; }
+  void Unlink();
+
+  void InsertBefore(HInstruction* next);
+
+  template<class T> T* Prepend(T* instr) {
+    instr->InsertBefore(this);
+    return instr;
+  }
+
+  void InsertAfter(HInstruction* previous);
+
+  template<class T> T* Append(T* instr) {
+    instr->InsertAfter(this);
+    return instr;
+  }
+
+  // The position is a write-once variable.
+  SourcePosition position() const override {
+    return SourcePosition(position_.position());
+  }
+  bool has_position() const {
+    return !position().IsUnknown();
+  }
+  void set_position(SourcePosition position) {
+    DCHECK(!has_position());
+    DCHECK(!position.IsUnknown());
+    position_.set_position(position);
+  }
+
+  SourcePosition operand_position(int index) const override {
+    const SourcePosition pos = position_.operand_position(index);
+    return pos.IsUnknown() ? position() : pos;
+  }
+  void set_operand_position(Zone* zone, int index, SourcePosition pos) {
+    DCHECK(0 <= index && index < OperandCount());
+    position_.ensure_storage_for_operand_positions(zone, OperandCount());
+    position_.set_operand_position(index, pos);
+  }
+
+  bool Dominates(HInstruction* other);
+  bool CanTruncateToSmi() const { return CheckFlag(kTruncatingToSmi); }
+  bool CanTruncateToInt32() const { return CheckFlag(kTruncatingToInt32); }
+
+  virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
+
+#ifdef DEBUG
+  void Verify() override;
+#endif
+
+  bool CanDeoptimize();
+
+  virtual bool HasStackCheck() { return false; }
+
+  DECLARE_ABSTRACT_INSTRUCTION(Instruction)
+
+ protected:
+  explicit HInstruction(HType type = HType::Tagged())
+      : HValue(type),
+        next_(NULL),
+        previous_(NULL),
+        position_(RelocInfo::kNoPosition) {
+    SetDependsOnFlag(kOsrEntries);
+  }
+
+  void DeleteFromGraph() override { Unlink(); }
+
+ private:
+  void InitializeAsFirst(HBasicBlock* block) {
+    DCHECK(!IsLinked());
+    SetBlock(block);
+  }
+
+  HInstruction* next_;
+  HInstruction* previous_;
+  HPositionInfo position_;
+
+  friend class HBasicBlock;
+};
+
+
+template<int V>
+class HTemplateInstruction : public HInstruction {
+ public:
+  int OperandCount() const final { return V; }
+  HValue* OperandAt(int i) const final { return inputs_[i]; }
+
+ protected:
+  explicit HTemplateInstruction(HType type = HType::Tagged())
+      : HInstruction(type) {}
+
+  void InternalSetOperandAt(int i, HValue* value) final { inputs_[i] = value; }
+
+ private:
+  EmbeddedContainer<HValue*, V> inputs_;
+};
+
+
+class HControlInstruction : public HInstruction {
+ public:
+  virtual HBasicBlock* SuccessorAt(int i) const = 0;
+  virtual int SuccessorCount() const = 0;
+  virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) {
+    *block = NULL;
+    return false;
+  }
+
+  HBasicBlock* FirstSuccessor() {
+    return SuccessorCount() > 0 ? SuccessorAt(0) : NULL;
+  }
+  HBasicBlock* SecondSuccessor() {
+    return SuccessorCount() > 1 ? SuccessorAt(1) : NULL;
+  }
+
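+  // Inverts the branch by swapping the true and false successors.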
+  void Not() {
+    HBasicBlock* swap = SuccessorAt(0);
+    SetSuccessorAt(0, SuccessorAt(1));
+    SetSuccessorAt(1, swap);
+  }
+
+  DECLARE_ABSTRACT_INSTRUCTION(ControlInstruction)
+};
+
+
+class HSuccessorIterator final BASE_EMBEDDED {
+ public:
+  explicit HSuccessorIterator(const HControlInstruction* instr)
+      : instr_(instr), current_(0) {}
+
+  bool Done() { return current_ >= instr_->SuccessorCount(); }
+  HBasicBlock* Current() { return instr_->SuccessorAt(current_); }
+  void Advance() { current_++; }
+
+ private:
+  const HControlInstruction* instr_;
+  int current_;
+};
+
+
+template<int S, int V>
+class HTemplateControlInstruction : public HControlInstruction {
+ public:
+  int SuccessorCount() const override { return S; }
+  HBasicBlock* SuccessorAt(int i) const override { return successors_[i]; }
+  void SetSuccessorAt(int i, HBasicBlock* block) override {
+    successors_[i] = block;
+  }
+
+  int OperandCount() const override { return V; }
+  HValue* OperandAt(int i) const override { return inputs_[i]; }
+
+ protected:
+  void InternalSetOperandAt(int i, HValue* value) override {
+    inputs_[i] = value;
+  }
+
+ private:
+  EmbeddedContainer<HBasicBlock*, S> successors_;
+  EmbeddedContainer<HValue*, V> inputs_;
+};
+
+
+class HBlockEntry final : public HTemplateInstruction<0> {
+ public:
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(BlockEntry)
+};
+
+
+class HDummyUse final : public HTemplateInstruction<1> {
+ public:
+  explicit HDummyUse(HValue* value)
+      : HTemplateInstruction<1>(HType::Smi()) {
+    SetOperandAt(0, value);
+    // Pretend to be a Smi so that the HChange instructions inserted
+    // before any use generate as little code as possible.
+    set_representation(Representation::Tagged());
+  }
+
+  HValue* value() const { return OperandAt(0); }
+
+  bool HasEscapingOperandAt(int index) override { return false; }
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse)
+};
+
+
+// Inserts an int3/stop break instruction for debugging purposes.
+class HDebugBreak final : public HTemplateInstruction<0> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak)
+};
+
+
+class HPrologue final : public HTemplateInstruction<0> {
+ public:
+  static HPrologue* New(Zone* zone) { return new (zone) HPrologue(); }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Prologue)
+};
+
+
+class HGoto final : public HTemplateControlInstruction<1, 0> {
+ public:
+  explicit HGoto(HBasicBlock* target) {
+    SetSuccessorAt(0, target);
+  }
+
+  bool KnownSuccessorBlock(HBasicBlock** block) override {
+    *block = FirstSuccessor();
+    return true;
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(Goto)
+};
+
+
+class HDeoptimize final : public HTemplateControlInstruction<1, 0> {
+ public:
+  static HDeoptimize* New(Isolate* isolate, Zone* zone, HValue* context,
+                          Deoptimizer::DeoptReason reason,
+                          Deoptimizer::BailoutType type,
+                          HBasicBlock* unreachable_continuation) {
+    return new(zone) HDeoptimize(reason, type, unreachable_continuation);
+  }
+
+  bool KnownSuccessorBlock(HBasicBlock** block) override {
+    *block = NULL;
+    return true;
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  Deoptimizer::DeoptReason reason() const { return reason_; }
+  Deoptimizer::BailoutType type() { return type_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize)
+
+ private:
+  explicit HDeoptimize(Deoptimizer::DeoptReason reason,
+                       Deoptimizer::BailoutType type,
+                       HBasicBlock* unreachable_continuation)
+      : reason_(reason), type_(type) {
+    SetSuccessorAt(0, unreachable_continuation);
+  }
+
+  Deoptimizer::DeoptReason reason_;
+  Deoptimizer::BailoutType type_;
+};
+
+
+class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
+ public:
+  HUnaryControlInstruction(HValue* value,
+                           HBasicBlock* true_target,
+                           HBasicBlock* false_target) {
+    SetOperandAt(0, value);
+    SetSuccessorAt(0, true_target);
+    SetSuccessorAt(1, false_target);
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  HValue* value() const { return OperandAt(0); }
+};
+
+
+class HBranch final : public HUnaryControlInstruction {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
+  DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*,
+                                 ToBooleanStub::Types);
+  DECLARE_INSTRUCTION_FACTORY_P4(HBranch, HValue*,
+                                 ToBooleanStub::Types,
+                                 HBasicBlock*, HBasicBlock*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+  Representation observed_input_representation(int index) override;
+
+  bool KnownSuccessorBlock(HBasicBlock** block) override;
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  ToBooleanStub::Types expected_input_types() const {
+    return expected_input_types_;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch)
+
+ private:
+  HBranch(HValue* value,
+          ToBooleanStub::Types expected_input_types = ToBooleanStub::Types(),
+          HBasicBlock* true_target = NULL,
+          HBasicBlock* false_target = NULL)
+      : HUnaryControlInstruction(value, true_target, false_target),
+        expected_input_types_(expected_input_types) {
+    SetFlag(kAllowUndefinedAsNaN);
+  }
+
+  ToBooleanStub::Types expected_input_types_;
+};
+
+
+class HCompareMap final : public HUnaryControlInstruction {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle<Map>);
+  DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
+                                 HBasicBlock*, HBasicBlock*);
+
+  bool KnownSuccessorBlock(HBasicBlock** block) override {
+    if (known_successor_index() != kNoKnownSuccessorIndex) {
+      *block = SuccessorAt(known_successor_index());
+      return true;
+    }
+    *block = NULL;
+    return false;
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  static const int kNoKnownSuccessorIndex = -1;
+  int known_successor_index() const {
+    return KnownSuccessorIndexField::decode(bit_field_) -
+           kInternalKnownSuccessorOffset;
+  }
+  void set_known_successor_index(int index) {
+    DCHECK(index >= 0 - kInternalKnownSuccessorOffset);
+    bit_field_ = KnownSuccessorIndexField::update(
+        bit_field_, index + kInternalKnownSuccessorOffset);
+  }
+
+  Unique<Map> map() const { return map_; }
+  bool map_is_stable() const { return MapIsStableField::decode(bit_field_); }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMap)
+
+ protected:
+  int RedefinedOperandIndex() override { return 0; }
+
+ private:
+  HCompareMap(HValue* value, Handle<Map> map, HBasicBlock* true_target = NULL,
+              HBasicBlock* false_target = NULL)
+      : HUnaryControlInstruction(value, true_target, false_target),
+        bit_field_(KnownSuccessorIndexField::encode(
+                       kNoKnownSuccessorIndex + kInternalKnownSuccessorOffset) |
+                   MapIsStableField::encode(map->is_stable())),
+        map_(Unique<Map>::CreateImmovable(map)) {
+    set_representation(Representation::Tagged());
+  }
+
+  // BitFields can only store unsigned values, so use an offset.
+  // Adding kInternalKnownSuccessorOffset must yield an unsigned value.
+  static const int kInternalKnownSuccessorOffset = 1;
+  STATIC_ASSERT(kNoKnownSuccessorIndex + kInternalKnownSuccessorOffset >= 0);
+
+  class KnownSuccessorIndexField : public BitField<int, 0, 31> {};
+  class MapIsStableField : public BitField<bool, 31, 1> {};
+
+  uint32_t bit_field_;
+  Unique<Map> map_;
+};
+
+
+class HContext final : public HTemplateInstruction<0> {
+ public:
+  static HContext* New(Zone* zone) {
+    return new(zone) HContext();
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Context)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  HContext() {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HReturn final : public HTemplateControlInstruction<0, 3> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HReturn, HValue*, HValue*);
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HReturn, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    // TODO(titzer): require an Int32 input for faster returns.
+    if (index == 2) return Representation::Smi();
+    return Representation::Tagged();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  HValue* value() const { return OperandAt(0); }
+  HValue* context() const { return OperandAt(1); }
+  HValue* parameter_count() const { return OperandAt(2); }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return)
+
+ private:
+  HReturn(HValue* context, HValue* value, HValue* parameter_count = NULL) {
+    SetOperandAt(0, value);
+    SetOperandAt(1, context);
+    SetOperandAt(2, parameter_count);
+  }
+};
+
+
+class HAbnormalExit final : public HTemplateControlInstruction<0, 0> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P0(HAbnormalExit);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AbnormalExit)
+ private:
+  HAbnormalExit() {}
+};
+
+
+class HUnaryOperation : public HTemplateInstruction<1> {
+ public:
+  explicit HUnaryOperation(HValue* value, HType type = HType::Tagged())
+      : HTemplateInstruction<1>(type) {
+    SetOperandAt(0, value);
+  }
+
+  static HUnaryOperation* cast(HValue* value) {
+    return reinterpret_cast<HUnaryOperation*>(value);
+  }
+
+  HValue* value() const { return OperandAt(0); }
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+};
+
+
+class HUseConst final : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HUseConst, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(UseConst)
+
+ private:
+  explicit HUseConst(HValue* old_value) : HUnaryOperation(old_value) { }
+};
+
+
+class HForceRepresentation final : public HTemplateInstruction<1> {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* value,
+                           Representation required_representation);
+
+  HValue* value() const { return OperandAt(0); }
+
+  Representation observed_input_representation(int index) override {
+    // We haven't actually *observed* this, but it's closer to the truth
+    // than 'None'.
+    return representation();  // Same as the output representation.
+  }
+  Representation RequiredInputRepresentation(int index) override {
+    return representation();  // Same as the output representation.
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
+
+ private:
+  HForceRepresentation(HValue* value, Representation required_representation) {
+    SetOperandAt(0, value);
+    set_representation(required_representation);
+  }
+};
+
+
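+// An explicit representation change (e.g. tagged -> int32) inserted where a
+// value's representation must change; the truncation flags record whether a
+// lossy truncation to smi/int32 is allowed.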
+class HChange final : public HUnaryOperation {
+ public:
+  HChange(HValue* value,
+          Representation to,
+          bool is_truncating_to_smi,
+          bool is_truncating_to_int32)
+      : HUnaryOperation(value) {
+    DCHECK(!value->representation().IsNone());
+    DCHECK(!to.IsNone());
+    DCHECK(!value->representation().Equals(to));
+    set_representation(to);
+    SetFlag(kUseGVN);
+    SetFlag(kCanOverflow);
+    if (is_truncating_to_smi && to.IsSmi()) {
+      SetFlag(kTruncatingToSmi);
+      SetFlag(kTruncatingToInt32);
+    }
+    if (is_truncating_to_int32) SetFlag(kTruncatingToInt32);
+    if (value->representation().IsSmi() || value->type().IsSmi()) {
+      set_type(HType::Smi());
+    } else {
+      set_type(HType::TaggedNumber());
+      if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
+    }
+  }
+
+  bool can_convert_undefined_to_nan() {
+    return CheckUsesForFlag(kAllowUndefinedAsNaN);
+  }
+
+  HType CalculateInferredType() override;
+  HValue* Canonicalize() override;
+
+  Representation from() const { return value()->representation(); }
+  Representation to() const { return representation(); }
+  bool deoptimize_on_minus_zero() const {
+    return CheckFlag(kBailoutOnMinusZero);
+  }
+  Representation RequiredInputRepresentation(int index) override {
+    return from();
+  }
+
+  Range* InferRange(Zone* zone) override;
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(Change)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  bool IsDeletable() const override {
+    return !from().IsTagged() || value()->type().IsSmi();
+  }
+};
+
+
+class HClampToUint8 final : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HClampToUint8, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  explicit HClampToUint8(HValue* value)
+      : HUnaryOperation(value) {
+    set_representation(Representation::Integer32());
+    SetFlag(kAllowUndefinedAsNaN);
+    SetFlag(kUseGVN);
+  }
+
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HDoubleBits final : public HUnaryOperation {
+ public:
+  enum Bits { HIGH, LOW };
+  DECLARE_INSTRUCTION_FACTORY_P2(HDoubleBits, HValue*, Bits);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Double();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits)
+
+  Bits bits() { return bits_; }
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    return other->IsDoubleBits() && HDoubleBits::cast(other)->bits() == bits();
+  }
+
+ private:
+  HDoubleBits(HValue* value, Bits bits)
+      : HUnaryOperation(value), bits_(bits) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+  }
+
+  bool IsDeletable() const override { return true; }
+
+  Bits bits_;
+};
+
+
+class HConstructDouble final : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(HConstructDouble, HValue*, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Integer32();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble)
+
+  HValue* hi() { return OperandAt(0); }
+  HValue* lo() { return OperandAt(1); }
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  explicit HConstructDouble(HValue* hi, HValue* lo) {
+    set_representation(Representation::Double());
+    SetFlag(kUseGVN);
+    SetOperandAt(0, hi);
+    SetOperandAt(1, lo);
+  }
+
+  bool IsDeletable() const override { return true; }
+};
+
+
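+// Whether an HSimulate may be merged with an adjacent simulate and removed
+// (see HSimulate::is_candidate_for_removal).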
+enum RemovableSimulate {
+  REMOVABLE_SIMULATE,
+  FIXED_SIMULATE
+};
+
+
+class HSimulate final : public HInstruction {
+ public:
+  HSimulate(BailoutId ast_id, int pop_count, Zone* zone,
+            RemovableSimulate removable)
+      : ast_id_(ast_id),
+        pop_count_(pop_count),
+        values_(2, zone),
+        assigned_indexes_(2, zone),
+        zone_(zone),
+        bit_field_(RemovableField::encode(removable) |
+                   DoneWithReplayField::encode(false)) {}
+  ~HSimulate() {}
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  bool HasAstId() const { return !ast_id_.IsNone(); }
+  BailoutId ast_id() const { return ast_id_; }
+  void set_ast_id(BailoutId id) {
+    DCHECK(!HasAstId());
+    ast_id_ = id;
+  }
+
+  int pop_count() const { return pop_count_; }
+  const ZoneList<HValue*>* values() const { return &values_; }
+  int GetAssignedIndexAt(int index) const {
+    DCHECK(HasAssignedIndexAt(index));
+    return assigned_indexes_[index];
+  }
+  bool HasAssignedIndexAt(int index) const {
+    return assigned_indexes_[index] != kNoIndex;
+  }
+  void AddAssignedValue(int index, HValue* value) {
+    AddValue(index, value);
+  }
+  void AddPushedValue(HValue* value) {
+    AddValue(kNoIndex, value);
+  }
+  int ToOperandIndex(int environment_index) {
+    for (int i = 0; i < assigned_indexes_.length(); ++i) {
+      if (assigned_indexes_[i] == environment_index) return i;
+    }
+    return -1;
+  }
+  int OperandCount() const override { return values_.length(); }
+  HValue* OperandAt(int index) const override { return values_[index]; }
+
+  bool HasEscapingOperandAt(int index) override { return false; }
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  void MergeWith(ZoneList<HSimulate*>* list);
+  bool is_candidate_for_removal() {
+    return RemovableField::decode(bit_field_) == REMOVABLE_SIMULATE;
+  }
+
+  // Replay effects of this instruction on the given environment.
+  void ReplayEnvironment(HEnvironment* env);
+
+  DECLARE_CONCRETE_INSTRUCTION(Simulate)
+
+#ifdef DEBUG
+  void Verify() override;
+  void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
+  Handle<JSFunction> closure() const { return closure_; }
+#endif
+
+ protected:
+  void InternalSetOperandAt(int index, HValue* value) override {
+    values_[index] = value;
+  }
+
+ private:
+  static const int kNoIndex = -1;
+  void AddValue(int index, HValue* value) {
+    assigned_indexes_.Add(index, zone_);
+    // Resize the list of pushed values.
+    values_.Add(NULL, zone_);
+    // Set the operand through the base method in HValue to make sure that the
+    // use lists are correctly updated.
+    SetOperandAt(values_.length() - 1, value);
+  }
+  bool HasValueForIndex(int index) {
+    for (int i = 0; i < assigned_indexes_.length(); ++i) {
+      if (assigned_indexes_[i] == index) return true;
+    }
+    return false;
+  }
+  bool is_done_with_replay() const {
+    return DoneWithReplayField::decode(bit_field_);
+  }
+  void set_done_with_replay() {
+    bit_field_ = DoneWithReplayField::update(bit_field_, true);
+  }
+
+  class RemovableField : public BitField<RemovableSimulate, 0, 1> {};
+  class DoneWithReplayField : public BitField<bool, 1, 1> {};
+
+  BailoutId ast_id_;
+  int pop_count_;
+  ZoneList<HValue*> values_;
+  ZoneList<int> assigned_indexes_;
+  Zone* zone_;
+  uint32_t bit_field_;
+
+#ifdef DEBUG
+  Handle<JSFunction> closure_;
+#endif
+};
+
+
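+// Marks a bind or lookup of an environment slot; used by the environment
+// liveness analysis to compute slot lifetimes.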
+class HEnvironmentMarker final : public HTemplateInstruction<1> {
+ public:
+  enum Kind { BIND, LOOKUP };
+
+  DECLARE_INSTRUCTION_FACTORY_P2(HEnvironmentMarker, Kind, int);
+
+  Kind kind() const { return kind_; }
+  int index() const { return index_; }
+  HSimulate* next_simulate() { return next_simulate_; }
+  void set_next_simulate(HSimulate* simulate) {
+    next_simulate_ = simulate;
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+#ifdef DEBUG
+  void set_closure(Handle<JSFunction> closure) {
+    DCHECK(closure_.is_null());
+    DCHECK(!closure.is_null());
+    closure_ = closure;
+  }
+  Handle<JSFunction> closure() const { return closure_; }
+#endif
+
+  DECLARE_CONCRETE_INSTRUCTION(EnvironmentMarker)
+
+ private:
+  HEnvironmentMarker(Kind kind, int index)
+      : kind_(kind), index_(index), next_simulate_(NULL) { }
+
+  Kind kind_;
+  int index_;
+  HSimulate* next_simulate_;
+
+#ifdef DEBUG
+  Handle<JSFunction> closure_;
+#endif
+};
+
+
+class HStackCheck final : public HTemplateInstruction<1> {
+ public:
+  enum Type {
+    kFunctionEntry,
+    kBackwardsBranch
+  };
+
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HStackCheck, Type);
+
+  HValue* context() { return OperandAt(0); }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  void Eliminate() {
+    // The stack check eliminator might try to eliminate the same stack
+    // check instruction multiple times.
+    if (IsLinked()) {
+      DeleteAndReplaceWith(NULL);
+    }
+  }
+
+  bool is_function_entry() { return type_ == kFunctionEntry; }
+  bool is_backwards_branch() { return type_ == kBackwardsBranch; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck)
+
+ private:
+  HStackCheck(HValue* context, Type type) : type_(type) {
+    SetOperandAt(0, context);
+    SetChangesFlag(kNewSpacePromotion);
+  }
+
+  Type type_;
+};
+
+
+enum InliningKind {
+  NORMAL_RETURN,          // Drop the function from the environment on return.
+  CONSTRUCT_CALL_RETURN,  // Either use allocated receiver or return value.
+  GETTER_CALL_RETURN,     // Returning from a getter, need to restore context.
+  SETTER_CALL_RETURN      // Use the RHS of the assignment as the return value.
+};
+
+
+class HArgumentsObject;
+class HConstant;
+
+
+class HEnterInlined final : public HTemplateInstruction<0> {
+ public:
+  static HEnterInlined* New(Isolate* isolate, Zone* zone, HValue* context,
+                            BailoutId return_id, Handle<JSFunction> closure,
+                            HConstant* closure_context, int arguments_count,
+                            FunctionLiteral* function,
+                            InliningKind inlining_kind, Variable* arguments_var,
+                            HArgumentsObject* arguments_object) {
+    return new (zone) HEnterInlined(return_id, closure, closure_context,
+                                    arguments_count, function, inlining_kind,
+                                    arguments_var, arguments_object, zone);
+  }
+
+  void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
+  ZoneList<HBasicBlock*>* return_targets() { return &return_targets_; }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Handle<SharedFunctionInfo> shared() const { return shared_; }
+  Handle<JSFunction> closure() const { return closure_; }
+  HConstant* closure_context() const { return closure_context_; }
+  int arguments_count() const { return arguments_count_; }
+  bool arguments_pushed() const { return arguments_pushed_; }
+  void set_arguments_pushed() { arguments_pushed_ = true; }
+  FunctionLiteral* function() const { return function_; }
+  InliningKind inlining_kind() const { return inlining_kind_; }
+  BailoutId ReturnId() const { return return_id_; }
+  int inlining_id() const { return inlining_id_; }
+  void set_inlining_id(int inlining_id) { inlining_id_ = inlining_id; }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  Variable* arguments_var() { return arguments_var_; }
+  HArgumentsObject* arguments_object() { return arguments_object_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
+
+ private:
+  HEnterInlined(BailoutId return_id, Handle<JSFunction> closure,
+                HConstant* closure_context, int arguments_count,
+                FunctionLiteral* function, InliningKind inlining_kind,
+                Variable* arguments_var, HArgumentsObject* arguments_object,
+                Zone* zone)
+      : return_id_(return_id),
+        shared_(handle(closure->shared())),
+        closure_(closure),
+        closure_context_(closure_context),
+        arguments_count_(arguments_count),
+        arguments_pushed_(false),
+        function_(function),
+        inlining_kind_(inlining_kind),
+        inlining_id_(0),
+        arguments_var_(arguments_var),
+        arguments_object_(arguments_object),
+        return_targets_(2, zone) {}
+
+  BailoutId return_id_;
+  Handle<SharedFunctionInfo> shared_;
+  Handle<JSFunction> closure_;
+  HConstant* closure_context_;
+  int arguments_count_;
+  bool arguments_pushed_;
+  FunctionLiteral* function_;
+  InliningKind inlining_kind_;
+  int inlining_id_;
+  Variable* arguments_var_;
+  HArgumentsObject* arguments_object_;
+  ZoneList<HBasicBlock*> return_targets_;
+};
+
+
+class HLeaveInlined final : public HTemplateInstruction<0> {
+ public:
+  HLeaveInlined(HEnterInlined* entry,
+                int drop_count)
+      : entry_(entry),
+        drop_count_(drop_count) { }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  int argument_delta() const override {
+    return entry_->arguments_pushed() ? -drop_count_ : 0;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LeaveInlined)
+
+ private:
+  HEnterInlined* entry_;
+  int drop_count_;
+};
+
+
+class HPushArguments final : public HInstruction {
+ public:
+  static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context) {
+    return new(zone) HPushArguments(zone);
+  }
+  static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
+                             HValue* arg1) {
+    HPushArguments* instr = new(zone) HPushArguments(zone);
+    instr->AddInput(arg1);
+    return instr;
+  }
+  static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
+                             HValue* arg1, HValue* arg2) {
+    HPushArguments* instr = new(zone) HPushArguments(zone);
+    instr->AddInput(arg1);
+    instr->AddInput(arg2);
+    return instr;
+  }
+  static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
+                             HValue* arg1, HValue* arg2, HValue* arg3) {
+    HPushArguments* instr = new(zone) HPushArguments(zone);
+    instr->AddInput(arg1);
+    instr->AddInput(arg2);
+    instr->AddInput(arg3);
+    return instr;
+  }
+  static HPushArguments* New(Isolate* isolate, Zone* zone, HValue* context,
+                             HValue* arg1, HValue* arg2, HValue* arg3,
+                             HValue* arg4) {
+    HPushArguments* instr = new(zone) HPushArguments(zone);
+    instr->AddInput(arg1);
+    instr->AddInput(arg2);
+    instr->AddInput(arg3);
+    instr->AddInput(arg4);
+    return instr;
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  int argument_delta() const override { return inputs_.length(); }
+  HValue* argument(int i) { return OperandAt(i); }
+
+  int OperandCount() const final { return inputs_.length(); }
+  HValue* OperandAt(int i) const final { return inputs_[i]; }
+
+  void AddInput(HValue* value);
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArguments)
+
+ protected:
+  void InternalSetOperandAt(int i, HValue* value) final { inputs_[i] = value; }
+
+ private:
+  explicit HPushArguments(Zone* zone)
+      : HInstruction(HType::Tagged()), inputs_(4, zone) {
+    set_representation(Representation::Tagged());
+  }
+
+  ZoneList<HValue*> inputs_;
+};
+
+
+class HThisFunction final : public HTemplateInstruction<0> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  HThisFunction() {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HDeclareGlobals final : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HDeclareGlobals,
+                                              Handle<FixedArray>,
+                                              int);
+
+  HValue* context() { return OperandAt(0); }
+  Handle<FixedArray> pairs() const { return pairs_; }
+  int flags() const { return flags_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+ private:
+  HDeclareGlobals(HValue* context,
+                  Handle<FixedArray> pairs,
+                  int flags)
+      : HUnaryOperation(context),
+        pairs_(pairs),
+        flags_(flags) {
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  Handle<FixedArray> pairs_;
+  int flags_;
+};
+
+
+template <int V>
+class HCall : public HTemplateInstruction<V> {
+ public:
+  // The argument count includes the receiver.
+  explicit HCall(int argument_count) : argument_count_(argument_count) {
+    this->set_representation(Representation::Tagged());
+    this->SetAllSideEffects();
+  }
+
+  HType CalculateInferredType() final { return HType::Tagged(); }
+
+  virtual int argument_count() const {
+    return argument_count_;
+  }
+
+  int argument_delta() const override { return -argument_count(); }
+
+ private:
+  int argument_count_;
+};
+
+
+class HUnaryCall : public HCall<1> {
+ public:
+  HUnaryCall(HValue* value, int argument_count)
+      : HCall<1>(argument_count) {
+    SetOperandAt(0, value);
+  }
+
+  Representation RequiredInputRepresentation(int index) final {
+    return Representation::Tagged();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  HValue* value() const { return OperandAt(0); }
+};
+
+
+class HBinaryCall : public HCall<2> {
+ public:
+  HBinaryCall(HValue* first, HValue* second, int argument_count)
+      : HCall<2>(argument_count) {
+    SetOperandAt(0, first);
+    SetOperandAt(1, second);
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) final {
+    return Representation::Tagged();
+  }
+
+  HValue* first() const { return OperandAt(0); }
+  HValue* second() const { return OperandAt(1); }
+};
+
+
+class HCallJSFunction final : public HCall<1> {
+ public:
+  static HCallJSFunction* New(Isolate* isolate, Zone* zone, HValue* context,
+                              HValue* function, int argument_count);
+
+  HValue* function() const { return OperandAt(0); }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) final {
+    DCHECK(index == 0);
+    return Representation::Tagged();
+  }
+
+  bool HasStackCheck() final { return has_stack_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction)
+
+ private:
+  // The argument count includes the receiver.
+  HCallJSFunction(HValue* function,
+                  int argument_count,
+                  bool has_stack_check)
+      : HCall<1>(argument_count),
+        has_stack_check_(has_stack_check) {
+      SetOperandAt(0, function);
+  }
+
+  bool has_stack_check_;
+};
+
+
+enum CallMode { NORMAL_CALL, TAIL_CALL };
+
+
+class HCallWithDescriptor final : public HInstruction {
+ public:
+  static HCallWithDescriptor* New(Isolate* isolate, Zone* zone, HValue* context,
+                                  HValue* target, int argument_count,
+                                  CallInterfaceDescriptor descriptor,
+                                  const Vector<HValue*>& operands,
+                                  CallMode call_mode = NORMAL_CALL) {
+    HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
+        target, argument_count, descriptor, operands, call_mode, zone);
+    DCHECK(operands.length() == res->GetParameterCount());
+    return res;
+  }
+
+  int OperandCount() const final { return values_.length(); }
+  HValue* OperandAt(int index) const final { return values_[index]; }
+
+  Representation RequiredInputRepresentation(int index) final {
+    if (index == 0 || index == 1) {
+      // Operand 0 is the call target; operand 1 is the context.
+      return Representation::Tagged();
+    } else {
+      int par_index = index - 2;
+      DCHECK(par_index < GetParameterCount());
+      return RepresentationFromType(descriptor_.GetParameterType(par_index));
+    }
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor)
+
+  HType CalculateInferredType() final { return HType::Tagged(); }
+
+  bool IsTailCall() const { return call_mode_ == TAIL_CALL; }
+
+  virtual int argument_count() const {
+    return argument_count_;
+  }
+
+  int argument_delta() const override { return -argument_count_; }
+
+  CallInterfaceDescriptor descriptor() const { return descriptor_; }
+
+  HValue* target() {
+    return OperandAt(0);
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+ private:
+  // The argument count includes the receiver.
+  HCallWithDescriptor(HValue* target, int argument_count,
+                      CallInterfaceDescriptor descriptor,
+                      const Vector<HValue*>& operands, CallMode call_mode,
+                      Zone* zone)
+      : descriptor_(descriptor),
+        values_(GetParameterCount() + 1, zone),
+        argument_count_(argument_count),
+        call_mode_(call_mode) {
+    // We can only tail call without any stack arguments.
+    DCHECK(call_mode != TAIL_CALL || argument_count == 0);
+    AddOperand(target, zone);
+    for (int i = 0; i < operands.length(); i++) {
+      AddOperand(operands[i], zone);
+    }
+    this->set_representation(Representation::Tagged());
+    this->SetAllSideEffects();
+  }
+
+  void AddOperand(HValue* v, Zone* zone) {
+    values_.Add(NULL, zone);
+    SetOperandAt(values_.length() - 1, v);
+  }
+
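+  // Register parameters plus one for the implicit context parameter.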
+  int GetParameterCount() const {
+    return descriptor_.GetRegisterParameterCount() + 1;
+  }
+
+  void InternalSetOperandAt(int index, HValue* value) final {
+    values_[index] = value;
+  }
+
+  CallInterfaceDescriptor descriptor_;
+  ZoneList<HValue*> values_;
+  int argument_count_;
+  CallMode call_mode_;
+};
+
+
+class HInvokeFunction final : public HBinaryCall {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
+
+  HInvokeFunction(HValue* context,
+                  HValue* function,
+                  Handle<JSFunction> known_function,
+                  int argument_count)
+      : HBinaryCall(context, function, argument_count),
+        known_function_(known_function) {
+    formal_parameter_count_ =
+        known_function.is_null()
+            ? 0
+            : known_function->shared()->internal_formal_parameter_count();
+    has_stack_check_ = !known_function.is_null() &&
+        (known_function->code()->kind() == Code::FUNCTION ||
+         known_function->code()->kind() == Code::OPTIMIZED_FUNCTION);
+  }
+
+  static HInvokeFunction* New(Isolate* isolate, Zone* zone, HValue* context,
+                              HValue* function,
+                              Handle<JSFunction> known_function,
+                              int argument_count) {
+    return new(zone) HInvokeFunction(context, function,
+                                     known_function, argument_count);
+  }
+
+  HValue* context() { return first(); }
+  HValue* function() { return second(); }
+  Handle<JSFunction> known_function() { return known_function_; }
+  int formal_parameter_count() const { return formal_parameter_count_; }
+
+  bool HasStackCheck() final { return has_stack_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
+
+ private:
+  HInvokeFunction(HValue* context, HValue* function, int argument_count)
+      : HBinaryCall(context, function, argument_count),
+        has_stack_check_(false) {
+  }
+
+  Handle<JSFunction> known_function_;
+  int formal_parameter_count_;
+  bool has_stack_check_;
+};
+
+
+class HCallFunction final : public HBinaryCall {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallFunction, HValue*, int,
+                                              ConvertReceiverMode);
+
+  HValue* context() const { return first(); }
+  HValue* function() const { return second(); }
+
+  ConvertReceiverMode convert_mode() const { return convert_mode_; }
+  FeedbackVectorSlot slot() const { return slot_; }
+  Handle<TypeFeedbackVector> feedback_vector() const {
+    return feedback_vector_;
+  }
+  bool HasVectorAndSlot() const { return !feedback_vector_.is_null(); }
+  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
+                        FeedbackVectorSlot slot) {
+    feedback_vector_ = vector;
+    slot_ = slot;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction)
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  int argument_delta() const override { return -argument_count(); }
+
+ private:
+  HCallFunction(HValue* context, HValue* function, int argument_count,
+                ConvertReceiverMode convert_mode)
+      : HBinaryCall(context, function, argument_count),
+        convert_mode_(convert_mode) {}
+  Handle<TypeFeedbackVector> feedback_vector_;
+  FeedbackVectorSlot slot_;
+  ConvertReceiverMode convert_mode_;
+};
+
+
+class HCallNewArray final : public HBinaryCall {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HCallNewArray, HValue*, int,
+                                              ElementsKind,
+                                              Handle<AllocationSite>);
+
+  HValue* context() { return first(); }
+  HValue* constructor() { return second(); }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  ElementsKind elements_kind() const { return elements_kind_; }
+  Handle<AllocationSite> site() const { return site_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray)
+
+ private:
+  HCallNewArray(HValue* context, HValue* constructor, int argument_count,
+                ElementsKind elements_kind, Handle<AllocationSite> site)
+      : HBinaryCall(context, constructor, argument_count),
+        elements_kind_(elements_kind),
+        site_(site) {}
+
+  ElementsKind elements_kind_;
+  Handle<AllocationSite> site_;
+};
+
+
+class HCallRuntime final : public HCall<1> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallRuntime,
+                                              const Runtime::Function*, int);
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  HValue* context() { return OperandAt(0); }
+  const Runtime::Function* function() const { return c_function_; }
+  SaveFPRegsMode save_doubles() const { return save_doubles_; }
+  void set_save_doubles(SaveFPRegsMode save_doubles) {
+    save_doubles_ = save_doubles;
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime)
+
+ private:
+  HCallRuntime(HValue* context, const Runtime::Function* c_function,
+               int argument_count)
+      : HCall<1>(argument_count),
+        c_function_(c_function),
+        save_doubles_(kDontSaveFPRegs) {
+    SetOperandAt(0, context);
+  }
+
+  const Runtime::Function* c_function_;
+  SaveFPRegsMode save_doubles_;
+};
+
+
+class HMapEnumLength final : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HMapEnumLength, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  explicit HMapEnumLength(HValue* value)
+      : HUnaryOperation(value, HType::Smi()) {
+    set_representation(Representation::Smi());
+    SetFlag(kUseGVN);
+    SetDependsOnFlag(kMaps);
+  }
+
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HUnaryMathOperation final : public HTemplateInstruction<2> {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* value, BuiltinFunctionId op);
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    if (index == 0) {
+      return Representation::Tagged();
+    } else {
+      switch (op_) {
+        case kMathFloor:
+        case kMathRound:
+        case kMathFround:
+        case kMathSqrt:
+        case kMathPowHalf:
+        case kMathLog:
+        case kMathExp:
+          return Representation::Double();
+        case kMathAbs:
+          return representation();
+        case kMathClz32:
+          return Representation::Integer32();
+        default:
+          UNREACHABLE();
+          return Representation::None();
+      }
+    }
+  }
+
+  Range* InferRange(Zone* zone) override;
+
+  HValue* Canonicalize() override;
+  Representation RepresentationFromUses() override;
+  Representation RepresentationFromInputs() override;
+
+  BuiltinFunctionId op() const { return op_; }
+  const char* OpName() const;
+
+  DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
+    return op_ == b->op();
+  }
+
+ private:
+  // Indicates if we support a double (and int32) output for Math.floor and
+  // Math.round.
+  bool SupportsFlexibleFloorAndRound() const {
+#ifdef V8_TARGET_ARCH_ARM64
+    // TODO(rmcilroy): Re-enable this for Arm64 once http://crbug.com/476477 is
+    // fixed.
+    return false;
+#else
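+    // Flexible floor/round is currently disabled on all architectures.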
+    return false;
+#endif
+  }
+  HUnaryMathOperation(HValue* context, HValue* value, BuiltinFunctionId op)
+      : HTemplateInstruction<2>(HType::TaggedNumber()), op_(op) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, value);
+    switch (op) {
+      case kMathFloor:
+      case kMathRound:
+        if (SupportsFlexibleFloorAndRound()) {
+          SetFlag(kFlexibleRepresentation);
+        } else {
+          set_representation(Representation::Integer32());
+        }
+        break;
+      case kMathClz32:
+        set_representation(Representation::Integer32());
+        break;
+      case kMathAbs:
+        // Not setting representation here: it is None intentionally.
+        SetFlag(kFlexibleRepresentation);
+        // TODO(svenpanne) This flag is actually only needed if representation()
+        // is tagged, and not when it is an unboxed double or unboxed integer.
+        SetChangesFlag(kNewSpacePromotion);
+        break;
+      case kMathFround:
+      case kMathLog:
+      case kMathExp:
+      case kMathSqrt:
+      case kMathPowHalf:
+        set_representation(Representation::Double());
+        break;
+      default:
+        UNREACHABLE();
+    }
+    SetFlag(kUseGVN);
+    SetFlag(kAllowUndefinedAsNaN);
+  }
+
+  bool IsDeletable() const override {
+    // TODO(crankshaft): This should be true; however, the semantics of this
+    // instruction also include the ToNumber conversion mentioned in the spec,
+    // which is observable.
+    return false;
+  }
+
+  HValue* SimplifiedDividendForMathFloorOfDiv(HDiv* hdiv);
+  HValue* SimplifiedDivisorForMathFloorOfDiv(HDiv* hdiv);
+
+  BuiltinFunctionId op_;
+};
+
+
+class HLoadRoot final : public HTemplateInstruction<0> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HLoadRoot, Heap::RootListIndex);
+  DECLARE_INSTRUCTION_FACTORY_P2(HLoadRoot, Heap::RootListIndex, HType);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  Heap::RootListIndex index() const { return index_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    HLoadRoot* b = HLoadRoot::cast(other);
+    return index_ == b->index_;
+  }
+
+ private:
+  explicit HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged())
+      : HTemplateInstruction<0>(type), index_(index) {
+    SetFlag(kUseGVN);
+    // TODO(bmeurer): We'll need kDependsOnRoots once we add the
+    // corresponding HStoreRoot instruction.
+    SetDependsOnFlag(kCalls);
+    set_representation(Representation::Tagged());
+  }
+
+  bool IsDeletable() const override { return true; }
+
+  const Heap::RootListIndex index_;
+};
+
+
+class HCheckMaps final : public HTemplateInstruction<2> {
+ public:
+  static HCheckMaps* New(Isolate* isolate, Zone* zone, HValue* context,
+                         HValue* value, Handle<Map> map,
+                         HValue* typecheck = NULL) {
+    return new(zone) HCheckMaps(value, new(zone) UniqueSet<Map>(
+            Unique<Map>::CreateImmovable(map), zone), typecheck);
+  }
+  static HCheckMaps* New(Isolate* isolate, Zone* zone, HValue* context,
+                         HValue* value, SmallMapList* map_list,
+                         HValue* typecheck = NULL) {
+    UniqueSet<Map>* maps = new(zone) UniqueSet<Map>(map_list->length(), zone);
+    for (int i = 0; i < map_list->length(); ++i) {
+      maps->Add(Unique<Map>::CreateImmovable(map_list->at(i)), zone);
+    }
+    return new(zone) HCheckMaps(value, maps, typecheck);
+  }
+
+  bool IsStabilityCheck() const {
+    return IsStabilityCheckField::decode(bit_field_);
+  }
+  void MarkAsStabilityCheck() {
+    bit_field_ = MapsAreStableField::encode(true) |
+                 HasMigrationTargetField::encode(false) |
+                 IsStabilityCheckField::encode(true);
+    ClearChangesFlag(kNewSpacePromotion);
+    ClearDependsOnFlag(kElementsKind);
+    ClearDependsOnFlag(kMaps);
+  }
+
+  bool HasEscapingOperandAt(int index) override { return false; }
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  HType CalculateInferredType() override {
+    if (value()->type().IsHeapObject()) return value()->type();
+    return HType::HeapObject();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  HValue* value() const { return OperandAt(0); }
+  HValue* typecheck() const { return OperandAt(1); }
+
+  const UniqueSet<Map>* maps() const { return maps_; }
+  void set_maps(const UniqueSet<Map>* maps) { maps_ = maps; }
+
+  bool maps_are_stable() const {
+    return MapsAreStableField::decode(bit_field_);
+  }
+
+  bool HasMigrationTarget() const {
+    return HasMigrationTargetField::decode(bit_field_);
+  }
+
+  HValue* Canonicalize() override;
+
+  static HCheckMaps* CreateAndInsertAfter(Zone* zone,
+                                          HValue* value,
+                                          Unique<Map> map,
+                                          bool map_is_stable,
+                                          HInstruction* instr) {
+    return instr->Append(new(zone) HCheckMaps(
+            value, new(zone) UniqueSet<Map>(map, zone), map_is_stable));
+  }
+
+  static HCheckMaps* CreateAndInsertBefore(Zone* zone,
+                                           HValue* value,
+                                           const UniqueSet<Map>* maps,
+                                           bool maps_are_stable,
+                                           HInstruction* instr) {
+    return instr->Prepend(new(zone) HCheckMaps(value, maps, maps_are_stable));
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    return this->maps()->Equals(HCheckMaps::cast(other)->maps());
+  }
+
+  int RedefinedOperandIndex() override { return 0; }
+
+ private:
+  HCheckMaps(HValue* value, const UniqueSet<Map>* maps, bool maps_are_stable)
+      : HTemplateInstruction<2>(HType::HeapObject()),
+        maps_(maps),
+        bit_field_(HasMigrationTargetField::encode(false) |
+                   IsStabilityCheckField::encode(false) |
+                   MapsAreStableField::encode(maps_are_stable)) {
+    DCHECK_NE(0, maps->size());
+    SetOperandAt(0, value);
+    // Use the object value for the dependency.
+    SetOperandAt(1, value);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetDependsOnFlag(kMaps);
+    SetDependsOnFlag(kElementsKind);
+  }
+
+  HCheckMaps(HValue* value, const UniqueSet<Map>* maps, HValue* typecheck)
+      : HTemplateInstruction<2>(HType::HeapObject()),
+        maps_(maps),
+        bit_field_(HasMigrationTargetField::encode(false) |
+                   IsStabilityCheckField::encode(false) |
+                   MapsAreStableField::encode(true)) {
+    DCHECK_NE(0, maps->size());
+    SetOperandAt(0, value);
+    // Use the object value for the dependency if NULL is passed.
+    SetOperandAt(1, typecheck ? typecheck : value);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetDependsOnFlag(kMaps);
+    SetDependsOnFlag(kElementsKind);
+    for (int i = 0; i < maps->size(); ++i) {
+      Handle<Map> map = maps->at(i).handle();
+      if (map->is_migration_target()) {
+        bit_field_ = HasMigrationTargetField::update(bit_field_, true);
+      }
+      if (!map->is_stable()) {
+        bit_field_ = MapsAreStableField::update(bit_field_, false);
+      }
+    }
+    if (HasMigrationTarget()) SetChangesFlag(kNewSpacePromotion);
+  }
+
+  class HasMigrationTargetField : public BitField<bool, 0, 1> {};
+  class IsStabilityCheckField : public BitField<bool, 1, 1> {};
+  class MapsAreStableField : public BitField<bool, 2, 1> {};
+
+  const UniqueSet<Map>* maps_;
+  uint32_t bit_field_;
+};
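+// A small worked example of the BitField packing used above (bit 0 holds
+// HasMigrationTarget, bit 1 IsStabilityCheck, bit 2 MapsAreStable; the
+// values are made up):
+//
+//   uint32_t bits = HasMigrationTargetField::encode(false) |
+//                   IsStabilityCheckField::encode(false) |
+//                   MapsAreStableField::encode(true);     // bits == 0x4
+//   MapsAreStableField::decode(bits);                     // true
+//   bits = HasMigrationTargetField::update(bits, true);   // bits == 0x5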
+
+
+class HCheckValue final : public HUnaryOperation {
+ public:
+  static HCheckValue* New(Isolate* isolate, Zone* zone, HValue* context,
+                          HValue* value, Handle<JSFunction> func) {
+    bool in_new_space = isolate->heap()->InNewSpace(*func);
+    // NOTE: We create an uninitialized Unique and initialize it later.
+    // This is because a JSFunction can move due to GC during graph creation.
+    // TODO(titzer): This is a migration crutch. Replace with some kind of
+    // Uniqueness scope later.
+    Unique<JSFunction> target = Unique<JSFunction>::CreateUninitialized(func);
+    HCheckValue* check = new(zone) HCheckValue(value, target, in_new_space);
+    return check;
+  }
+  static HCheckValue* New(Isolate* isolate, Zone* zone, HValue* context,
+                          HValue* value, Unique<HeapObject> target,
+                          bool object_in_new_space) {
+    return new(zone) HCheckValue(value, target, object_in_new_space);
+  }
+
+  void FinalizeUniqueness() override {
+    object_ = Unique<HeapObject>(object_.handle());
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  HValue* Canonicalize() override;
+
+#ifdef DEBUG
+  void Verify() override;
+#endif
+
+  Unique<HeapObject> object() const { return object_; }
+  bool object_in_new_space() const { return object_in_new_space_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    HCheckValue* b = HCheckValue::cast(other);
+    return object_ == b->object_;
+  }
+
+ private:
+  HCheckValue(HValue* value, Unique<HeapObject> object,
+               bool object_in_new_space)
+      : HUnaryOperation(value, value->type()),
+        object_(object),
+        object_in_new_space_(object_in_new_space) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  Unique<HeapObject> object_;
+  bool object_in_new_space_;
+};
+
+
+class HCheckInstanceType final : public HUnaryOperation {
+ public:
+  enum Check {
+    IS_JS_RECEIVER,
+    IS_JS_ARRAY,
+    IS_JS_DATE,
+    IS_STRING,
+    IS_INTERNALIZED_STRING,
+    LAST_INTERVAL_CHECK = IS_JS_DATE
+  };
+
+  DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check);
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  HType CalculateInferredType() override {
+    switch (check_) {
+      case IS_JS_RECEIVER: return HType::JSReceiver();
+      case IS_JS_ARRAY: return HType::JSArray();
+      case IS_JS_DATE: return HType::JSObject();
+      case IS_STRING: return HType::String();
+      case IS_INTERNALIZED_STRING: return HType::String();
+    }
+    UNREACHABLE();
+    return HType::Tagged();
+  }
+
+  HValue* Canonicalize() override;
+
+  bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
+  void GetCheckInterval(InstanceType* first, InstanceType* last);
+  void GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag);
+
+  Check check() const { return check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType)
+
+ protected:
+  // TODO(ager): It could be nice to allow the omission of instance
+  // type checks if we have already performed an instance type check
+  // with a larger range.
+  bool DataEquals(HValue* other) override {
+    HCheckInstanceType* b = HCheckInstanceType::cast(other);
+    return check_ == b->check_;
+  }
+
+  int RedefinedOperandIndex() override { return 0; }
+
+ private:
+  const char* GetCheckName() const;
+
+  HCheckInstanceType(HValue* value, Check check)
+      : HUnaryOperation(value, HType::HeapObject()), check_(check) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  const Check check_;
+};
+
+
+class HCheckSmi final : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HCheckSmi, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  HValue* Canonicalize() override {
+    HType value_type = value()->type();
+    if (value_type.IsSmi()) {
+      return NULL;
+    }
+    return this;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  explicit HCheckSmi(HValue* value) : HUnaryOperation(value, HType::Smi()) {
+    set_representation(Representation::Smi());
+    SetFlag(kUseGVN);
+  }
+};
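+// Returning NULL from a Canonicalize() override, as HCheckSmi does above
+// when its input already has Smi type, signals that the check is redundant
+// and can be removed from the graph; returning |this| keeps the instruction.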
+
+
+class HCheckArrayBufferNotNeutered final : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HCheckArrayBufferNotNeutered, HValue*);
+
+  bool HasEscapingOperandAt(int index) override { return false; }
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  HType CalculateInferredType() override {
+    if (value()->type().IsHeapObject()) return value()->type();
+    return HType::HeapObject();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+  int RedefinedOperandIndex() override { return 0; }
+
+ private:
+  explicit HCheckArrayBufferNotNeutered(HValue* value)
+      : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetDependsOnFlag(kCalls);
+  }
+};
+
+
+class HCheckHeapObject final : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
+
+  bool HasEscapingOperandAt(int index) override { return false; }
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  HType CalculateInferredType() override {
+    if (value()->type().IsHeapObject()) return value()->type();
+    return HType::HeapObject();
+  }
+
+#ifdef DEBUG
+  void Verify() override;
+#endif
+
+  HValue* Canonicalize() override {
+    return value()->type().IsHeapObject() ? NULL : this;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckHeapObject)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+};
+
+
+class InductionVariableData;
+
+
+struct InductionVariableLimitUpdate {
+  InductionVariableData* updated_variable;
+  HValue* limit;
+  bool limit_is_upper;
+  bool limit_is_included;
+
+  InductionVariableLimitUpdate()
+      : updated_variable(NULL), limit(NULL),
+        limit_is_upper(false), limit_is_included(false) {}
+};
+
+
+class HBoundsCheck;
+class HPhi;
+class HBitwise;
+
+
+class InductionVariableData final : public ZoneObject {
+ public:
+  class InductionVariableCheck : public ZoneObject {
+   public:
+    HBoundsCheck* check() { return check_; }
+    InductionVariableCheck* next() { return next_; }
+    bool HasUpperLimit() { return upper_limit_ >= 0; }
+    int32_t upper_limit() {
+      DCHECK(HasUpperLimit());
+      return upper_limit_;
+    }
+    void set_upper_limit(int32_t upper_limit) {
+      upper_limit_ = upper_limit;
+    }
+
+    bool processed() { return processed_; }
+    void set_processed() { processed_ = true; }
+
+    InductionVariableCheck(HBoundsCheck* check,
+                           InductionVariableCheck* next,
+                           int32_t upper_limit = kNoLimit)
+        : check_(check), next_(next), upper_limit_(upper_limit),
+          processed_(false) {}
+
+   private:
+    HBoundsCheck* check_;
+    InductionVariableCheck* next_;
+    int32_t upper_limit_;
+    bool processed_;
+  };
+
+  class ChecksRelatedToLength : public ZoneObject {
+   public:
+    HValue* length() { return length_; }
+    ChecksRelatedToLength* next() { return next_; }
+    InductionVariableCheck* checks() { return checks_; }
+
+    void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
+    void CloseCurrentBlock();
+
+    ChecksRelatedToLength(HValue* length, ChecksRelatedToLength* next)
+      : length_(length), next_(next), checks_(NULL),
+        first_check_in_block_(NULL),
+        added_index_(NULL),
+        added_constant_(NULL),
+        current_and_mask_in_block_(0),
+        current_or_mask_in_block_(0),
+        current_upper_limit_(kNoLimit) {}
+
+   private:
+    void UseNewIndexInCurrentBlock(Token::Value token,
+                                   int32_t mask,
+                                   HValue* index_base,
+                                   HValue* context);
+
+    HBoundsCheck* first_check_in_block() { return first_check_in_block_; }
+    HBitwise* added_index() { return added_index_; }
+    void set_added_index(HBitwise* index) { added_index_ = index; }
+    HConstant* added_constant() { return added_constant_; }
+    void set_added_constant(HConstant* constant) { added_constant_ = constant; }
+    int32_t current_and_mask_in_block() { return current_and_mask_in_block_; }
+    int32_t current_or_mask_in_block() { return current_or_mask_in_block_; }
+    int32_t current_upper_limit() { return current_upper_limit_; }
+
+    HValue* length_;
+    ChecksRelatedToLength* next_;
+    InductionVariableCheck* checks_;
+
+    HBoundsCheck* first_check_in_block_;
+    HBitwise* added_index_;
+    HConstant* added_constant_;
+    int32_t current_and_mask_in_block_;
+    int32_t current_or_mask_in_block_;
+    int32_t current_upper_limit_;
+  };
+
+  struct LimitFromPredecessorBlock {
+    InductionVariableData* variable;
+    Token::Value token;
+    HValue* limit;
+    HBasicBlock* other_target;
+
+    bool LimitIsValid() { return token != Token::ILLEGAL; }
+
+    bool LimitIsIncluded() {
+      return Token::IsEqualityOp(token) ||
+          token == Token::GTE || token == Token::LTE;
+    }
+    bool LimitIsUpper() {
+      return token == Token::LTE || token == Token::LT || token == Token::NE;
+    }
+
+    LimitFromPredecessorBlock()
+        : variable(NULL),
+          token(Token::ILLEGAL),
+          limit(NULL),
+          other_target(NULL) {}
+  };
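+  // Example: for a loop guarded by "i < n" (token Token::LT, limit n), the
+  // limit is an upper bound (LimitIsUpper() is true) that is not itself a
+  // legal value of i (LimitIsIncluded() is false); with "i <= n"
+  // (Token::LTE) the limit would be included.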
+
+  static const int32_t kNoLimit = -1;
+
+  static InductionVariableData* ExaminePhi(HPhi* phi);
+  static void ComputeLimitFromPredecessorBlock(
+      HBasicBlock* block,
+      LimitFromPredecessorBlock* result);
+  static bool ComputeInductionVariableLimit(
+      HBasicBlock* block,
+      InductionVariableLimitUpdate* additional_limit);
+
+  struct BitwiseDecompositionResult {
+    HValue* base;
+    int32_t and_mask;
+    int32_t or_mask;
+    HValue* context;
+
+    BitwiseDecompositionResult()
+        : base(NULL), and_mask(0), or_mask(0), context(NULL) {}
+  };
+  static void DecomposeBitwise(HValue* value,
+                               BitwiseDecompositionResult* result);
+
+  void AddCheck(HBoundsCheck* check, int32_t upper_limit = kNoLimit);
+
+  bool CheckIfBranchIsLoopGuard(Token::Value token,
+                                HBasicBlock* current_branch,
+                                HBasicBlock* other_branch);
+
+  void UpdateAdditionalLimit(InductionVariableLimitUpdate* update);
+
+  HPhi* phi() { return phi_; }
+  HValue* base() { return base_; }
+  int32_t increment() { return increment_; }
+  HValue* limit() { return limit_; }
+  bool limit_included() { return limit_included_; }
+  HBasicBlock* limit_validity() { return limit_validity_; }
+  HBasicBlock* induction_exit_block() { return induction_exit_block_; }
+  HBasicBlock* induction_exit_target() { return induction_exit_target_; }
+  ChecksRelatedToLength* checks() { return checks_; }
+  HValue* additional_upper_limit() { return additional_upper_limit_; }
+  bool additional_upper_limit_is_included() {
+    return additional_upper_limit_is_included_;
+  }
+  HValue* additional_lower_limit() { return additional_lower_limit_; }
+  bool additional_lower_limit_is_included() {
+    return additional_lower_limit_is_included_;
+  }
+
+  bool LowerLimitIsNonNegativeConstant() {
+    if (base()->IsInteger32Constant() && base()->GetInteger32Constant() >= 0) {
+      return true;
+    }
+    if (additional_lower_limit() != NULL &&
+        additional_lower_limit()->IsInteger32Constant() &&
+        additional_lower_limit()->GetInteger32Constant() >= 0) {
+      // Ignoring the corner case of !additional_lower_limit_is_included()
+      // is safe; handling it would add unneeded complexity.
+      return true;
+    }
+    return false;
+  }
+
+  int32_t ComputeUpperLimit(int32_t and_mask, int32_t or_mask);
+
+ private:
+  template <class T> void swap(T* a, T* b) {
+    T c(*a);
+    *a = *b;
+    *b = c;
+  }
+
+  InductionVariableData(HPhi* phi, HValue* base, int32_t increment)
+      : phi_(phi), base_(IgnoreOsrValue(base)), increment_(increment),
+        limit_(NULL), limit_included_(false), limit_validity_(NULL),
+        induction_exit_block_(NULL), induction_exit_target_(NULL),
+        checks_(NULL),
+        additional_upper_limit_(NULL),
+        additional_upper_limit_is_included_(false),
+        additional_lower_limit_(NULL),
+        additional_lower_limit_is_included_(false) {}
+
+  static int32_t ComputeIncrement(HPhi* phi, HValue* phi_operand);
+
+  static HValue* IgnoreOsrValue(HValue* v);
+  static InductionVariableData* GetInductionVariableData(HValue* v);
+
+  HPhi* phi_;
+  HValue* base_;
+  int32_t increment_;
+  HValue* limit_;
+  bool limit_included_;
+  HBasicBlock* limit_validity_;
+  HBasicBlock* induction_exit_block_;
+  HBasicBlock* induction_exit_target_;
+  ChecksRelatedToLength* checks_;
+  HValue* additional_upper_limit_;
+  bool additional_upper_limit_is_included_;
+  HValue* additional_lower_limit_;
+  bool additional_lower_limit_is_included_;
+};
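+// For a canonical counting loop such as "for (int i = 0; i < n; i++)", the
+// fields above would plausibly be filled in as: phi_ = the loop phi for i,
+// base_ = the constant 0, increment_ = 1, limit_ = n, and
+// limit_included_ = false (the loop exits once i reaches n).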
+
+
+class HPhi final : public HValue {
+ public:
+  HPhi(int merged_index, Zone* zone)
+      : inputs_(2, zone), merged_index_(merged_index) {
+    DCHECK(merged_index >= 0 || merged_index == kInvalidMergedIndex);
+    SetFlag(kFlexibleRepresentation);
+    SetFlag(kAllowUndefinedAsNaN);
+  }
+
+  Representation RepresentationFromInputs() override;
+
+  Range* InferRange(Zone* zone) override;
+  void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+  Representation RequiredInputRepresentation(int index) override {
+    return representation();
+  }
+  Representation KnownOptimalRepresentation() override {
+    return representation();
+  }
+  HType CalculateInferredType() override;
+  int OperandCount() const override { return inputs_.length(); }
+  HValue* OperandAt(int index) const override { return inputs_[index]; }
+  HValue* GetRedundantReplacement();
+  void AddInput(HValue* value);
+  bool HasRealUses();
+
+  bool IsReceiver() const { return merged_index_ == 0; }
+  bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
+
+  SourcePosition position() const override;
+
+  int merged_index() const { return merged_index_; }
+
+  InductionVariableData* induction_variable_data() {
+    return induction_variable_data_;
+  }
+  bool IsInductionVariable() {
+    return induction_variable_data_ != NULL;
+  }
+  bool IsLimitedInductionVariable() {
+    return IsInductionVariable() &&
+        induction_variable_data_->limit() != NULL;
+  }
+  void DetectInductionVariable() {
+    DCHECK(induction_variable_data_ == NULL);
+    induction_variable_data_ = InductionVariableData::ExaminePhi(this);
+  }
+
+  std::ostream& PrintTo(std::ostream& os) const override;  // NOLINT
+
+#ifdef DEBUG
+  void Verify() override;
+#endif
+
+  void InitRealUses(int id);
+  void AddNonPhiUsesFrom(HPhi* other);
+
+  Representation representation_from_indirect_uses() const {
+    return representation_from_indirect_uses_;
+  }
+
+  bool has_type_feedback_from_uses() const {
+    return has_type_feedback_from_uses_;
+  }
+
+  int phi_id() { return phi_id_; }
+
+  static HPhi* cast(HValue* value) {
+    DCHECK(value->IsPhi());
+    return reinterpret_cast<HPhi*>(value);
+  }
+  Opcode opcode() const override { return HValue::kPhi; }
+
+  void SimplifyConstantInputs();
+
+  // Marker value representing an invalid merge index.
+  static const int kInvalidMergedIndex = -1;
+
+ protected:
+  void DeleteFromGraph() override;
+  void InternalSetOperandAt(int index, HValue* value) override {
+    inputs_[index] = value;
+  }
+
+ private:
+  Representation representation_from_non_phi_uses() const {
+    return representation_from_non_phi_uses_;
+  }
+
+  ZoneList<HValue*> inputs_;
+  int merged_index_ = 0;
+
+  int phi_id_ = -1;
+  InductionVariableData* induction_variable_data_ = nullptr;
+
+  Representation representation_from_indirect_uses_ = Representation::None();
+  Representation representation_from_non_phi_uses_ = Representation::None();
+  bool has_type_feedback_from_uses_ = false;
+
+  // TODO(titzer): we can't eliminate the receiver for generating backtraces
+  bool IsDeletable() const override { return !IsReceiver(); }
+};
+
+
+// Common base class for HArgumentsObject and HCapturedObject.
+class HDematerializedObject : public HInstruction {
+ public:
+  HDematerializedObject(int count, Zone* zone) : values_(count, zone) {}
+
+  int OperandCount() const final { return values_.length(); }
+  HValue* OperandAt(int index) const final { return values_[index]; }
+
+  bool HasEscapingOperandAt(int index) final { return false; }
+  Representation RequiredInputRepresentation(int index) final {
+    return Representation::None();
+  }
+
+ protected:
+  void InternalSetOperandAt(int index, HValue* value) final {
+    values_[index] = value;
+  }
+
+  // List of values tracked by this marker.
+  ZoneList<HValue*> values_;
+};
+
+
+class HArgumentsObject final : public HDematerializedObject {
+ public:
+  static HArgumentsObject* New(Isolate* isolate, Zone* zone, HValue* context,
+                               int count) {
+    return new(zone) HArgumentsObject(count, zone);
+  }
+
+  // The values list contains all elements of the arguments object, including
+  // the receiver, which is skipped when materializing.
+  const ZoneList<HValue*>* arguments_values() const { return &values_; }
+  int arguments_count() const { return values_.length(); }
+
+  void AddArgument(HValue* argument, Zone* zone) {
+    values_.Add(NULL, zone);  // Resize list.
+    SetOperandAt(values_.length() - 1, argument);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsObject)
+
+ private:
+  HArgumentsObject(int count, Zone* zone)
+      : HDematerializedObject(count, zone) {
+    set_representation(Representation::Tagged());
+    SetFlag(kIsArguments);
+  }
+};
+
+
+class HCapturedObject final : public HDematerializedObject {
+ public:
+  HCapturedObject(int length, int id, Zone* zone)
+      : HDematerializedObject(length, zone), capture_id_(id) {
+    set_representation(Representation::Tagged());
+    values_.AddBlock(NULL, length, zone);  // Resize list.
+  }
+
+  // The values list contains all in-object properties of the captured
+  // object, indexed by field index. Properties in the
+  // properties or elements backing store are not tracked here.
+  const ZoneList<HValue*>* values() const { return &values_; }
+  int length() const { return values_.length(); }
+  int capture_id() const { return capture_id_; }
+
+  // Shortcut for the map value of this captured object.
+  HValue* map_value() const { return values()->first(); }
+
+  void ReuseSideEffectsFromStore(HInstruction* store) {
+    DCHECK(store->HasObservableSideEffects());
+    DCHECK(store->IsStoreNamedField());
+    changes_flags_.Add(store->ChangesFlags());
+  }
+
+  // Replay effects of this instruction on the given environment.
+  void ReplayEnvironment(HEnvironment* env);
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
+
+ private:
+  int capture_id_;
+
+  // Note that we cannot DCE captured objects as they are used to replay
+  // the environment. This method is here as an explicit reminder.
+  // TODO(mstarzinger): Turn HSimulates into full snapshots maybe?
+  bool IsDeletable() const final { return false; }
+};
+
+
+class HConstant final : public HTemplateInstruction<0> {
+ public:
+  enum Special { kHoleNaN };
+
+  DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Special);
+  DECLARE_INSTRUCTION_FACTORY_P1(HConstant, int32_t);
+  DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation);
+  DECLARE_INSTRUCTION_FACTORY_P1(HConstant, double);
+  DECLARE_INSTRUCTION_FACTORY_P1(HConstant, Handle<Object>);
+  DECLARE_INSTRUCTION_FACTORY_P1(HConstant, ExternalReference);
+
+  static HConstant* CreateAndInsertAfter(Isolate* isolate, Zone* zone,
+                                         HValue* context, int32_t value,
+                                         Representation representation,
+                                         HInstruction* instruction) {
+    return instruction->Append(
+        HConstant::New(isolate, zone, context, value, representation));
+  }
+
+  Handle<Map> GetMonomorphicJSObjectMap() override {
+    Handle<Object> object = object_.handle();
+    if (!object.is_null() && object->IsHeapObject()) {
+      return v8::internal::handle(HeapObject::cast(*object)->map());
+    }
+    return Handle<Map>();
+  }
+
+  static HConstant* CreateAndInsertBefore(Isolate* isolate, Zone* zone,
+                                          HValue* context, int32_t value,
+                                          Representation representation,
+                                          HInstruction* instruction) {
+    return instruction->Prepend(
+        HConstant::New(isolate, zone, context, value, representation));
+  }
+
+  static HConstant* CreateAndInsertBefore(Zone* zone,
+                                          Unique<Map> map,
+                                          bool map_is_stable,
+                                          HInstruction* instruction) {
+    return instruction->Prepend(new(zone) HConstant(
+        map, Unique<Map>(Handle<Map>::null()), map_is_stable,
+        Representation::Tagged(), HType::HeapObject(), true,
+        false, false, MAP_TYPE));
+  }
+
+  static HConstant* CreateAndInsertAfter(Zone* zone,
+                                         Unique<Map> map,
+                                         bool map_is_stable,
+                                         HInstruction* instruction) {
+    return instruction->Append(new(zone) HConstant(
+            map, Unique<Map>(Handle<Map>::null()), map_is_stable,
+            Representation::Tagged(), HType::HeapObject(), true,
+            false, false, MAP_TYPE));
+  }
+
+  Handle<Object> handle(Isolate* isolate) {
+    if (object_.handle().is_null()) {
+      // The default for is_not_in_new_space depends on this heap number
+      // being tenured, which guarantees it is not located in new space.
+      object_ = Unique<Object>::CreateUninitialized(
+          isolate->factory()->NewNumber(double_value_, TENURED));
+    }
+    AllowDeferredHandleDereference smi_check;
+    DCHECK(HasInteger32Value() || !object_.handle()->IsSmi());
+    return object_.handle();
+  }
+
+  bool IsSpecialDouble() const {
+    return HasDoubleValue() &&
+           (bit_cast<int64_t>(double_value_) == bit_cast<int64_t>(-0.0) ||
+            std::isnan(double_value_));
+  }
+
+  bool NotInNewSpace() const {
+    return IsNotInNewSpaceField::decode(bit_field_);
+  }
+
+  bool ImmortalImmovable() const;
+
+  bool IsCell() const {
+    InstanceType instance_type = GetInstanceType();
+    return instance_type == CELL_TYPE;
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  Representation KnownOptimalRepresentation() override {
+    if (HasSmiValue() && SmiValuesAre31Bits()) return Representation::Smi();
+    if (HasInteger32Value()) return Representation::Integer32();
+    if (HasNumberValue()) return Representation::Double();
+    if (HasExternalReferenceValue()) return Representation::External();
+    return Representation::Tagged();
+  }
+
+  bool EmitAtUses() override;
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+  HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
+  Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone);
+  Maybe<HConstant*> CopyToTruncatedNumber(Isolate* isolate, Zone* zone);
+  bool HasInteger32Value() const {
+    return HasInt32ValueField::decode(bit_field_);
+  }
+  int32_t Integer32Value() const {
+    DCHECK(HasInteger32Value());
+    return int32_value_;
+  }
+  bool HasSmiValue() const { return HasSmiValueField::decode(bit_field_); }
+  bool HasDoubleValue() const {
+    return HasDoubleValueField::decode(bit_field_);
+  }
+  double DoubleValue() const {
+    DCHECK(HasDoubleValue());
+    return double_value_;
+  }
+  uint64_t DoubleValueAsBits() const {
+    uint64_t bits;
+    DCHECK(HasDoubleValue());
+    STATIC_ASSERT(sizeof(bits) == sizeof(double_value_));
+    std::memcpy(&bits, &double_value_, sizeof(bits));
+    return bits;
+  }
+  bool IsTheHole() const {
+    if (HasDoubleValue() && DoubleValueAsBits() == kHoleNanInt64) {
+      return true;
+    }
+    return object_.IsInitialized() &&
+           object_.IsKnownGlobal(isolate()->heap()->the_hole_value());
+  }
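+  // kHoleNanInt64 is the bit pattern of the distinguished NaN that V8 stores
+  // in holey double arrays; the comparison goes through the raw bits because
+  // an ordinary double comparison against a NaN is always false.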
+  bool HasNumberValue() const { return HasDoubleValue(); }
+  int32_t NumberValueAsInteger32() const {
+    DCHECK(HasNumberValue());
+    // Irrespective of whether a numeric HConstant can be safely
+    // represented as an int32, we store the (in some cases lossy)
+    // representation of the number in int32_value_.
+    return int32_value_;
+  }
+  bool HasStringValue() const {
+    if (HasNumberValue()) return false;
+    DCHECK(!object_.handle().is_null());
+    return GetInstanceType() < FIRST_NONSTRING_TYPE;
+  }
+  Handle<String> StringValue() const {
+    DCHECK(HasStringValue());
+    return Handle<String>::cast(object_.handle());
+  }
+  bool HasInternalizedStringValue() const {
+    return HasStringValue() && StringShape(GetInstanceType()).IsInternalized();
+  }
+
+  bool HasExternalReferenceValue() const {
+    return HasExternalReferenceValueField::decode(bit_field_);
+  }
+  ExternalReference ExternalReferenceValue() const {
+    return external_reference_value_;
+  }
+
+  bool HasBooleanValue() const { return type_.IsBoolean(); }
+  bool BooleanValue() const { return BooleanValueField::decode(bit_field_); }
+  bool IsCallable() const { return IsCallableField::decode(bit_field_); }
+  bool IsUndetectable() const {
+    return IsUndetectableField::decode(bit_field_);
+  }
+  InstanceType GetInstanceType() const {
+    return InstanceTypeField::decode(bit_field_);
+  }
+
+  bool HasMapValue() const { return GetInstanceType() == MAP_TYPE; }
+  Unique<Map> MapValue() const {
+    DCHECK(HasMapValue());
+    return Unique<Map>::cast(GetUnique());
+  }
+  bool HasStableMapValue() const {
+    DCHECK(HasMapValue() || !HasStableMapValueField::decode(bit_field_));
+    return HasStableMapValueField::decode(bit_field_);
+  }
+
+  bool HasObjectMap() const { return !object_map_.IsNull(); }
+  Unique<Map> ObjectMap() const {
+    DCHECK(HasObjectMap());
+    return object_map_;
+  }
+
+  intptr_t Hashcode() override {
+    if (HasInteger32Value()) {
+      return static_cast<intptr_t>(int32_value_);
+    } else if (HasDoubleValue()) {
+      uint64_t bits = DoubleValueAsBits();
+      if (sizeof(bits) > sizeof(intptr_t)) {
+        bits ^= (bits >> 32);
+      }
+      return static_cast<intptr_t>(bits);
+    } else if (HasExternalReferenceValue()) {
+      return reinterpret_cast<intptr_t>(external_reference_value_.address());
+    } else {
+      DCHECK(!object_.handle().is_null());
+      return object_.Hashcode();
+    }
+  }
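+  // On 32-bit targets intptr_t cannot hold all 64 double bits, so the high
+  // word is folded into the low word before truncating. For example, 42.0
+  // has bits 0x4045000000000000; the folded value is
+  // 0x4045000000000000 ^ 0x0000000040450000, which truncates to 0x40450000.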
+
+  void FinalizeUniqueness() override {
+    if (!HasDoubleValue() && !HasExternalReferenceValue()) {
+      DCHECK(!object_.handle().is_null());
+      object_ = Unique<Object>(object_.handle());
+    }
+  }
+
+  Unique<Object> GetUnique() const {
+    return object_;
+  }
+
+  bool EqualsUnique(Unique<Object> other) const {
+    return object_.IsInitialized() && object_ == other;
+  }
+
+  bool DataEquals(HValue* other) override {
+    HConstant* other_constant = HConstant::cast(other);
+    if (HasInteger32Value()) {
+      return other_constant->HasInteger32Value() &&
+             int32_value_ == other_constant->int32_value_;
+    } else if (HasDoubleValue()) {
+      return other_constant->HasDoubleValue() &&
+             std::memcmp(&double_value_, &other_constant->double_value_,
+                         sizeof(double_value_)) == 0;
+    } else if (HasExternalReferenceValue()) {
+      return other_constant->HasExternalReferenceValue() &&
+             external_reference_value_ ==
+                 other_constant->external_reference_value_;
+    } else {
+      if (other_constant->HasInteger32Value() ||
+          other_constant->HasDoubleValue() ||
+          other_constant->HasExternalReferenceValue()) {
+        return false;
+      }
+      DCHECK(!object_.handle().is_null());
+      return other_constant->object_ == object_;
+    }
+  }
+
+#ifdef DEBUG
+  void Verify() override {}
+#endif
+
+  DECLARE_CONCRETE_INSTRUCTION(Constant)
+
+ protected:
+  Range* InferRange(Zone* zone) override;
+
+ private:
+  friend class HGraph;
+  explicit HConstant(Special special);
+  explicit HConstant(Handle<Object> handle,
+                     Representation r = Representation::None());
+  HConstant(int32_t value,
+            Representation r = Representation::None(),
+            bool is_not_in_new_space = true,
+            Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
+  HConstant(double value,
+            Representation r = Representation::None(),
+            bool is_not_in_new_space = true,
+            Unique<Object> optional = Unique<Object>(Handle<Object>::null()));
+  HConstant(Unique<Object> object,
+            Unique<Map> object_map,
+            bool has_stable_map_value,
+            Representation r,
+            HType type,
+            bool is_not_in_new_space,
+            bool boolean_value,
+            bool is_undetectable,
+            InstanceType instance_type);
+
+  explicit HConstant(ExternalReference reference);
+
+  void Initialize(Representation r);
+
+  bool IsDeletable() const override { return true; }
+
+  // If object_ is a map, this indicates whether the map is stable.
+  class HasStableMapValueField : public BitField<bool, 0, 1> {};
+
+  // We store the HConstant in the most specific form safely possible.
+  // These flags tell us if the respective member fields hold valid, safe
+  // representations of the constant. More specific flags imply more general
+  // flags, but not the converse (i.e. smi => int32 => double).
+  class HasSmiValueField : public BitField<bool, 1, 1> {};
+  class HasInt32ValueField : public BitField<bool, 2, 1> {};
+  class HasDoubleValueField : public BitField<bool, 3, 1> {};
+
+  class HasExternalReferenceValueField : public BitField<bool, 4, 1> {};
+  class IsNotInNewSpaceField : public BitField<bool, 5, 1> {};
+  class BooleanValueField : public BitField<bool, 6, 1> {};
+  class IsUndetectableField : public BitField<bool, 7, 1> {};
+  class IsCallableField : public BitField<bool, 8, 1> {};
+
+  static const InstanceType kUnknownInstanceType = FILLER_TYPE;
+  class InstanceTypeField : public BitField<InstanceType, 16, 8> {};
+
+  // If this is a numerical constant, object_ either points to the
+  // HeapObject the constant originated from or is null.  If the
+  // constant is non-numeric, object_ always points to a valid
+  // constant HeapObject.
+  Unique<Object> object_;
+
+  // If object_ is a heap object, this points to the stable map of the object.
+  Unique<Map> object_map_;
+
+  uint32_t bit_field_;
+
+  int32_t int32_value_;
+  double double_value_;
+  ExternalReference external_reference_value_;
+};
+
+
+class HBinaryOperation : public HTemplateInstruction<3> {
+ public:
+  HBinaryOperation(HValue* context, HValue* left, HValue* right,
+                   Strength strength, HType type = HType::Tagged())
+      : HTemplateInstruction<3>(type),
+        strength_(strength),
+        observed_output_representation_(Representation::None()) {
+    DCHECK(left != NULL && right != NULL);
+    SetOperandAt(0, context);
+    SetOperandAt(1, left);
+    SetOperandAt(2, right);
+    observed_input_representation_[0] = Representation::None();
+    observed_input_representation_[1] = Representation::None();
+  }
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* left() const { return OperandAt(1); }
+  HValue* right() const { return OperandAt(2); }
+  Strength strength() const { return strength_; }
+
+  // True if switching left and right operands likely generates better code.
+  bool AreOperandsBetterSwitched() {
+    if (!IsCommutative()) return false;
+
+    // Constant operands are better off on the right; they can be inlined
+    // in many situations on most platforms.
+    if (left()->IsConstant()) return true;
+    if (right()->IsConstant()) return false;
+
+    // Otherwise, if there is only one use of the right operand, it would be
+    // better off on the left for platforms that only have 2-arg arithmetic
+    // ops (e.g. ia32, x64) that clobber the left operand.
+    return right()->HasOneUse();
+  }
+
+  HValue* BetterLeftOperand() {
+    return AreOperandsBetterSwitched() ? right() : left();
+  }
+
+  HValue* BetterRightOperand() {
+    return AreOperandsBetterSwitched() ? left() : right();
+  }
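+  // Example: for a commutative HAdd whose left operand is a constant c and
+  // whose right operand is some value x, AreOperandsBetterSwitched() is
+  // true, so BetterLeftOperand() yields x and BetterRightOperand() yields c,
+  // letting most backends encode the constant as an immediate.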
+
+  void set_observed_input_representation(int index, Representation rep) {
+    DCHECK(index >= 1 && index <= 2);
+    observed_input_representation_[index - 1] = rep;
+  }
+
+  virtual void initialize_output_representation(Representation observed) {
+    observed_output_representation_ = observed;
+  }
+
+  Representation observed_input_representation(int index) override {
+    if (index == 0) return Representation::Tagged();
+    return observed_input_representation_[index - 1];
+  }
+
+  void UpdateRepresentation(Representation new_rep,
+                            HInferRepresentationPhase* h_infer,
+                            const char* reason) override {
+    Representation rep = !FLAG_smi_binop && new_rep.IsSmi()
+        ? Representation::Integer32() : new_rep;
+    HValue::UpdateRepresentation(rep, h_infer, reason);
+  }
+
+  void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+  Representation RepresentationFromInputs() override;
+  Representation RepresentationFromOutput();
+  void AssumeRepresentation(Representation r) override;
+
+  virtual bool IsCommutative() const { return false; }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    if (index == 0) return Representation::Tagged();
+    return representation();
+  }
+
+  void SetOperandPositions(Zone* zone, SourcePosition left_pos,
+                           SourcePosition right_pos) {
+    set_operand_position(zone, 1, left_pos);
+    set_operand_position(zone, 2, right_pos);
+  }
+
+  bool RightIsPowerOf2() {
+    if (!right()->IsInteger32Constant()) return false;
+    int32_t value = right()->GetInteger32Constant();
+    if (value < 0) {
+      return base::bits::IsPowerOfTwo32(static_cast<uint32_t>(-value));
+    }
+    return base::bits::IsPowerOfTwo32(static_cast<uint32_t>(value));
+  }
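+  // Example: a right operand of -4 tests IsPowerOfTwo32(4) and returns true
+  // (useful for strength-reducing divisions), while 6 returns false.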
+
+  DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
+
+ private:
+  bool IgnoreObservedOutputRepresentation(Representation current_rep);
+  Strength strength_;
+
+  Representation observed_input_representation_[2];
+  Representation observed_output_representation_;
+};
+
+
+class HWrapReceiver final : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(HWrapReceiver, HValue*, HValue*);
+
+  bool DataEquals(HValue* other) override { return true; }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  HValue* receiver() const { return OperandAt(0); }
+  HValue* function() const { return OperandAt(1); }
+
+  HValue* Canonicalize() override;
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+  bool known_function() const { return known_function_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
+
+ private:
+  HWrapReceiver(HValue* receiver, HValue* function) {
+    known_function_ = function->IsConstant() &&
+        HConstant::cast(function)->handle(function->isolate())->IsJSFunction();
+    set_representation(Representation::Tagged());
+    SetOperandAt(0, receiver);
+    SetOperandAt(1, function);
+    SetFlag(kUseGVN);
+  }
+
+  bool known_function_;
+};
+
+
+class HApplyArguments final : public HTemplateInstruction<4> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P4(HApplyArguments, HValue*, HValue*, HValue*,
+                                 HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    // The length is untagged; all other inputs are tagged.
+    return (index == 2)
+        ? Representation::Integer32()
+        : Representation::Tagged();
+  }
+
+  HValue* function() { return OperandAt(0); }
+  HValue* receiver() { return OperandAt(1); }
+  HValue* length() { return OperandAt(2); }
+  HValue* elements() { return OperandAt(3); }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments)
+
+ private:
+  HApplyArguments(HValue* function,
+                  HValue* receiver,
+                  HValue* length,
+                  HValue* elements) {
+    set_representation(Representation::Tagged());
+    SetOperandAt(0, function);
+    SetOperandAt(1, receiver);
+    SetOperandAt(2, length);
+    SetOperandAt(3, elements);
+    SetAllSideEffects();
+  }
+};
+
+
+class HArgumentsElements final : public HTemplateInstruction<0> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsElements, bool);
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  bool from_inlined() const { return from_inlined_; }
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
+    // The value produced by this instruction is a pointer into the stack
+    // that looks as if it were a smi because of alignment.
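+    // (Word-aligned stack addresses have their least significant bit clear,
+    // which matches the smi tag, so the GC will not mistake this value for
+    // a heap pointer.)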
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  bool IsDeletable() const override { return true; }
+
+  bool from_inlined_;
+};
+
+
+class HArgumentsLength final : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsLength, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+  }
+
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HAccessArgumentsAt final : public HTemplateInstruction<3> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P3(HAccessArgumentsAt, HValue*, HValue*, HValue*);
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    // The arguments elements input is considered tagged.
+    return index == 0
+        ? Representation::Tagged()
+        : Representation::Integer32();
+  }
+
+  HValue* arguments() const { return OperandAt(0); }
+  HValue* length() const { return OperandAt(1); }
+  HValue* index() const { return OperandAt(2); }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
+
+ private:
+  HAccessArgumentsAt(HValue* arguments, HValue* length, HValue* index) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetOperandAt(0, arguments);
+    SetOperandAt(1, length);
+    SetOperandAt(2, index);
+  }
+
+  bool DataEquals(HValue* other) override { return true; }
+};
+
+
+class HBoundsCheckBaseIndexInformation;
+
+
+class HBoundsCheck final : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(HBoundsCheck, HValue*, HValue*);
+
+  bool skip_check() const { return skip_check_; }
+  void set_skip_check() { skip_check_ = true; }
+
+  HValue* base() const { return base_; }
+  int offset() const { return offset_; }
+  int scale() const { return scale_; }
+
+  void ApplyIndexChange();
+  bool DetectCompoundIndex() {
+    DCHECK(base() == NULL);
+
+    DecompositionResult decomposition;
+    if (index()->TryDecompose(&decomposition)) {
+      base_ = decomposition.base();
+      offset_ = decomposition.offset();
+      scale_ = decomposition.scale();
+      return true;
+    } else {
+      base_ = index();
+      offset_ = 0;
+      scale_ = 0;
+      return false;
+    }
+  }
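+  // For instance, an index of the form "i + 4" would typically decompose
+  // into base_ = i, offset_ = 4, scale_ = 0; the scale field covers shifted
+  // indices such as "(i << 1) + 4". An index that cannot be decomposed
+  // becomes its own base with no offset.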
+
+  Representation RequiredInputRepresentation(int index) override {
+    return representation();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+  void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+
+  HValue* index() const { return OperandAt(0); }
+  HValue* length() const { return OperandAt(1); }
+  bool allow_equality() const { return allow_equality_; }
+  void set_allow_equality(bool v) { allow_equality_ = v; }
+
+  int RedefinedOperandIndex() override { return 0; }
+  bool IsPurelyInformativeDefinition() override { return skip_check(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
+
+ protected:
+  friend class HBoundsCheckBaseIndexInformation;
+
+  Range* InferRange(Zone* zone) override;
+
+  bool DataEquals(HValue* other) override { return true; }
+  bool skip_check_;
+  HValue* base_;
+  int offset_;
+  int scale_;
+  bool allow_equality_;
+
+ private:
+  // Normally HBoundsCheck should be created using the
+  // HGraphBuilder::AddBoundsCheck() helper.
+  // However, when building stubs, where we know that the arguments are Int32,
+  // it makes sense to invoke this constructor directly.
+  HBoundsCheck(HValue* index, HValue* length)
+    : skip_check_(false),
+      base_(NULL), offset_(0), scale_(0),
+      allow_equality_(false) {
+    SetOperandAt(0, index);
+    SetOperandAt(1, length);
+    SetFlag(kFlexibleRepresentation);
+    SetFlag(kUseGVN);
+  }
+
+  bool IsDeletable() const override { return skip_check() && !FLAG_debug_code; }
+};
+
+
+class HBoundsCheckBaseIndexInformation final : public HTemplateInstruction<2> {
+ public:
+  explicit HBoundsCheckBaseIndexInformation(HBoundsCheck* check) {
+    DecompositionResult decomposition;
+    if (check->index()->TryDecompose(&decomposition)) {
+      SetOperandAt(0, decomposition.base());
+      SetOperandAt(1, check);
+    } else {
+      UNREACHABLE();
+    }
+  }
+
+  HValue* base_index() const { return OperandAt(0); }
+  HBoundsCheck* bounds_check() { return HBoundsCheck::cast(OperandAt(1)); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation)
+
+  Representation RequiredInputRepresentation(int index) override {
+    return representation();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  int RedefinedOperandIndex() override { return 0; }
+  bool IsPurelyInformativeDefinition() override { return true; }
+};
+
+
+class HBitwiseBinaryOperation : public HBinaryOperation {
+ public:
+  HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right,
+                          Strength strength, HType type = HType::TaggedNumber())
+      : HBinaryOperation(context, left, right, strength, type) {
+    SetFlag(kFlexibleRepresentation);
+    SetFlag(kTruncatingToInt32);
+    if (!is_strong(strength)) SetFlag(kAllowUndefinedAsNaN);
+    SetAllSideEffects();
+  }
+
+  void RepresentationChanged(Representation to) override {
+    if (to.IsTagged() &&
+        (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
+      SetAllSideEffects();
+      ClearFlag(kUseGVN);
+    } else {
+      ClearAllSideEffects();
+      SetFlag(kUseGVN);
+    }
+    if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
+  }
+
+  void UpdateRepresentation(Representation new_rep,
+                            HInferRepresentationPhase* h_infer,
+                            const char* reason) override {
+    // We only generate either int32 or generic tagged bitwise operations.
+    if (new_rep.IsDouble()) new_rep = Representation::Integer32();
+    HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+  }
+
+  Representation observed_input_representation(int index) override {
+    Representation r = HBinaryOperation::observed_input_representation(index);
+    if (r.IsDouble()) return Representation::Integer32();
+    return r;
+  }
+
+  void initialize_output_representation(Representation observed) override {
+    if (observed.IsDouble()) observed = Representation::Integer32();
+    HBinaryOperation::initialize_output_representation(observed);
+  }
+
+  DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
+
+ private:
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HMathFloorOfDiv final : public HBinaryOperation {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HMathFloorOfDiv,
+                                              HValue*,
+                                              HValue*);
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
+      : HBinaryOperation(context, left, right, Strength::WEAK) {
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+    SetFlag(kCanOverflow);
+    SetFlag(kCanBeDivByZero);
+    SetFlag(kLeftCanBeMinInt);
+    SetFlag(kLeftCanBeNegative);
+    SetFlag(kLeftCanBePositive);
+    SetFlag(kAllowUndefinedAsNaN);
+  }
+
+  Range* InferRange(Zone* zone) override;
+
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HArithmeticBinaryOperation : public HBinaryOperation {
+ public:
+  HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right,
+                             Strength strength)
+      : HBinaryOperation(context, left, right, strength,
+                         HType::TaggedNumber()) {
+    SetAllSideEffects();
+    SetFlag(kFlexibleRepresentation);
+    if (!is_strong(strength)) SetFlag(kAllowUndefinedAsNaN);
+  }
+
+  void RepresentationChanged(Representation to) override {
+    if (to.IsTagged() &&
+        (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
+      SetAllSideEffects();
+      ClearFlag(kUseGVN);
+    } else {
+      ClearAllSideEffects();
+      SetFlag(kUseGVN);
+    }
+    if (to.IsTagged()) SetChangesFlag(kNewSpacePromotion);
+  }
+
+  DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
+
+ private:
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HCompareGeneric final : public HBinaryOperation {
+ public:
+  static HCompareGeneric* New(Isolate* isolate, Zone* zone, HValue* context,
+                              HValue* left, HValue* right, Token::Value token,
+                              Strength strength = Strength::WEAK) {
+    return new (zone) HCompareGeneric(context, left, right, token, strength);
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return index == 0
+        ? Representation::Tagged()
+        : representation();
+  }
+
+  Token::Value token() const { return token_; }
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
+
+ private:
+  HCompareGeneric(HValue* context, HValue* left, HValue* right,
+                  Token::Value token, Strength strength)
+      : HBinaryOperation(context, left, right, strength, HType::Boolean()),
+        token_(token) {
+    DCHECK(Token::IsCompareOp(token));
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  Token::Value token_;
+};
+
+
+class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
+ public:
+  static HCompareNumericAndBranch* New(Isolate* isolate, Zone* zone,
+                                       HValue* context, HValue* left,
+                                       HValue* right, Token::Value token,
+                                       HBasicBlock* true_target = NULL,
+                                       HBasicBlock* false_target = NULL,
+                                       Strength strength = Strength::WEAK) {
+    return new (zone) HCompareNumericAndBranch(left, right, token, true_target,
+                                               false_target, strength);
+  }
+  static HCompareNumericAndBranch* New(Isolate* isolate, Zone* zone,
+                                       HValue* context, HValue* left,
+                                       HValue* right, Token::Value token,
+                                       Strength strength) {
+    return new (zone)
+        HCompareNumericAndBranch(left, right, token, NULL, NULL, strength);
+  }
+
+  HValue* left() const { return OperandAt(0); }
+  HValue* right() const { return OperandAt(1); }
+  Token::Value token() const { return token_; }
+
+  void set_observed_input_representation(Representation left,
+                                         Representation right) {
+    observed_input_representation_[0] = left;
+    observed_input_representation_[1] = right;
+  }
+
+  void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+
+  Representation RequiredInputRepresentation(int index) override {
+    return representation();
+  }
+  Representation observed_input_representation(int index) override {
+    return observed_input_representation_[index];
+  }
+
+  bool KnownSuccessorBlock(HBasicBlock** block) override;
+
+  Strength strength() const { return strength_; }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  void SetOperandPositions(Zone* zone, SourcePosition left_pos,
+                           SourcePosition right_pos) {
+    set_operand_position(zone, 0, left_pos);
+    set_operand_position(zone, 1, right_pos);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch)
+
+ private:
+  HCompareNumericAndBranch(HValue* left, HValue* right, Token::Value token,
+                           HBasicBlock* true_target, HBasicBlock* false_target,
+                           Strength strength)
+      : token_(token), strength_(strength) {
+    SetFlag(kFlexibleRepresentation);
+    DCHECK(Token::IsCompareOp(token));
+    SetOperandAt(0, left);
+    SetOperandAt(1, right);
+    SetSuccessorAt(0, true_target);
+    SetSuccessorAt(1, false_target);
+  }
+
+  Representation observed_input_representation_[2];
+  Token::Value token_;
+  Strength strength_;
+};
+
+
+class HCompareHoleAndBranch final : public HUnaryControlInstruction {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*);
+  DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*,
+                                 HBasicBlock*, HBasicBlock*);
+
+  void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+
+  Representation RequiredInputRepresentation(int index) override {
+    return representation();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareHoleAndBranch)
+
+ private:
+  HCompareHoleAndBranch(HValue* value,
+                        HBasicBlock* true_target = NULL,
+                        HBasicBlock* false_target = NULL)
+      : HUnaryControlInstruction(value, true_target, false_target) {
+    SetFlag(kFlexibleRepresentation);
+    SetFlag(kAllowUndefinedAsNaN);
+  }
+};
+
+
+class HCompareMinusZeroAndBranch final : public HUnaryControlInstruction {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HCompareMinusZeroAndBranch, HValue*);
+
+  void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+
+  Representation RequiredInputRepresentation(int index) override {
+    return representation();
+  }
+
+  bool KnownSuccessorBlock(HBasicBlock** block) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch)
+
+ private:
+  explicit HCompareMinusZeroAndBranch(HValue* value)
+      : HUnaryControlInstruction(value, NULL, NULL) {
+  }
+};
+
+
+class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(HCompareObjectEqAndBranch, HValue*, HValue*);
+  DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*,
+                                 HBasicBlock*, HBasicBlock*);
+
+  bool KnownSuccessorBlock(HBasicBlock** block) override;
+
+  static const int kNoKnownSuccessorIndex = -1;
+  int known_successor_index() const { return known_successor_index_; }
+  void set_known_successor_index(int known_successor_index) {
+    known_successor_index_ = known_successor_index;
+  }
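+  // kNoKnownSuccessorIndex means neither successor is statically known;
+  // otherwise the index picks the branch taken unconditionally (0 is the
+  // true target, 1 the false target).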
+
+  HValue* left() const { return OperandAt(0); }
+  HValue* right() const { return OperandAt(1); }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  Representation observed_input_representation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareObjectEqAndBranch)
+
+ private:
+  HCompareObjectEqAndBranch(HValue* left,
+                            HValue* right,
+                            HBasicBlock* true_target = NULL,
+                            HBasicBlock* false_target = NULL)
+      : known_successor_index_(kNoKnownSuccessorIndex) {
+    SetOperandAt(0, left);
+    SetOperandAt(1, right);
+    SetSuccessorAt(0, true_target);
+    SetSuccessorAt(1, false_target);
+  }
+
+  int known_successor_index_;
+};
+
+
+class HIsStringAndBranch final : public HUnaryControlInstruction {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HIsStringAndBranch, HValue*);
+  DECLARE_INSTRUCTION_FACTORY_P3(HIsStringAndBranch, HValue*,
+                                 HBasicBlock*, HBasicBlock*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  bool KnownSuccessorBlock(HBasicBlock** block) override;
+
+  static const int kNoKnownSuccessorIndex = -1;
+  int known_successor_index() const { return known_successor_index_; }
+  void set_known_successor_index(int known_successor_index) {
+    known_successor_index_ = known_successor_index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
+
+ protected:
+  int RedefinedOperandIndex() override { return 0; }
+
+ private:
+  HIsStringAndBranch(HValue* value, HBasicBlock* true_target = NULL,
+                     HBasicBlock* false_target = NULL)
+      : HUnaryControlInstruction(value, true_target, false_target),
+        known_successor_index_(kNoKnownSuccessorIndex) {
+    set_representation(Representation::Tagged());
+  }
+
+  int known_successor_index_;
+};
+
+
+class HIsSmiAndBranch final : public HUnaryControlInstruction {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HIsSmiAndBranch, HValue*);
+  DECLARE_INSTRUCTION_FACTORY_P3(HIsSmiAndBranch, HValue*,
+                                 HBasicBlock*, HBasicBlock*);
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+  int RedefinedOperandIndex() override { return 0; }
+
+ private:
+  HIsSmiAndBranch(HValue* value,
+                  HBasicBlock* true_target = NULL,
+                  HBasicBlock* false_target = NULL)
+      : HUnaryControlInstruction(value, true_target, false_target) {
+    set_representation(Representation::Tagged());
+  }
+};
+
+
+class HIsUndetectableAndBranch final : public HUnaryControlInstruction {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HIsUndetectableAndBranch, HValue*);
+  DECLARE_INSTRUCTION_FACTORY_P3(HIsUndetectableAndBranch, HValue*,
+                                 HBasicBlock*, HBasicBlock*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  bool KnownSuccessorBlock(HBasicBlock** block) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
+
+ private:
+  HIsUndetectableAndBranch(HValue* value,
+                           HBasicBlock* true_target = NULL,
+                           HBasicBlock* false_target = NULL)
+      : HUnaryControlInstruction(value, true_target, false_target) {}
+};
+
+
+class HStringCompareAndBranch final : public HTemplateControlInstruction<2, 3> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HStringCompareAndBranch,
+                                              HValue*,
+                                              HValue*,
+                                              Token::Value);
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* left() const { return OperandAt(1); }
+  HValue* right() const { return OperandAt(2); }
+  Token::Value token() const { return token_; }
+
+  std::ostream& PrintDataTo(std::ostream& os) const final;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) final {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch)
+
+ private:
+  HStringCompareAndBranch(HValue* context, HValue* left, HValue* right,
+                          Token::Value token)
+      : token_(token) {
+    DCHECK(Token::IsCompareOp(token));
+    SetOperandAt(0, context);
+    SetOperandAt(1, left);
+    SetOperandAt(2, right);
+    set_representation(Representation::Tagged());
+    SetChangesFlag(kNewSpacePromotion);
+    SetDependsOnFlag(kStringChars);
+    SetDependsOnFlag(kStringLengths);
+  }
+
+  Token::Value const token_;
+};
+
+
+class HHasInstanceTypeAndBranch final : public HUnaryControlInstruction {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(
+      HHasInstanceTypeAndBranch, HValue*, InstanceType);
+  DECLARE_INSTRUCTION_FACTORY_P3(
+      HHasInstanceTypeAndBranch, HValue*, InstanceType, InstanceType);
+
+  InstanceType from() { return from_; }
+  InstanceType to() { return to_; }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  bool KnownSuccessorBlock(HBasicBlock** block) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
+
+ private:
+  HHasInstanceTypeAndBranch(HValue* value, InstanceType type)
+      : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
+  HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
+      : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
+    DCHECK(to == LAST_TYPE);  // Others not implemented yet in backend.
+  }
+
+  InstanceType from_;
+  InstanceType to_;  // Inclusive range, not all combinations work.
+};
+
+
+class HHasCachedArrayIndexAndBranch final : public HUnaryControlInstruction {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HHasCachedArrayIndexAndBranch, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch)
+ private:
+  explicit HHasCachedArrayIndexAndBranch(HValue* value)
+      : HUnaryControlInstruction(value, NULL, NULL) { }
+};
+
+
+class HGetCachedArrayIndex final : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HGetCachedArrayIndex, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+  }
+
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HClassOfTestAndBranch final : public HUnaryControlInstruction {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(HClassOfTestAndBranch, HValue*,
+                                 Handle<String>);
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Handle<String> class_name() const { return class_name_; }
+
+ private:
+  HClassOfTestAndBranch(HValue* value, Handle<String> class_name)
+      : HUnaryControlInstruction(value, NULL, NULL),
+        class_name_(class_name) { }
+
+  Handle<String> class_name_;
+};
+
+
+class HTypeofIsAndBranch final : public HUnaryControlInstruction {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
+
+  Handle<String> type_literal() const { return type_literal_.handle(); }
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  bool KnownSuccessorBlock(HBasicBlock** block) override;
+
+  void FinalizeUniqueness() override {
+    type_literal_ = Unique<String>(type_literal_.handle());
+  }
+
+ private:
+  HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
+      : HUnaryControlInstruction(value, NULL, NULL),
+        type_literal_(Unique<String>::CreateUninitialized(type_literal)) { }
+
+  Unique<String> type_literal_;
+};
+
+
+class HInstanceOf final : public HBinaryOperation {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOf, HValue*, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
+
+ private:
+  HInstanceOf(HValue* context, HValue* left, HValue* right)
+      : HBinaryOperation(context, left, right, Strength::WEAK,
+                         HType::Boolean()) {
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+};
+
+
+class HHasInPrototypeChainAndBranch final
+    : public HTemplateControlInstruction<2, 2> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(HHasInPrototypeChainAndBranch, HValue*,
+                                 HValue*);
+
+  HValue* object() const { return OperandAt(0); }
+  HValue* prototype() const { return OperandAt(1); }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  bool ObjectNeedsSmiCheck() const {
+    return !object()->type().IsHeapObject() &&
+           !object()->representation().IsHeapObject();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch)
+
+ private:
+  HHasInPrototypeChainAndBranch(HValue* object, HValue* prototype) {
+    SetOperandAt(0, object);
+    SetOperandAt(1, prototype);
+    SetDependsOnFlag(kCalls);
+  }
+};
+
+
+class HPower final : public HTemplateInstruction<2> {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right);
+
+  HValue* left() { return OperandAt(0); }
+  HValue* right() const { return OperandAt(1); }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return index == 0
+      ? Representation::Double()
+      : Representation::None();
+  }
+  Representation observed_input_representation(int index) override {
+    return RequiredInputRepresentation(index);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  HPower(HValue* left, HValue* right) {
+    SetOperandAt(0, left);
+    SetOperandAt(1, right);
+    set_representation(Representation::Double());
+    SetFlag(kUseGVN);
+    SetChangesFlag(kNewSpacePromotion);
+  }
+
+  bool IsDeletable() const override {
+    return !right()->representation().IsTagged();
+  }
+};
+
+
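+// Classifies adds whose left operand is an external (raw) pointer; the HAdd
+// constructor below fixes up NoExternalAdd when the left operand turns out
+// to be external.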
+enum ExternalAddType {
+  AddOfExternalAndTagged,
+  AddOfExternalAndInt32,
+  NoExternalAdd
+};
+
+
+class HAdd final : public HArithmeticBinaryOperation {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right,
+                           Strength strength = Strength::WEAK);
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right, Strength strength,
+                           ExternalAddType external_add_type);
+
+  // Add is only commutative if two integer values are added and not if two
+  // tagged values are added (because it might be a String concatenation).
+  // We also do not commute (pointer + offset).
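+  // For example, "1" + 2 evaluates to "12" while 2 + "1" evaluates to "21".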
+  bool IsCommutative() const override {
+    return !representation().IsTagged() && !representation().IsExternal();
+  }
+
+  HValue* Canonicalize() override;
+
+  bool TryDecompose(DecompositionResult* decomposition) override {
+    if (left()->IsInteger32Constant()) {
+      decomposition->Apply(right(), left()->GetInteger32Constant());
+      return true;
+    } else if (right()->IsInteger32Constant()) {
+      decomposition->Apply(left(), right()->GetInteger32Constant());
+      return true;
+    } else {
+      return false;
+    }
+  }
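+  // The decomposition above rewrites (index + constant) as a base plus
+  // offset, which lets later phases such as bounds-check elimination reason
+  // about related accesses together.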
+
+  void RepresentationChanged(Representation to) override {
+    if (to.IsTagged() &&
+        (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
+         left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) {
+      SetAllSideEffects();
+      ClearFlag(kUseGVN);
+    } else {
+      ClearAllSideEffects();
+      SetFlag(kUseGVN);
+    }
+    if (to.IsTagged()) {
+      SetChangesFlag(kNewSpacePromotion);
+      ClearFlag(kAllowUndefinedAsNaN);
+    }
+  }
+
+  Representation RepresentationFromInputs() override;
+
+  Representation RequiredInputRepresentation(int index) override;
+
+  bool IsConsistentExternalRepresentation() {
+    return left()->representation().IsExternal() &&
+           ((external_add_type_ == AddOfExternalAndInt32 &&
+             right()->representation().IsInteger32()) ||
+            (external_add_type_ == AddOfExternalAndTagged &&
+             right()->representation().IsTagged()));
+  }
+
+  ExternalAddType external_add_type() const { return external_add_type_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Add)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+  Range* InferRange(Zone* zone) override;
+
+ private:
+  HAdd(HValue* context, HValue* left, HValue* right, Strength strength,
+       ExternalAddType external_add_type = NoExternalAdd)
+      : HArithmeticBinaryOperation(context, left, right, strength),
+        external_add_type_(external_add_type) {
+    SetFlag(kCanOverflow);
+    switch (external_add_type_) {
+      case AddOfExternalAndTagged:
+        DCHECK(left->representation().IsExternal());
+        DCHECK(right->representation().IsTagged());
+        SetDependsOnFlag(kNewSpacePromotion);
+        ClearFlag(HValue::kCanOverflow);
+        SetFlag(kHasNoObservableSideEffects);
+        break;
+
+      case NoExternalAdd:
+        // This is a bit of a hack: The call to this constructor is generated
+        // by a macro that also supports sub and mul, so it doesn't pass in
+        // a value for external_add_type but uses the default.
+        if (left->representation().IsExternal()) {
+          external_add_type_ = AddOfExternalAndInt32;
+        }
+        break;
+
+      case AddOfExternalAndInt32:
+        // See comment above.
+        UNREACHABLE();
+        break;
+    }
+  }
+
+  ExternalAddType external_add_type_;
+};
+
+
+class HSub final : public HArithmeticBinaryOperation {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right,
+                           Strength strength = Strength::WEAK);
+
+  HValue* Canonicalize() override;
+
+  bool TryDecompose(DecompositionResult* decomposition) override {
+    if (right()->IsInteger32Constant()) {
+      decomposition->Apply(left(), -right()->GetInteger32Constant());
+      return true;
+    } else {
+      return false;
+    }
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Sub)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+  Range* InferRange(Zone* zone) override;
+
+ private:
+  HSub(HValue* context, HValue* left, HValue* right, Strength strength)
+      : HArithmeticBinaryOperation(context, left, right, strength) {
+    SetFlag(kCanOverflow);
+  }
+};
+
+
+class HMul final : public HArithmeticBinaryOperation {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right,
+                           Strength strength = Strength::WEAK);
+
+  static HInstruction* NewImul(Isolate* isolate, Zone* zone, HValue* context,
+                               HValue* left, HValue* right,
+                               Strength strength = Strength::WEAK) {
+    HInstruction* instr =
+        HMul::New(isolate, zone, context, left, right, strength);
+    if (!instr->IsMul()) return instr;
+    HMul* mul = HMul::cast(instr);
+    // TODO(mstarzinger): Prevent bailout on minus zero for imul.
+    mul->AssumeRepresentation(Representation::Integer32());
+    mul->ClearFlag(HValue::kCanOverflow);
+    return mul;
+  }
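+  // NewImul is used when inlining Math.imul: the product is always truncated
+  // to int32, so the overflow flag can be cleared up front.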
+
+  HValue* Canonicalize() override;
+
+  // Only commutative if it is certain that two objects are not being
+  // multiplied.
+  bool IsCommutative() const override { return !representation().IsTagged(); }
+
+  void UpdateRepresentation(Representation new_rep,
+                            HInferRepresentationPhase* h_infer,
+                            const char* reason) override {
+    HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+  }
+
+  bool MulMinusOne();
+
+  DECLARE_CONCRETE_INSTRUCTION(Mul)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+  Range* InferRange(Zone* zone) override;
+
+ private:
+  HMul(HValue* context, HValue* left, HValue* right, Strength strength)
+      : HArithmeticBinaryOperation(context, left, right, strength) {
+    SetFlag(kCanOverflow);
+  }
+};
+
+
+class HMod final : public HArithmeticBinaryOperation {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right,
+                           Strength strength = Strength::WEAK);
+
+  HValue* Canonicalize() override;
+
+  void UpdateRepresentation(Representation new_rep,
+                            HInferRepresentationPhase* h_infer,
+                            const char* reason) override {
+    if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+    HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Mod)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+  Range* InferRange(Zone* zone) override;
+
+ private:
+  HMod(HValue* context, HValue* left, HValue* right, Strength strength)
+      : HArithmeticBinaryOperation(context, left, right, strength) {
+    SetFlag(kCanBeDivByZero);
+    SetFlag(kCanOverflow);
+    SetFlag(kLeftCanBeNegative);
+  }
+};
+
+
+class HDiv final : public HArithmeticBinaryOperation {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right,
+                           Strength strength = Strength::WEAK);
+
+  HValue* Canonicalize() override;
+
+  void UpdateRepresentation(Representation new_rep,
+                            HInferRepresentationPhase* h_infer,
+                            const char* reason) override {
+    if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+    HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Div)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+  Range* InferRange(Zone* zone) override;
+
+ private:
+  HDiv(HValue* context, HValue* left, HValue* right, Strength strength)
+      : HArithmeticBinaryOperation(context, left, right, strength) {
+    SetFlag(kCanBeDivByZero);
+    SetFlag(kCanOverflow);
+  }
+};
+
+
+class HMathMinMax final : public HArithmeticBinaryOperation {
+ public:
+  enum Operation { kMathMin, kMathMax };
+
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right, Operation op);
+
+  Representation observed_input_representation(int index) override {
+    return RequiredInputRepresentation(index);
+  }
+
+  void InferRepresentation(HInferRepresentationPhase* h_infer) override;
+
+  Representation RepresentationFromInputs() override {
+    Representation left_rep = left()->representation();
+    Representation right_rep = right()->representation();
+    Representation result = Representation::Smi();
+    result = result.generalize(left_rep);
+    result = result.generalize(right_rep);
+    if (result.IsTagged()) return Representation::Double();
+    return result;
+  }
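+  // Falling back from Tagged to Double above is safe: min/max is a purely
+  // numeric operation, so an untagged double result always suffices.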
+
+  bool IsCommutative() const override { return true; }
+
+  Operation operation() { return operation_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    return other->IsMathMinMax() &&
+        HMathMinMax::cast(other)->operation_ == operation_;
+  }
+
+  Range* InferRange(Zone* zone) override;
+
+ private:
+  HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
+      : HArithmeticBinaryOperation(context, left, right, Strength::WEAK),
+        operation_(op) {}
+
+  Operation operation_;
+};
+
+
+class HBitwise final : public HBitwiseBinaryOperation {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           Token::Value op, HValue* left, HValue* right,
+                           Strength strength = Strength::WEAK);
+
+  Token::Value op() const { return op_; }
+
+  bool IsCommutative() const override { return true; }
+
+  HValue* Canonicalize() override;
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(Bitwise)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    return op() == HBitwise::cast(other)->op();
+  }
+
+  Range* InferRange(Zone* zone) override;
+
+ private:
+  HBitwise(HValue* context, Token::Value op, HValue* left, HValue* right,
+           Strength strength)
+      : HBitwiseBinaryOperation(context, left, right, strength), op_(op) {
+    DCHECK(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
+    // BIT_AND with a smi-range positive value will always unset the
+    // entire sign-extension of the smi-sign.
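+    // For example, x & 0xff always yields a value in [0, 255] regardless of
+    // the sign of x, so both inputs may safely be truncated.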
+    if (op == Token::BIT_AND &&
+        ((left->IsConstant() &&
+          left->representation().IsSmi() &&
+          HConstant::cast(left)->Integer32Value() >= 0) ||
+         (right->IsConstant() &&
+          right->representation().IsSmi() &&
+          HConstant::cast(right)->Integer32Value() >= 0))) {
+      SetFlag(kTruncatingToSmi);
+      SetFlag(kTruncatingToInt32);
+    // BIT_OR with a smi-range negative value will always set the entire
+    // sign-extension of the smi-sign.
+    } else if (op == Token::BIT_OR &&
+        ((left->IsConstant() &&
+          left->representation().IsSmi() &&
+          HConstant::cast(left)->Integer32Value() < 0) ||
+         (right->IsConstant() &&
+          right->representation().IsSmi() &&
+          HConstant::cast(right)->Integer32Value() < 0))) {
+      SetFlag(kTruncatingToSmi);
+      SetFlag(kTruncatingToInt32);
+    }
+  }
+
+  Token::Value op_;
+};
+
+
+class HShl final : public HBitwiseBinaryOperation {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right,
+                           Strength strength = Strength::WEAK);
+
+  Range* InferRange(Zone* zone) override;
+
+  void UpdateRepresentation(Representation new_rep,
+                            HInferRepresentationPhase* h_infer,
+                            const char* reason) override {
+    if (new_rep.IsSmi() &&
+        !(right()->IsInteger32Constant() &&
+          right()->GetInteger32Constant() >= 0)) {
+      new_rep = Representation::Integer32();
+    }
+    HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Shl)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  HShl(HValue* context, HValue* left, HValue* right, Strength strength)
+      : HBitwiseBinaryOperation(context, left, right, strength) {}
+};
+
+
+class HShr final : public HBitwiseBinaryOperation {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right,
+                           Strength strength = Strength::WEAK);
+
+  bool TryDecompose(DecompositionResult* decomposition) override {
+    if (right()->IsInteger32Constant()) {
+      if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
+        // This is intended to look for HAdd and HSub, to handle compounds
+        // like ((base + offset) >> scale) with a single decomposition.
+        left()->TryDecompose(decomposition);
+        return true;
+      }
+    }
+    return false;
+  }
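+  // E.g. ((index + 4) >> 1) is tracked as base |index|, offset 4 and shift
+  // scale 1 in a single pass rather than via two separate decompositions.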
+
+  Range* InferRange(Zone* zone) override;
+
+  void UpdateRepresentation(Representation new_rep,
+                            HInferRepresentationPhase* h_infer,
+                            const char* reason) override {
+    if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+    HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Shr)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  HShr(HValue* context, HValue* left, HValue* right, Strength strength)
+      : HBitwiseBinaryOperation(context, left, right, strength) {}
+};
+
+
+class HSar final : public HBitwiseBinaryOperation {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right,
+                           Strength strength = Strength::WEAK);
+
+  bool TryDecompose(DecompositionResult* decomposition) override {
+    if (right()->IsInteger32Constant()) {
+      if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
+        // This is intended to look for HAdd and HSub, to handle compounds
+        // like ((base + offset) >> scale) with a single decomposition.
+        left()->TryDecompose(decomposition);
+        return true;
+      }
+    }
+    return false;
+  }
+
+  Range* InferRange(Zone* zone) override;
+
+  void UpdateRepresentation(Representation new_rep,
+                            HInferRepresentationPhase* h_infer,
+                            const char* reason) override {
+    if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+    HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Sar)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  HSar(HValue* context, HValue* left, HValue* right, Strength strength)
+      : HBitwiseBinaryOperation(context, left, right, strength) {}
+};
+
+
+class HRor final : public HBitwiseBinaryOperation {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* left, HValue* right,
+                           Strength strength = Strength::WEAK) {
+    return new (zone) HRor(context, left, right, strength);
+  }
+
+  void UpdateRepresentation(Representation new_rep,
+                            HInferRepresentationPhase* h_infer,
+                            const char* reason) override {
+    if (new_rep.IsSmi()) new_rep = Representation::Integer32();
+    HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Ror)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  HRor(HValue* context, HValue* left, HValue* right, Strength strength)
+      : HBitwiseBinaryOperation(context, left, right, strength) {
+    ChangeRepresentation(Representation::Integer32());
+  }
+};
+
+
+class HOsrEntry final : public HTemplateInstruction<0> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HOsrEntry, BailoutId);
+
+  BailoutId ast_id() const { return ast_id_; }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry)
+
+ private:
+  explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
+    SetChangesFlag(kOsrEntries);
+    SetChangesFlag(kNewSpacePromotion);
+  }
+
+  BailoutId ast_id_;
+};
+
+
+class HParameter final : public HTemplateInstruction<0> {
+ public:
+  enum ParameterKind {
+    STACK_PARAMETER,
+    REGISTER_PARAMETER
+  };
+
+  DECLARE_INSTRUCTION_FACTORY_P1(HParameter, unsigned);
+  DECLARE_INSTRUCTION_FACTORY_P2(HParameter, unsigned, ParameterKind);
+  DECLARE_INSTRUCTION_FACTORY_P3(HParameter, unsigned, ParameterKind,
+                                 Representation);
+
+  unsigned index() const { return index_; }
+  ParameterKind kind() const { return kind_; }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  Representation KnownOptimalRepresentation() override {
+    // If a parameter is an input to a phi, that phi should not
+    // choose any more optimistic representation than Tagged.
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Parameter)
+
+ private:
+  explicit HParameter(unsigned index,
+                      ParameterKind kind = STACK_PARAMETER)
+      : index_(index),
+        kind_(kind) {
+    set_representation(Representation::Tagged());
+  }
+
+  explicit HParameter(unsigned index,
+                      ParameterKind kind,
+                      Representation r)
+      : index_(index),
+        kind_(kind) {
+    set_representation(r);
+  }
+
+  unsigned index_;
+  ParameterKind kind_;
+};
+
+
+class HCallStub final : public HUnaryCall {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallStub, CodeStub::Major, int);
+  CodeStub::Major major_key() { return major_key_; }
+
+  HValue* context() { return value(); }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub)
+
+ private:
+  HCallStub(HValue* context, CodeStub::Major major_key, int argument_count)
+      : HUnaryCall(context, argument_count),
+        major_key_(major_key) {
+  }
+
+  CodeStub::Major major_key_;
+};
+
+
+class HUnknownOSRValue final : public HTemplateInstruction<0> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int);
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::None();
+  }
+
+  void set_incoming_value(HPhi* value) { incoming_value_ = value; }
+  HPhi* incoming_value() { return incoming_value_; }
+  HEnvironment* environment() { return environment_; }
+  int index() { return index_; }
+
+  Representation KnownOptimalRepresentation() override {
+    if (incoming_value_ == NULL) return Representation::None();
+    return incoming_value_->KnownOptimalRepresentation();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue)
+
+ private:
+  HUnknownOSRValue(HEnvironment* environment, int index)
+      : environment_(environment),
+        index_(index),
+        incoming_value_(NULL) {
+    set_representation(Representation::Tagged());
+  }
+
+  HEnvironment* environment_;
+  int index_;
+  HPhi* incoming_value_;
+};
+
+
+class HLoadGlobalGeneric final : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*,
+                                              Handle<String>, TypeofMode);
+
+  HValue* context() { return OperandAt(0); }
+  HValue* global_object() { return OperandAt(1); }
+  Handle<String> name() const { return name_; }
+  TypeofMode typeof_mode() const { return typeof_mode_; }
+  FeedbackVectorSlot slot() const { return slot_; }
+  Handle<TypeFeedbackVector> feedback_vector() const {
+    return feedback_vector_;
+  }
+  bool HasVectorAndSlot() const { return true; }
+  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
+                        FeedbackVectorSlot slot) {
+    feedback_vector_ = vector;
+    slot_ = slot;
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
+
+ private:
+  HLoadGlobalGeneric(HValue* context, HValue* global_object,
+                     Handle<String> name, TypeofMode typeof_mode)
+      : name_(name), typeof_mode_(typeof_mode) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, global_object);
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  Handle<String> name_;
+  TypeofMode typeof_mode_;
+  Handle<TypeFeedbackVector> feedback_vector_;
+  FeedbackVectorSlot slot_;
+};
+
+
+class HAllocate final : public HTemplateInstruction<2> {
+ public:
+  static bool CompatibleInstanceTypes(InstanceType type1,
+                                      InstanceType type2) {
+    return ComputeFlags(TENURED, type1) == ComputeFlags(TENURED, type2) &&
+        ComputeFlags(NOT_TENURED, type1) == ComputeFlags(NOT_TENURED, type2);
+  }
+
+  static HAllocate* New(
+      Isolate* isolate, Zone* zone, HValue* context, HValue* size, HType type,
+      PretenureFlag pretenure_flag, InstanceType instance_type,
+      Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null()) {
+    return new(zone) HAllocate(context, size, type, pretenure_flag,
+        instance_type, allocation_site);
+  }
+
+  // Maximum instance size for which allocations will be inlined.
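+  // (64 words: 256 bytes on 32-bit targets, 512 bytes on 64-bit targets.)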
+  static const int kMaxInlineSize = 64 * kPointerSize;
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* size() const { return OperandAt(1); }
+
+  bool has_size_upper_bound() { return size_upper_bound_ != NULL; }
+  HConstant* size_upper_bound() { return size_upper_bound_; }
+  void set_size_upper_bound(HConstant* value) {
+    DCHECK(size_upper_bound_ == NULL);
+    size_upper_bound_ = value;
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    if (index == 0) {
+      return Representation::Tagged();
+    } else {
+      return Representation::Integer32();
+    }
+  }
+
+  Handle<Map> GetMonomorphicJSObjectMap() override {
+    return known_initial_map_;
+  }
+
+  void set_known_initial_map(Handle<Map> known_initial_map) {
+    known_initial_map_ = known_initial_map;
+  }
+
+  bool IsNewSpaceAllocation() const {
+    return (flags_ & ALLOCATE_IN_NEW_SPACE) != 0;
+  }
+
+  bool IsOldSpaceAllocation() const {
+    return (flags_ & ALLOCATE_IN_OLD_SPACE) != 0;
+  }
+
+  bool MustAllocateDoubleAligned() const {
+    return (flags_ & ALLOCATE_DOUBLE_ALIGNED) != 0;
+  }
+
+  bool MustPrefillWithFiller() const {
+    return (flags_ & PREFILL_WITH_FILLER) != 0;
+  }
+
+  void MakePrefillWithFiller() {
+    flags_ = static_cast<HAllocate::Flags>(flags_ | PREFILL_WITH_FILLER);
+  }
+
+  bool MustClearNextMapWord() const {
+    return (flags_ & CLEAR_NEXT_MAP_WORD) != 0;
+  }
+
+  void MakeDoubleAligned() {
+    flags_ = static_cast<HAllocate::Flags>(flags_ | ALLOCATE_DOUBLE_ALIGNED);
+  }
+
+  bool HandleSideEffectDominator(GVNFlag side_effect,
+                                 HValue* dominator) override;
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate)
+
+ private:
+  enum Flags {
+    ALLOCATE_IN_NEW_SPACE = 1 << 0,
+    ALLOCATE_IN_OLD_SPACE = 1 << 2,
+    ALLOCATE_DOUBLE_ALIGNED = 1 << 3,
+    PREFILL_WITH_FILLER = 1 << 4,
+    CLEAR_NEXT_MAP_WORD = 1 << 5
+  };
+
+  HAllocate(HValue* context,
+            HValue* size,
+            HType type,
+            PretenureFlag pretenure_flag,
+            InstanceType instance_type,
+            Handle<AllocationSite> allocation_site =
+                Handle<AllocationSite>::null())
+      : HTemplateInstruction<2>(type),
+        flags_(ComputeFlags(pretenure_flag, instance_type)),
+        dominating_allocate_(NULL),
+        filler_free_space_size_(NULL),
+        size_upper_bound_(NULL) {
+    SetOperandAt(0, context);
+    UpdateSize(size);
+    set_representation(Representation::Tagged());
+    SetFlag(kTrackSideEffectDominators);
+    SetChangesFlag(kNewSpacePromotion);
+    SetDependsOnFlag(kNewSpacePromotion);
+
+    if (FLAG_trace_pretenuring) {
+      PrintF("HAllocate with AllocationSite %p %s\n",
+             allocation_site.is_null()
+                 ? static_cast<void*>(NULL)
+                 : static_cast<void*>(*allocation_site),
+             pretenure_flag == TENURED ? "tenured" : "not tenured");
+    }
+  }
+
+  static Flags ComputeFlags(PretenureFlag pretenure_flag,
+                            InstanceType instance_type) {
+    Flags flags = pretenure_flag == TENURED ? ALLOCATE_IN_OLD_SPACE
+                                            : ALLOCATE_IN_NEW_SPACE;
+    if (instance_type == FIXED_DOUBLE_ARRAY_TYPE) {
+      flags = static_cast<Flags>(flags | ALLOCATE_DOUBLE_ALIGNED);
+    }
+    // We have to fill the allocated object with one-word fillers if we do
+    // not use allocation folding, since some allocations may depend on each
+    // other, i.e., have a pointer to each other. A GC between these
+    // allocations may otherwise leave such objects behind in an incompletely
+    // initialized state.
+    if (!FLAG_use_gvn || !FLAG_use_allocation_folding) {
+      flags = static_cast<Flags>(flags | PREFILL_WITH_FILLER);
+    }
+    if (pretenure_flag == NOT_TENURED &&
+        AllocationSite::CanTrack(instance_type)) {
+      flags = static_cast<Flags>(flags | CLEAR_NEXT_MAP_WORD);
+    }
+    return flags;
+  }
+
+  void UpdateClearNextMapWord(bool clear_next_map_word) {
+    flags_ = static_cast<Flags>(clear_next_map_word
+                                ? flags_ | CLEAR_NEXT_MAP_WORD
+                                : flags_ & ~CLEAR_NEXT_MAP_WORD);
+  }
+
+  void UpdateSize(HValue* size) {
+    SetOperandAt(1, size);
+    if (size->IsInteger32Constant()) {
+      size_upper_bound_ = HConstant::cast(size);
+    } else {
+      size_upper_bound_ = NULL;
+    }
+  }
+
+  HAllocate* GetFoldableDominator(HAllocate* dominator);
+
+  void UpdateFreeSpaceFiller(int32_t filler_size);
+
+  void CreateFreeSpaceFiller(int32_t filler_size);
+
+  bool IsFoldable(HAllocate* allocate) {
+    return (IsNewSpaceAllocation() && allocate->IsNewSpaceAllocation()) ||
+           (IsOldSpaceAllocation() && allocate->IsOldSpaceAllocation());
+  }
+
+  void ClearNextMapWord(int offset);
+
+  Flags flags_;
+  Handle<Map> known_initial_map_;
+  HAllocate* dominating_allocate_;
+  HStoreNamedField* filler_free_space_size_;
+  HConstant* size_upper_bound_;
+};
+
+
+class HStoreCodeEntry final : public HTemplateInstruction<2> {
+ public:
+  static HStoreCodeEntry* New(Isolate* isolate, Zone* zone, HValue* context,
+                              HValue* function, HValue* code) {
+    return new(zone) HStoreCodeEntry(function, code);
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  HValue* function() { return OperandAt(0); }
+  HValue* code_object() { return OperandAt(1); }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry)
+
+ private:
+  HStoreCodeEntry(HValue* function, HValue* code) {
+    SetOperandAt(0, function);
+    SetOperandAt(1, code);
+  }
+};
+
+
+class HInnerAllocatedObject final : public HTemplateInstruction<2> {
+ public:
+  static HInnerAllocatedObject* New(Isolate* isolate, Zone* zone,
+                                    HValue* context, HValue* value,
+                                    HValue* offset, HType type) {
+    return new(zone) HInnerAllocatedObject(value, offset, type);
+  }
+
+  HValue* base_object() const { return OperandAt(0); }
+  HValue* offset() const { return OperandAt(1); }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return index == 0 ? Representation::Tagged() : Representation::Integer32();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
+
+ private:
+  HInnerAllocatedObject(HValue* value,
+                        HValue* offset,
+                        HType type) : HTemplateInstruction<2>(type) {
+    DCHECK(value->IsAllocate());
+    DCHECK(type.IsHeapObject());
+    SetOperandAt(0, value);
+    SetOperandAt(1, offset);
+    set_representation(Representation::Tagged());
+  }
+};
+
+
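+// Smis are not heap objects, and null, booleans, undefined and immortal
+// immovable constants never live in new space, so storing them cannot create
+// a pointer that the write barrier would have to record.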
+inline bool StoringValueNeedsWriteBarrier(HValue* value) {
+  return !value->type().IsSmi()
+      && !value->type().IsNull()
+      && !value->type().IsBoolean()
+      && !value->type().IsUndefined()
+      && !(value->IsConstant() && HConstant::cast(value)->ImmortalImmovable());
+}
+
+
+inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
+                                            HValue* value,
+                                            HValue* dominator) {
+  while (object->IsInnerAllocatedObject()) {
+    object = HInnerAllocatedObject::cast(object)->base_object();
+  }
+  if (object->IsConstant() &&
+      HConstant::cast(object)->HasExternalReferenceValue()) {
+    // Stores to external references require no write barriers.
+    return false;
+  }
+  // We definitely need a write barrier unless the object is the allocation
+  // dominator.
+  if (object == dominator && object->IsAllocate()) {
+    // Stores to new space allocations require no write barriers.
+    if (HAllocate::cast(object)->IsNewSpaceAllocation()) {
+      return false;
+    }
+    // Stores to old space allocations require no write barriers if the
+    // value is a constant provably not in new space.
+    if (value->IsConstant() && HConstant::cast(value)->NotInNewSpace()) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+inline PointersToHereCheck PointersToHereCheckForObject(HValue* object,
+                                                        HValue* dominator) {
+  while (object->IsInnerAllocatedObject()) {
+    object = HInnerAllocatedObject::cast(object)->base_object();
+  }
+  if (object == dominator &&
+      object->IsAllocate() &&
+      HAllocate::cast(object)->IsNewSpaceAllocation()) {
+    return kPointersToHereAreAlwaysInteresting;
+  }
+  return kPointersToHereMaybeInteresting;
+}
+
+
+class HLoadContextSlot final : public HUnaryOperation {
+ public:
+  enum Mode {
+    // Perform a normal load of the context slot without checking its value.
+    kNoCheck,
+    // Load and check the value of the context slot. Deoptimize if it's the
+    // hole value. This is used to detect loads of uninitialized harmony
+    // bindings, where we deoptimize into full-codegen generated code which
+    // will subsequently throw a reference error.
+    kCheckDeoptimize,
+    // Load and check the value of the context slot. Return undefined if it's
+    // the hole value. This is used for non-harmony const assignments.
+    kCheckReturnUndefined
+  };
+
+  HLoadContextSlot(HValue* context, int slot_index, Mode mode)
+      : HUnaryOperation(context), slot_index_(slot_index), mode_(mode) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetDependsOnFlag(kContextSlots);
+  }
+
+  int slot_index() const { return slot_index_; }
+  Mode mode() const { return mode_; }
+
+  bool DeoptimizesOnHole() {
+    return mode_ == kCheckDeoptimize;
+  }
+
+  bool RequiresHoleCheck() const {
+    return mode_ != kNoCheck;
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    HLoadContextSlot* b = HLoadContextSlot::cast(other);
+    return (slot_index() == b->slot_index());
+  }
+
+ private:
+  bool IsDeletable() const override { return !RequiresHoleCheck(); }
+
+  int slot_index_;
+  Mode mode_;
+};
+
+
+class HStoreContextSlot final : public HTemplateInstruction<2> {
+ public:
+  enum Mode {
+    // Perform a normal store to the context slot without checking its previous
+    // value.
+    kNoCheck,
+    // Check the previous value of the context slot and deoptimize if it's the
+    // hole value. This is used to detect assignments to uninitialized
+    // harmony bindings, where we deoptimize into full-codegen generated code
+    // which will subsequently throw a reference error.
+    kCheckDeoptimize,
+    // Check the previous value and ignore the assignment if it isn't the
+    // hole value.
+    kCheckIgnoreAssignment
+  };
+
+  DECLARE_INSTRUCTION_FACTORY_P4(HStoreContextSlot, HValue*, int,
+                                 Mode, HValue*);
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
+  int slot_index() const { return slot_index_; }
+  Mode mode() const { return mode_; }
+
+  bool NeedsWriteBarrier() {
+    return StoringValueNeedsWriteBarrier(value());
+  }
+
+  bool DeoptimizesOnHole() {
+    return mode_ == kCheckDeoptimize;
+  }
+
+  bool RequiresHoleCheck() {
+    return mode_ != kNoCheck;
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
+
+ private:
+  HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value)
+      : slot_index_(slot_index), mode_(mode) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, value);
+    SetChangesFlag(kContextSlots);
+  }
+
+  int slot_index_;
+  Mode mode_;
+};
+
+
+// Represents an access to a portion of an object, such as the map pointer,
+// array elements pointer, etc., but not accesses to array elements
+// themselves.
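+// For example, ForMap() below describes the map word of a JSObject and
+// ForArrayLength() the length field of a JSArray.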
+class HObjectAccess final {
+ public:
+  inline bool IsInobject() const {
+    return portion() != kBackingStore && portion() != kExternalMemory;
+  }
+
+  inline bool IsExternalMemory() const {
+    return portion() == kExternalMemory;
+  }
+
+  inline bool IsStringLength() const {
+    return portion() == kStringLengths;
+  }
+
+  inline bool IsMap() const {
+    return portion() == kMaps;
+  }
+
+  inline int offset() const {
+    return OffsetField::decode(value_);
+  }
+
+  inline Representation representation() const {
+    return Representation::FromKind(RepresentationField::decode(value_));
+  }
+
+  inline Handle<Name> name() const { return name_; }
+
+  inline bool immutable() const {
+    return ImmutableField::decode(value_);
+  }
+
+  // Returns true if access is being made to an in-object property that
+  // was already added to the object.
+  inline bool existing_inobject_property() const {
+    return ExistingInobjectPropertyField::decode(value_);
+  }
+
+  inline HObjectAccess WithRepresentation(Representation representation) {
+    return HObjectAccess(portion(), offset(), representation, name(),
+                         immutable(), existing_inobject_property());
+  }
+
+  static HObjectAccess ForHeapNumberValue() {
+    return HObjectAccess(
+        kDouble, HeapNumber::kValueOffset, Representation::Double());
+  }
+
+  static HObjectAccess ForHeapNumberValueLowestBits() {
+    return HObjectAccess(kDouble,
+                         HeapNumber::kValueOffset,
+                         Representation::Integer32());
+  }
+
+  static HObjectAccess ForHeapNumberValueHighestBits() {
+    return HObjectAccess(kDouble,
+                         HeapNumber::kValueOffset + kIntSize,
+                         Representation::Integer32());
+  }
+
+  static HObjectAccess ForOddballToNumber(
+      Representation representation = Representation::Tagged()) {
+    return HObjectAccess(kInobject, Oddball::kToNumberOffset, representation);
+  }
+
+  static HObjectAccess ForOddballTypeOf() {
+    return HObjectAccess(kInobject, Oddball::kTypeOfOffset,
+                         Representation::HeapObject());
+  }
+
+  static HObjectAccess ForElementsPointer() {
+    return HObjectAccess(kElementsPointer, JSObject::kElementsOffset);
+  }
+
+  static HObjectAccess ForLiteralsPointer() {
+    return HObjectAccess(kInobject, JSFunction::kLiteralsOffset);
+  }
+
+  static HObjectAccess ForNextFunctionLinkPointer() {
+    return HObjectAccess(kInobject, JSFunction::kNextFunctionLinkOffset);
+  }
+
+  static HObjectAccess ForArrayLength(ElementsKind elements_kind) {
+    return HObjectAccess(
+        kArrayLengths,
+        JSArray::kLengthOffset,
+        IsFastElementsKind(elements_kind)
+            ? Representation::Smi() : Representation::Tagged());
+  }
+
+  static HObjectAccess ForAllocationSiteOffset(int offset);
+
+  static HObjectAccess ForAllocationSiteList() {
+    return HObjectAccess(kExternalMemory, 0, Representation::Tagged(),
+                         Handle<Name>::null(), false, false);
+  }
+
+  static HObjectAccess ForFixedArrayLength() {
+    return HObjectAccess(
+        kArrayLengths,
+        FixedArray::kLengthOffset,
+        Representation::Smi());
+  }
+
+  static HObjectAccess ForFixedTypedArrayBaseBasePointer() {
+    return HObjectAccess(kInobject, FixedTypedArrayBase::kBasePointerOffset,
+                         Representation::Tagged());
+  }
+
+  static HObjectAccess ForFixedTypedArrayBaseExternalPointer() {
+    return HObjectAccess::ForObservableJSObjectOffset(
+        FixedTypedArrayBase::kExternalPointerOffset,
+        Representation::External());
+  }
+
+  static HObjectAccess ForStringHashField() {
+    return HObjectAccess(kInobject,
+                         String::kHashFieldOffset,
+                         Representation::Integer32());
+  }
+
+  static HObjectAccess ForStringLength() {
+    STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+    return HObjectAccess(
+        kStringLengths,
+        String::kLengthOffset,
+        Representation::Smi());
+  }
+
+  static HObjectAccess ForConsStringFirst() {
+    return HObjectAccess(kInobject, ConsString::kFirstOffset);
+  }
+
+  static HObjectAccess ForConsStringSecond() {
+    return HObjectAccess(kInobject, ConsString::kSecondOffset);
+  }
+
+  static HObjectAccess ForPropertiesPointer() {
+    return HObjectAccess(kInobject, JSObject::kPropertiesOffset);
+  }
+
+  static HObjectAccess ForPrototypeOrInitialMap() {
+    return HObjectAccess(kInobject, JSFunction::kPrototypeOrInitialMapOffset);
+  }
+
+  static HObjectAccess ForSharedFunctionInfoPointer() {
+    return HObjectAccess(kInobject, JSFunction::kSharedFunctionInfoOffset);
+  }
+
+  static HObjectAccess ForCodeEntryPointer() {
+    return HObjectAccess(kInobject, JSFunction::kCodeEntryOffset);
+  }
+
+  static HObjectAccess ForCodeOffset() {
+    return HObjectAccess(kInobject, SharedFunctionInfo::kCodeOffset);
+  }
+
+  static HObjectAccess ForOptimizedCodeMap() {
+    return HObjectAccess(kInobject,
+                         SharedFunctionInfo::kOptimizedCodeMapOffset);
+  }
+
+  static HObjectAccess ForOptimizedCodeMapSharedCode() {
+    return HObjectAccess(kInobject, FixedArray::OffsetOfElementAt(
+                                        SharedFunctionInfo::kSharedCodeIndex));
+  }
+
+  static HObjectAccess ForFunctionContextPointer() {
+    return HObjectAccess(kInobject, JSFunction::kContextOffset);
+  }
+
+  static HObjectAccess ForMap() {
+    return HObjectAccess(kMaps, JSObject::kMapOffset);
+  }
+
+  static HObjectAccess ForPrototype() {
+    return HObjectAccess(kMaps, Map::kPrototypeOffset);
+  }
+
+  static HObjectAccess ForMapAsInteger32() {
+    return HObjectAccess(kMaps, JSObject::kMapOffset,
+                         Representation::Integer32());
+  }
+
+  static HObjectAccess ForMapInObjectPropertiesOrConstructorFunctionIndex() {
+    return HObjectAccess(
+        kInobject, Map::kInObjectPropertiesOrConstructorFunctionIndexOffset,
+        Representation::UInteger8());
+  }
+
+  static HObjectAccess ForMapInstanceType() {
+    return HObjectAccess(kInobject,
+                         Map::kInstanceTypeOffset,
+                         Representation::UInteger8());
+  }
+
+  static HObjectAccess ForMapInstanceSize() {
+    return HObjectAccess(kInobject,
+                         Map::kInstanceSizeOffset,
+                         Representation::UInteger8());
+  }
+
+  static HObjectAccess ForMapBitField() {
+    return HObjectAccess(kInobject,
+                         Map::kBitFieldOffset,
+                         Representation::UInteger8());
+  }
+
+  static HObjectAccess ForMapBitField2() {
+    return HObjectAccess(kInobject,
+                         Map::kBitField2Offset,
+                         Representation::UInteger8());
+  }
+
+  static HObjectAccess ForNameHashField() {
+    return HObjectAccess(kInobject,
+                         Name::kHashFieldOffset,
+                         Representation::Integer32());
+  }
+
+  static HObjectAccess ForMapInstanceTypeAndBitField() {
+    STATIC_ASSERT((Map::kInstanceTypeAndBitFieldOffset & 1) == 0);
+    // Ensure the two fields share one 16-bit word, endian-independent.
+    STATIC_ASSERT((Map::kBitFieldOffset & ~1) ==
+                  (Map::kInstanceTypeOffset & ~1));
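+    // As a worked example (hypothetical offsets, not the real layout):
+    // with kBitFieldOffset == 13 and kInstanceTypeOffset == 12, both
+    // 13 & ~1 and 12 & ~1 equal 12, so the two bytes occupy the same
+    // aligned 16-bit word and a single UInteger16 load reads both fields
+    // regardless of byte order.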
+    return HObjectAccess(kInobject,
+                         Map::kInstanceTypeAndBitFieldOffset,
+                         Representation::UInteger16());
+  }
+
+  static HObjectAccess ForPropertyCellValue() {
+    return HObjectAccess(kInobject, PropertyCell::kValueOffset);
+  }
+
+  static HObjectAccess ForPropertyCellDetails() {
+    return HObjectAccess(kInobject, PropertyCell::kDetailsOffset,
+                         Representation::Smi());
+  }
+
+  static HObjectAccess ForCellValue() {
+    return HObjectAccess(kInobject, Cell::kValueOffset);
+  }
+
+  static HObjectAccess ForWeakCellValue() {
+    return HObjectAccess(kInobject, WeakCell::kValueOffset);
+  }
+
+  static HObjectAccess ForWeakCellNext() {
+    return HObjectAccess(kInobject, WeakCell::kNextOffset);
+  }
+
+  static HObjectAccess ForAllocationMementoSite() {
+    return HObjectAccess(kInobject, AllocationMemento::kAllocationSiteOffset);
+  }
+
+  static HObjectAccess ForCounter() {
+    return HObjectAccess(kExternalMemory, 0, Representation::Integer32(),
+                         Handle<Name>::null(), false, false);
+  }
+
+  static HObjectAccess ForExternalUInteger8() {
+    return HObjectAccess(kExternalMemory, 0, Representation::UInteger8(),
+                         Handle<Name>::null(), false, false);
+  }
+
+  // Create an access to an offset in a fixed array header.
+  static HObjectAccess ForFixedArrayHeader(int offset);
+
+  // Create an access to an in-object property in a JSObject.
+  // This kind of access must be used when the object |map| is known and
+  // in-object properties are being accessed. Accesses of in-object
+  // properties can have different semantics depending on whether the
+  // corresponding property was added to the map or not.
+  static HObjectAccess ForMapAndOffset(Handle<Map> map, int offset,
+      Representation representation = Representation::Tagged());
+
+  // Create an access to an in-object property in a JSObject.
+  // This kind of access can be used for accessing object header fields or
+  // in-object properties if the map of the object is not known.
+  static HObjectAccess ForObservableJSObjectOffset(int offset,
+      Representation representation = Representation::Tagged()) {
+    return ForMapAndOffset(Handle<Map>::null(), offset, representation);
+  }
+
+  // Create an access to an in-object property in a JSArray.
+  static HObjectAccess ForJSArrayOffset(int offset);
+
+  static HObjectAccess ForContextSlot(int index);
+
+  static HObjectAccess ForScriptContext(int index);
+
+  // Create an access to the backing store of an object.
+  static HObjectAccess ForBackingStoreOffset(int offset,
+      Representation representation = Representation::Tagged());
+
+  // Create an access to a resolved field (in-object or backing store).
+  static HObjectAccess ForField(Handle<Map> map, int index,
+                                Representation representation,
+                                Handle<Name> name);
+
+  static HObjectAccess ForJSTypedArrayLength() {
+    return HObjectAccess::ForObservableJSObjectOffset(
+        JSTypedArray::kLengthOffset);
+  }
+
+  static HObjectAccess ForJSArrayBufferBackingStore() {
+    return HObjectAccess::ForObservableJSObjectOffset(
+        JSArrayBuffer::kBackingStoreOffset, Representation::External());
+  }
+
+  static HObjectAccess ForJSArrayBufferByteLength() {
+    return HObjectAccess::ForObservableJSObjectOffset(
+        JSArrayBuffer::kByteLengthOffset, Representation::Tagged());
+  }
+
+  static HObjectAccess ForJSArrayBufferBitField() {
+    return HObjectAccess::ForObservableJSObjectOffset(
+        JSArrayBuffer::kBitFieldOffset, Representation::Integer32());
+  }
+
+  static HObjectAccess ForJSArrayBufferBitFieldSlot() {
+    return HObjectAccess::ForObservableJSObjectOffset(
+        JSArrayBuffer::kBitFieldSlot, Representation::Smi());
+  }
+
+  static HObjectAccess ForJSArrayBufferViewBuffer() {
+    return HObjectAccess::ForObservableJSObjectOffset(
+        JSArrayBufferView::kBufferOffset);
+  }
+
+  static HObjectAccess ForJSArrayBufferViewByteOffset() {
+    return HObjectAccess::ForObservableJSObjectOffset(
+        JSArrayBufferView::kByteOffsetOffset);
+  }
+
+  static HObjectAccess ForJSArrayBufferViewByteLength() {
+    return HObjectAccess::ForObservableJSObjectOffset(
+        JSArrayBufferView::kByteLengthOffset);
+  }
+
+  static HObjectAccess ForJSGlobalObjectNativeContext() {
+    return HObjectAccess(kInobject, JSGlobalObject::kNativeContextOffset);
+  }
+
+  static HObjectAccess ForJSRegExpFlags() {
+    return HObjectAccess(kInobject, JSRegExp::kFlagsOffset);
+  }
+
+  static HObjectAccess ForJSRegExpSource() {
+    return HObjectAccess(kInobject, JSRegExp::kSourceOffset);
+  }
+
+  static HObjectAccess ForJSCollectionTable() {
+    return HObjectAccess::ForObservableJSObjectOffset(
+        JSCollection::kTableOffset);
+  }
+
+  template <typename CollectionType>
+  static HObjectAccess ForOrderedHashTableNumberOfBuckets() {
+    return HObjectAccess(kInobject, CollectionType::kNumberOfBucketsOffset,
+                         Representation::Smi());
+  }
+
+  template <typename CollectionType>
+  static HObjectAccess ForOrderedHashTableNumberOfElements() {
+    return HObjectAccess(kInobject, CollectionType::kNumberOfElementsOffset,
+                         Representation::Smi());
+  }
+
+  template <typename CollectionType>
+  static HObjectAccess ForOrderedHashTableNumberOfDeletedElements() {
+    return HObjectAccess(kInobject,
+                         CollectionType::kNumberOfDeletedElementsOffset,
+                         Representation::Smi());
+  }
+
+  template <typename CollectionType>
+  static HObjectAccess ForOrderedHashTableNextTable() {
+    return HObjectAccess(kInobject, CollectionType::kNextTableOffset);
+  }
+
+  template <typename CollectionType>
+  static HObjectAccess ForOrderedHashTableBucket(int bucket) {
+    return HObjectAccess(kInobject, CollectionType::kHashTableStartOffset +
+                                        (bucket * kPointerSize),
+                         Representation::Smi());
+  }
+
+  // Access into the data table of an OrderedHashTable with a
+  // known-at-compile-time bucket count.
+  template <typename CollectionType, int kBucketCount>
+  static HObjectAccess ForOrderedHashTableDataTableIndex(int index) {
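+    // For illustration only: with kBucketCount == 2 and index == 3 on a
+    // target where kPointerSize == 8, this resolves to
+    // kHashTableStartOffset + 2 * 8 + 3 * 8 == kHashTableStartOffset + 40.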
+    return HObjectAccess(kInobject, CollectionType::kHashTableStartOffset +
+                                        (kBucketCount * kPointerSize) +
+                                        (index * kPointerSize));
+  }
+
+  inline bool Equals(HObjectAccess that) const {
+    // portion, representation, flags and offset must all match
+    return value_ == that.value_;
+  }
+
+ protected:
+  void SetGVNFlags(HValue *instr, PropertyAccessType access_type);
+
+ private:
+  // internal use only; different parts of an object or array
+  enum Portion {
+    kMaps,             // map of an object
+    kArrayLengths,     // the length of an array
+    kStringLengths,    // the length of a string
+    kElementsPointer,  // elements pointer
+    kBackingStore,     // some field in the backing store
+    kDouble,           // some double field
+    kInobject,         // some other in-object field
+    kExternalMemory    // some field in external memory
+  };
+
+  HObjectAccess() : value_(0) {}
+
+  HObjectAccess(Portion portion, int offset,
+                Representation representation = Representation::Tagged(),
+                Handle<Name> name = Handle<Name>::null(),
+                bool immutable = false, bool existing_inobject_property = true)
+      : value_(PortionField::encode(portion) |
+               RepresentationField::encode(representation.kind()) |
+               ImmutableField::encode(immutable ? 1 : 0) |
+               ExistingInobjectPropertyField::encode(
+                   existing_inobject_property ? 1 : 0) |
+               OffsetField::encode(offset)),
+        name_(name) {
+    // assert that the fields decode correctly
+    DCHECK(this->offset() == offset);
+    DCHECK(this->portion() == portion);
+    DCHECK(this->immutable() == immutable);
+    DCHECK(this->existing_inobject_property() == existing_inobject_property);
+    DCHECK(RepresentationField::decode(value_) == representation.kind());
+    DCHECK(!this->existing_inobject_property() || IsInobject());
+  }
+
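+  // Resulting layout of value_, read off the BitField declarations below:
+  //   bits  0..2   portion                      (3 bits)
+  //   bits  3..6   representation kind          (4 bits)
+  //   bit   7      immutable                    (1 bit)
+  //   bit   8      existing in-object property  (1 bit)
+  //   bits  9..31  offset                       (23 bits)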
+  class PortionField : public BitField<Portion, 0, 3> {};
+  class RepresentationField : public BitField<Representation::Kind, 3, 4> {};
+  class ImmutableField : public BitField<bool, 7, 1> {};
+  class ExistingInobjectPropertyField : public BitField<bool, 8, 1> {};
+  class OffsetField : public BitField<int, 9, 23> {};
+
+  uint32_t value_;  // encodes portion, representation, immutable, and offset
+  Handle<Name> name_;
+
+  friend class HLoadNamedField;
+  friend class HStoreNamedField;
+  friend class SideEffectsTracker;
+  friend std::ostream& operator<<(std::ostream& os,
+                                  const HObjectAccess& access);
+
+  inline Portion portion() const {
+    return PortionField::decode(value_);
+  }
+};
+
+
+std::ostream& operator<<(std::ostream& os, const HObjectAccess& access);
+
+
+class HLoadNamedField final : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*,
+                                 HValue*, HObjectAccess);
+  DECLARE_INSTRUCTION_FACTORY_P5(HLoadNamedField, HValue*, HValue*,
+                                 HObjectAccess, const UniqueSet<Map>*, HType);
+
+  HValue* object() const { return OperandAt(0); }
+  HValue* dependency() const {
+    DCHECK(HasDependency());
+    return OperandAt(1);
+  }
+  bool HasDependency() const { return OperandAt(0) != OperandAt(1); }
+  HObjectAccess access() const { return access_; }
+  Representation field_representation() const {
+    return access_.representation();
+  }
+
+  const UniqueSet<Map>* maps() const { return maps_; }
+
+  bool HasEscapingOperandAt(int index) override { return false; }
+  bool HasOutOfBoundsAccess(int size) override {
+    return !access().IsInobject() || access().offset() >= size;
+  }
+  Representation RequiredInputRepresentation(int index) override {
+    if (index == 0) {
+      // object must be external in case of external memory access
+      return access().IsExternalMemory() ? Representation::External()
+                                         : Representation::Tagged();
+    }
+    DCHECK(index == 1);
+    return Representation::None();
+  }
+  Range* InferRange(Zone* zone) override;
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  bool CanBeReplacedWith(HValue* other) const {
+    if (!CheckFlag(HValue::kCantBeReplaced)) return false;
+    if (!type().Equals(other->type())) return false;
+    if (!representation().Equals(other->representation())) return false;
+    if (!other->IsLoadNamedField()) return true;
+    HLoadNamedField* that = HLoadNamedField::cast(other);
+    if (this->maps_ == that->maps_) return true;
+    if (this->maps_ == NULL || that->maps_ == NULL) return false;
+    return this->maps_->IsSubset(that->maps_);
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    HLoadNamedField* that = HLoadNamedField::cast(other);
+    if (!this->access_.Equals(that->access_)) return false;
+    if (this->maps_ == that->maps_) return true;
+    return (this->maps_ != NULL &&
+            that->maps_ != NULL &&
+            this->maps_->Equals(that->maps_));
+  }
+
+ private:
+  HLoadNamedField(HValue* object,
+                  HValue* dependency,
+                  HObjectAccess access)
+      : access_(access), maps_(NULL) {
+    DCHECK_NOT_NULL(object);
+    SetOperandAt(0, object);
+    SetOperandAt(1, dependency ? dependency : object);
+
+    Representation representation = access.representation();
+    if (representation.IsInteger8() ||
+        representation.IsUInteger8() ||
+        representation.IsInteger16() ||
+        representation.IsUInteger16()) {
+      set_representation(Representation::Integer32());
+    } else if (representation.IsSmi()) {
+      set_type(HType::Smi());
+      if (SmiValuesAre32Bits()) {
+        set_representation(Representation::Integer32());
+      } else {
+        set_representation(representation);
+      }
+    } else if (representation.IsDouble() ||
+               representation.IsExternal() ||
+               representation.IsInteger32()) {
+      set_representation(representation);
+    } else if (representation.IsHeapObject()) {
+      set_type(HType::HeapObject());
+      set_representation(Representation::Tagged());
+    } else {
+      set_representation(Representation::Tagged());
+    }
+    access.SetGVNFlags(this, LOAD);
+  }
+
+  HLoadNamedField(HValue* object,
+                  HValue* dependency,
+                  HObjectAccess access,
+                  const UniqueSet<Map>* maps,
+                  HType type)
+      : HTemplateInstruction<2>(type), access_(access), maps_(maps) {
+    DCHECK_NOT_NULL(maps);
+    DCHECK_NE(0, maps->size());
+
+    DCHECK_NOT_NULL(object);
+    SetOperandAt(0, object);
+    SetOperandAt(1, dependency ? dependency : object);
+
+    DCHECK(access.representation().IsHeapObject());
+    DCHECK(type.IsHeapObject());
+    set_representation(Representation::Tagged());
+
+    access.SetGVNFlags(this, LOAD);
+  }
+
+  bool IsDeletable() const override { return true; }
+
+  HObjectAccess access_;
+  const UniqueSet<Map>* maps_;
+};
+
+
+class HLoadNamedGeneric final : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadNamedGeneric, HValue*,
+                                              Handle<Name>, LanguageMode,
+                                              InlineCacheState);
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* object() const { return OperandAt(1); }
+  Handle<Name> name() const { return name_; }
+
+  InlineCacheState initialization_state() const {
+    return initialization_state_;
+  }
+  FeedbackVectorSlot slot() const { return slot_; }
+  Handle<TypeFeedbackVector> feedback_vector() const {
+    return feedback_vector_;
+  }
+  bool HasVectorAndSlot() const { return true; }
+  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
+                        FeedbackVectorSlot slot) {
+    feedback_vector_ = vector;
+    slot_ = slot;
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
+
+  LanguageMode language_mode() const { return language_mode_; }
+
+ private:
+  HLoadNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
+                    LanguageMode language_mode,
+                    InlineCacheState initialization_state)
+      : name_(name),
+        language_mode_(language_mode),
+        initialization_state_(initialization_state) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, object);
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+
+  Handle<Name> name_;
+  Handle<TypeFeedbackVector> feedback_vector_;
+  FeedbackVectorSlot slot_;
+  LanguageMode language_mode_;
+  InlineCacheState initialization_state_;
+};
+
+
+class HLoadFunctionPrototype final : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HLoadFunctionPrototype, HValue*);
+
+  HValue* function() { return OperandAt(0); }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  explicit HLoadFunctionPrototype(HValue* function)
+      : HUnaryOperation(function) {
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetDependsOnFlag(kCalls);
+  }
+};
+
+class ArrayInstructionInterface {
+ public:
+  virtual HValue* GetKey() = 0;
+  virtual void SetKey(HValue* key) = 0;
+  virtual ElementsKind elements_kind() const = 0;
+  // TryIncreaseBaseOffset returns false if overflow would result.
+  virtual bool TryIncreaseBaseOffset(uint32_t increase_by_value) = 0;
+  virtual bool IsDehoisted() const = 0;
+  virtual void SetDehoisted(bool is_dehoisted) = 0;
+  virtual ~ArrayInstructionInterface() { }
+
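+  // With 32-bit smis (64-bit targets), keyed indices can always be kept
+  // untagged as Integer32; otherwise only inputs that are already
+  // Integer32 stay untagged, and everything else is required to be a Smi.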
+  static Representation KeyedAccessIndexRequirement(Representation r) {
+    return r.IsInteger32() || SmiValuesAre32Bits()
+        ? Representation::Integer32() : Representation::Smi();
+  }
+};
+
+
+static const int kDefaultKeyedHeaderOffsetSentinel = -1;
+
+enum LoadKeyedHoleMode {
+  NEVER_RETURN_HOLE,
+  ALLOW_RETURN_HOLE,
+  CONVERT_HOLE_TO_UNDEFINED
+};
+
+
+class HLoadKeyed final : public HTemplateInstruction<4>,
+                         public ArrayInstructionInterface {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P5(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
+                                 ElementsKind);
+  DECLARE_INSTRUCTION_FACTORY_P6(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
+                                 ElementsKind, LoadKeyedHoleMode);
+  DECLARE_INSTRUCTION_FACTORY_P7(HLoadKeyed, HValue*, HValue*, HValue*, HValue*,
+                                 ElementsKind, LoadKeyedHoleMode, int);
+
+  bool is_fixed_typed_array() const {
+    return IsFixedTypedArrayElementsKind(elements_kind());
+  }
+  HValue* elements() const { return OperandAt(0); }
+  HValue* key() const { return OperandAt(1); }
+  HValue* dependency() const {
+    DCHECK(HasDependency());
+    return OperandAt(2);
+  }
+  bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
+  HValue* backing_store_owner() const {
+    DCHECK(HasBackingStoreOwner());
+    return OperandAt(3);
+  }
+  bool HasBackingStoreOwner() const { return OperandAt(0) != OperandAt(3); }
+  uint32_t base_offset() const { return BaseOffsetField::decode(bit_field_); }
+  bool TryIncreaseBaseOffset(uint32_t increase_by_value) override;
+  HValue* GetKey() override { return key(); }
+  void SetKey(HValue* key) override { SetOperandAt(1, key); }
+  bool IsDehoisted() const override {
+    return IsDehoistedField::decode(bit_field_);
+  }
+  void SetDehoisted(bool is_dehoisted) override {
+    bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
+  }
+  ElementsKind elements_kind() const override {
+    return ElementsKindField::decode(bit_field_);
+  }
+  LoadKeyedHoleMode hole_mode() const {
+    return HoleModeField::decode(bit_field_);
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    // kind_fast:                 tagged[int32] (none)
+    // kind_double:               tagged[int32] (none)
+    // kind_fixed_typed_array:    external[int32] (none)
+    // kind_external:             external[int32] (none)
+    if (index == 0) {
+      return is_fixed_typed_array() ? Representation::External()
+                                    : Representation::Tagged();
+    }
+    if (index == 1) {
+      return ArrayInstructionInterface::KeyedAccessIndexRequirement(
+          OperandAt(1)->representation());
+    }
+    if (index == 2) {
+      return Representation::None();
+    }
+    DCHECK_EQ(3, index);
+    return HasBackingStoreOwner() ? Representation::Tagged()
+                                  : Representation::None();
+  }
+
+  Representation observed_input_representation(int index) override {
+    return RequiredInputRepresentation(index);
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  bool UsesMustHandleHole() const;
+  bool AllUsesCanTreatHoleAsNaN() const;
+  bool RequiresHoleCheck() const;
+
+  Range* InferRange(Zone* zone) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    if (!other->IsLoadKeyed()) return false;
+    HLoadKeyed* other_load = HLoadKeyed::cast(other);
+
+    if (base_offset() != other_load->base_offset()) return false;
+    return elements_kind() == other_load->elements_kind();
+  }
+
+ private:
+  HLoadKeyed(HValue* obj, HValue* key, HValue* dependency,
+             HValue* backing_store_owner, ElementsKind elements_kind,
+             LoadKeyedHoleMode mode = NEVER_RETURN_HOLE,
+             int offset = kDefaultKeyedHeaderOffsetSentinel)
+      : bit_field_(0) {
+    offset = offset == kDefaultKeyedHeaderOffsetSentinel
+        ? GetDefaultHeaderSizeForElementsKind(elements_kind)
+        : offset;
+    bit_field_ = ElementsKindField::encode(elements_kind) |
+        HoleModeField::encode(mode) |
+        BaseOffsetField::encode(offset);
+
+    SetOperandAt(0, obj);
+    SetOperandAt(1, key);
+    SetOperandAt(2, dependency != nullptr ? dependency : obj);
+    SetOperandAt(3, backing_store_owner != nullptr ? backing_store_owner : obj);
+    DCHECK_EQ(HasBackingStoreOwner(), is_fixed_typed_array());
+
+    if (!is_fixed_typed_array()) {
+      // The distinction between double stores (holey and fast) and
+      // smi/object stores can be made by looking at elements_kind_.
+      DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) ||
+             IsFastDoubleElementsKind(elements_kind));
+
+      if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+        if (IsFastSmiElementsKind(elements_kind) &&
+            (!IsHoleyElementsKind(elements_kind) ||
+             mode == NEVER_RETURN_HOLE)) {
+          set_type(HType::Smi());
+          if (SmiValuesAre32Bits() && !RequiresHoleCheck()) {
+            set_representation(Representation::Integer32());
+          } else {
+            set_representation(Representation::Smi());
+          }
+        } else {
+          set_representation(Representation::Tagged());
+        }
+
+        SetDependsOnFlag(kArrayElements);
+      } else {
+        set_representation(Representation::Double());
+        SetDependsOnFlag(kDoubleArrayElements);
+      }
+    } else {
+      if (elements_kind == FLOAT32_ELEMENTS ||
+          elements_kind == FLOAT64_ELEMENTS) {
+        set_representation(Representation::Double());
+      } else {
+        set_representation(Representation::Integer32());
+      }
+
+      if (is_fixed_typed_array()) {
+        SetDependsOnFlag(kExternalMemory);
+        SetDependsOnFlag(kTypedArrayElements);
+      } else {
+        UNREACHABLE();
+      }
+      // Native code could change the specialized array.
+      SetDependsOnFlag(kCalls);
+    }
+
+    SetFlag(kUseGVN);
+  }
+
+  bool IsDeletable() const override { return !RequiresHoleCheck(); }
+
+  // Establish some checks around our packed fields
+  enum LoadKeyedBits {
+    kBitsForElementsKind = 5,
+    kBitsForHoleMode = 2,
+    kBitsForBaseOffset = 24,
+    kBitsForIsDehoisted = 1,
+
+    kStartElementsKind = 0,
+    kStartHoleMode = kStartElementsKind + kBitsForElementsKind,
+    kStartBaseOffset = kStartHoleMode + kBitsForHoleMode,
+    kStartIsDehoisted = kStartBaseOffset + kBitsForBaseOffset
+  };
+
+  STATIC_ASSERT((kBitsForElementsKind + kBitsForHoleMode + kBitsForBaseOffset +
+                 kBitsForIsDehoisted) <= sizeof(uint32_t) * 8);
+  STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind));
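+  // Resulting layout of bit_field_ (derived from the constants above):
+  //   bits  0..4   elements kind (5 bits)
+  //   bits  5..6   hole mode     (2 bits)
+  //   bits  7..30  base offset   (24 bits)
+  //   bit   31     is dehoisted  (1 bit)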
+  class ElementsKindField:
+    public BitField<ElementsKind, kStartElementsKind, kBitsForElementsKind>
+    {};  // NOLINT
+  class HoleModeField:
+    public BitField<LoadKeyedHoleMode, kStartHoleMode, kBitsForHoleMode>
+    {};  // NOLINT
+  class BaseOffsetField:
+    public BitField<uint32_t, kStartBaseOffset, kBitsForBaseOffset>
+    {};  // NOLINT
+  class IsDehoistedField:
+    public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted>
+    {};  // NOLINT
+  uint32_t bit_field_;
+};
+
+
+class HLoadKeyedGeneric final : public HTemplateInstruction<3> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HLoadKeyedGeneric, HValue*,
+                                              HValue*, LanguageMode,
+                                              InlineCacheState);
+  HValue* object() const { return OperandAt(0); }
+  HValue* key() const { return OperandAt(1); }
+  HValue* context() const { return OperandAt(2); }
+  InlineCacheState initialization_state() const {
+    return initialization_state_;
+  }
+  FeedbackVectorSlot slot() const { return slot_; }
+  Handle<TypeFeedbackVector> feedback_vector() const {
+    return feedback_vector_;
+  }
+  bool HasVectorAndSlot() const {
+    DCHECK(initialization_state_ == MEGAMORPHIC || !feedback_vector_.is_null());
+    return !feedback_vector_.is_null();
+  }
+  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
+                        FeedbackVectorSlot slot) {
+    feedback_vector_ = vector;
+    slot_ = slot;
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    // tagged[tagged]
+    return Representation::Tagged();
+  }
+
+  HValue* Canonicalize() override;
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
+
+  LanguageMode language_mode() const { return language_mode_; }
+
+ private:
+  HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key,
+                    LanguageMode language_mode,
+                    InlineCacheState initialization_state)
+      : initialization_state_(initialization_state),
+        language_mode_(language_mode) {
+    set_representation(Representation::Tagged());
+    SetOperandAt(0, obj);
+    SetOperandAt(1, key);
+    SetOperandAt(2, context);
+    SetAllSideEffects();
+  }
+
+  Handle<TypeFeedbackVector> feedback_vector_;
+  FeedbackVectorSlot slot_;
+  InlineCacheState initialization_state_;
+  LanguageMode language_mode_;
+};
+
+
+// Indicates whether the store is to an entry that was previously
+// initialized or not.
+enum StoreFieldOrKeyedMode {
+  // The entry could be either previously initialized or not.
+  INITIALIZING_STORE,
+  // At the time of this store it is guaranteed that the entry is already
+  // initialized.
+  STORE_TO_INITIALIZED_ENTRY
+};
+
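+// For example (see HStoreNamedField::RequiredInputRepresentation below):
+// with 32-bit smis, a STORE_TO_INITIALIZED_ENTRY to a smi field may write
+// the value as a raw Integer32, whereas an INITIALIZING_STORE must write a
+// properly tagged smi.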
+
+class HStoreNamedField final : public HTemplateInstruction<3> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P3(HStoreNamedField, HValue*,
+                                 HObjectAccess, HValue*);
+  DECLARE_INSTRUCTION_FACTORY_P4(HStoreNamedField, HValue*,
+                                 HObjectAccess, HValue*, StoreFieldOrKeyedMode);
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
+
+  bool HasEscapingOperandAt(int index) override { return index == 1; }
+  bool HasOutOfBoundsAccess(int size) override {
+    return !access().IsInobject() || access().offset() >= size;
+  }
+  Representation RequiredInputRepresentation(int index) override {
+    if (index == 0 && access().IsExternalMemory()) {
+      // object must be external in case of external memory access
+      return Representation::External();
+    } else if (index == 1) {
+      if (field_representation().IsInteger8() ||
+          field_representation().IsUInteger8() ||
+          field_representation().IsInteger16() ||
+          field_representation().IsUInteger16() ||
+          field_representation().IsInteger32()) {
+        return Representation::Integer32();
+      } else if (field_representation().IsDouble()) {
+        return field_representation();
+      } else if (field_representation().IsSmi()) {
+        if (SmiValuesAre32Bits() &&
+            store_mode() == STORE_TO_INITIALIZED_ENTRY) {
+          return Representation::Integer32();
+        }
+        return field_representation();
+      } else if (field_representation().IsExternal()) {
+        return Representation::External();
+      }
+    }
+    return Representation::Tagged();
+  }
+  bool HandleSideEffectDominator(GVNFlag side_effect,
+                                 HValue* dominator) override {
+    DCHECK(side_effect == kNewSpacePromotion);
+    if (!FLAG_use_write_barrier_elimination) return false;
+    dominator_ = dominator;
+    return false;
+  }
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  HValue* object() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
+  HValue* transition() const { return OperandAt(2); }
+
+  HObjectAccess access() const { return access_; }
+  HValue* dominator() const { return dominator_; }
+  bool has_transition() const { return HasTransitionField::decode(bit_field_); }
+  StoreFieldOrKeyedMode store_mode() const {
+    return StoreModeField::decode(bit_field_);
+  }
+
+  Handle<Map> transition_map() const {
+    if (has_transition()) {
+      return Handle<Map>::cast(
+          HConstant::cast(transition())->handle(isolate()));
+    } else {
+      return Handle<Map>();
+    }
+  }
+
+  void SetTransition(HConstant* transition) {
+    DCHECK(!has_transition());  // Only set once.
+    SetOperandAt(2, transition);
+    bit_field_ = HasTransitionField::update(bit_field_, true);
+    SetChangesFlag(kMaps);
+  }
+
+  bool NeedsWriteBarrier() const {
+    DCHECK(!field_representation().IsDouble() ||
+           (FLAG_unbox_double_fields && access_.IsInobject()) ||
+           !has_transition());
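+    // Double, smi, int32 and external representations never store a heap
+    // object pointer, so no write barrier is needed for them.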
+    if (field_representation().IsDouble()) return false;
+    if (field_representation().IsSmi()) return false;
+    if (field_representation().IsInteger32()) return false;
+    if (field_representation().IsExternal()) return false;
+    return StoringValueNeedsWriteBarrier(value()) &&
+        ReceiverObjectNeedsWriteBarrier(object(), value(), dominator());
+  }
+
+  bool NeedsWriteBarrierForMap() {
+    return ReceiverObjectNeedsWriteBarrier(object(), transition(),
+                                           dominator());
+  }
+
+  SmiCheck SmiCheckForWriteBarrier() const {
+    if (field_representation().IsHeapObject()) return OMIT_SMI_CHECK;
+    if (value()->type().IsHeapObject()) return OMIT_SMI_CHECK;
+    return INLINE_SMI_CHECK;
+  }
+
+  PointersToHereCheck PointersToHereCheckForValue() const {
+    return PointersToHereCheckForObject(value(), dominator());
+  }
+
+  Representation field_representation() const {
+    return access_.representation();
+  }
+
+  void UpdateValue(HValue* value) {
+    SetOperandAt(1, value);
+  }
+
+  bool CanBeReplacedWith(HStoreNamedField* that) const {
+    if (!this->access().Equals(that->access())) return false;
+    if (SmiValuesAre32Bits() &&
+        this->field_representation().IsSmi() &&
+        this->store_mode() == INITIALIZING_STORE &&
+        that->store_mode() == STORE_TO_INITIALIZED_ENTRY) {
+      // We cannot replace an initializing store to a smi field with a store to
+      // an initialized entry on 64-bit architectures (with 32-bit smis).
+      return false;
+    }
+    return true;
+  }
+
+ private:
+  HStoreNamedField(HValue* obj, HObjectAccess access, HValue* val,
+                   StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE)
+      : access_(access),
+        dominator_(NULL),
+        bit_field_(HasTransitionField::encode(false) |
+                   StoreModeField::encode(store_mode)) {
+    // Stores to a non-existent in-object property are allowed only for
+    // newly allocated objects (via HAllocate or HInnerAllocatedObject).
+    DCHECK(!access.IsInobject() || access.existing_inobject_property() ||
+           obj->IsAllocate() || obj->IsInnerAllocatedObject());
+    SetOperandAt(0, obj);
+    SetOperandAt(1, val);
+    SetOperandAt(2, obj);
+    access.SetGVNFlags(this, STORE);
+  }
+
+  class HasTransitionField : public BitField<bool, 0, 1> {};
+  class StoreModeField : public BitField<StoreFieldOrKeyedMode, 1, 1> {};
+
+  HObjectAccess access_;
+  HValue* dominator_;
+  uint32_t bit_field_;
+};
+
+
+class HStoreNamedGeneric final : public HTemplateInstruction<3> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HStoreNamedGeneric, HValue*,
+                                              Handle<Name>, HValue*,
+                                              LanguageMode, InlineCacheState);
+  HValue* object() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
+  HValue* context() const { return OperandAt(2); }
+  Handle<Name> name() const { return name_; }
+  LanguageMode language_mode() const { return language_mode_; }
+  InlineCacheState initialization_state() const {
+    return initialization_state_;
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  FeedbackVectorSlot slot() const { return slot_; }
+  Handle<TypeFeedbackVector> feedback_vector() const {
+    return feedback_vector_;
+  }
+  bool HasVectorAndSlot() const { return true; }
+  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
+                        FeedbackVectorSlot slot) {
+    feedback_vector_ = vector;
+    slot_ = slot;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric)
+
+ private:
+  HStoreNamedGeneric(HValue* context, HValue* object, Handle<Name> name,
+                     HValue* value, LanguageMode language_mode,
+                     InlineCacheState initialization_state)
+      : name_(name),
+        language_mode_(language_mode),
+        initialization_state_(initialization_state) {
+    SetOperandAt(0, object);
+    SetOperandAt(1, value);
+    SetOperandAt(2, context);
+    SetAllSideEffects();
+  }
+
+  Handle<Name> name_;
+  Handle<TypeFeedbackVector> feedback_vector_;
+  FeedbackVectorSlot slot_;
+  LanguageMode language_mode_;
+  InlineCacheState initialization_state_;
+};
+
+
+class HStoreKeyed final : public HTemplateInstruction<4>,
+                          public ArrayInstructionInterface {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P5(HStoreKeyed, HValue*, HValue*, HValue*,
+                                 HValue*, ElementsKind);
+  DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*,
+                                 HValue*, ElementsKind, StoreFieldOrKeyedMode);
+  DECLARE_INSTRUCTION_FACTORY_P7(HStoreKeyed, HValue*, HValue*, HValue*,
+                                 HValue*, ElementsKind, StoreFieldOrKeyedMode,
+                                 int);
+
+  Representation RequiredInputRepresentation(int index) override {
+    // kind_fast:               tagged[int32] = tagged
+    // kind_double:             tagged[int32] = double
+    // kind_smi   :             tagged[int32] = smi
+    // kind_fixed_typed_array:  tagged[int32] = (double | int32)
+    // kind_external:           external[int32] = (double | int32)
+    if (index == 0) {
+      return is_fixed_typed_array() ? Representation::External()
+                                    : Representation::Tagged();
+    } else if (index == 1) {
+      return ArrayInstructionInterface::KeyedAccessIndexRequirement(
+          OperandAt(1)->representation());
+    } else if (index == 2) {
+      return RequiredValueRepresentation(elements_kind(), store_mode());
+    }
+
+    DCHECK_EQ(3, index);
+    return HasBackingStoreOwner() ? Representation::Tagged()
+                                  : Representation::None();
+  }
+
+  static Representation RequiredValueRepresentation(
+      ElementsKind kind, StoreFieldOrKeyedMode mode) {
+    if (IsDoubleOrFloatElementsKind(kind)) {
+      return Representation::Double();
+    }
+
+    if (kind == FAST_SMI_ELEMENTS && SmiValuesAre32Bits() &&
+        mode == STORE_TO_INITIALIZED_ENTRY) {
+      return Representation::Integer32();
+    }
+
+    if (IsFastSmiElementsKind(kind)) {
+      return Representation::Smi();
+    }
+
+    if (IsFixedTypedArrayElementsKind(kind)) {
+      return Representation::Integer32();
+    }
+    return Representation::Tagged();
+  }
+
+  bool is_fixed_typed_array() const {
+    return IsFixedTypedArrayElementsKind(elements_kind());
+  }
+
+  Representation observed_input_representation(int index) override {
+    if (index != 2) return RequiredInputRepresentation(index);
+    if (IsUninitialized()) {
+      return Representation::None();
+    }
+    Representation r =
+        RequiredValueRepresentation(elements_kind(), store_mode());
+    // For fast object elements kinds, don't assume anything.
+    if (r.IsTagged()) return Representation::None();
+    return r;
+  }
+
+  HValue* elements() const { return OperandAt(0); }
+  HValue* key() const { return OperandAt(1); }
+  HValue* value() const { return OperandAt(2); }
+  HValue* backing_store_owner() const {
+    DCHECK(HasBackingStoreOwner());
+    return OperandAt(3);
+  }
+  bool HasBackingStoreOwner() const { return OperandAt(0) != OperandAt(3); }
+  bool value_is_smi() const { return IsFastSmiElementsKind(elements_kind()); }
+  StoreFieldOrKeyedMode store_mode() const {
+    return StoreModeField::decode(bit_field_);
+  }
+  ElementsKind elements_kind() const override {
+    return ElementsKindField::decode(bit_field_);
+  }
+  uint32_t base_offset() const { return base_offset_; }
+  bool TryIncreaseBaseOffset(uint32_t increase_by_value) override;
+  HValue* GetKey() override { return key(); }
+  void SetKey(HValue* key) override { SetOperandAt(1, key); }
+  bool IsDehoisted() const override {
+    return IsDehoistedField::decode(bit_field_);
+  }
+  void SetDehoisted(bool is_dehoisted) override {
+    bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
+  }
+  bool IsUninitialized() { return IsUninitializedField::decode(bit_field_); }
+  void SetUninitialized(bool is_uninitialized) {
+    bit_field_ = IsUninitializedField::update(bit_field_, is_uninitialized);
+  }
+
+  bool IsConstantHoleStore() {
+    return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
+  }
+
+  bool HandleSideEffectDominator(GVNFlag side_effect,
+                                 HValue* dominator) override {
+    DCHECK(side_effect == kNewSpacePromotion);
+    dominator_ = dominator;
+    return false;
+  }
+
+  HValue* dominator() const { return dominator_; }
+
+  bool NeedsWriteBarrier() {
+    if (value_is_smi()) {
+      return false;
+    } else {
+      return StoringValueNeedsWriteBarrier(value()) &&
+          ReceiverObjectNeedsWriteBarrier(elements(), value(), dominator());
+    }
+  }
+
+  PointersToHereCheck PointersToHereCheckForValue() const {
+    return PointersToHereCheckForObject(value(), dominator());
+  }
+
+  bool NeedsCanonicalization();
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
+
+ private:
+  HStoreKeyed(HValue* obj, HValue* key, HValue* val,
+              HValue* backing_store_owner, ElementsKind elements_kind,
+              StoreFieldOrKeyedMode store_mode = INITIALIZING_STORE,
+              int offset = kDefaultKeyedHeaderOffsetSentinel)
+      : base_offset_(offset == kDefaultKeyedHeaderOffsetSentinel
+                         ? GetDefaultHeaderSizeForElementsKind(elements_kind)
+                         : offset),
+        bit_field_(IsDehoistedField::encode(false) |
+                   IsUninitializedField::encode(false) |
+                   StoreModeField::encode(store_mode) |
+                   ElementsKindField::encode(elements_kind)),
+        dominator_(NULL) {
+    SetOperandAt(0, obj);
+    SetOperandAt(1, key);
+    SetOperandAt(2, val);
+    SetOperandAt(3, backing_store_owner != nullptr ? backing_store_owner : obj);
+    DCHECK_EQ(HasBackingStoreOwner(), is_fixed_typed_array());
+
+    if (IsFastObjectElementsKind(elements_kind)) {
+      SetFlag(kTrackSideEffectDominators);
+      SetDependsOnFlag(kNewSpacePromotion);
+    }
+    if (IsFastDoubleElementsKind(elements_kind)) {
+      SetChangesFlag(kDoubleArrayElements);
+    } else if (IsFastSmiElementsKind(elements_kind)) {
+      SetChangesFlag(kArrayElements);
+    } else if (is_fixed_typed_array()) {
+      SetChangesFlag(kTypedArrayElements);
+      SetChangesFlag(kExternalMemory);
+      SetFlag(kAllowUndefinedAsNaN);
+    } else {
+      SetChangesFlag(kArrayElements);
+    }
+
+    // {UINT,INT}{8,16,32}_ELEMENTS stores are truncating.
+    if (elements_kind >= UINT8_ELEMENTS && elements_kind <= INT32_ELEMENTS) {
+      SetFlag(kTruncatingToInt32);
+    }
+  }
+
+  class IsDehoistedField : public BitField<bool, 0, 1> {};
+  class IsUninitializedField : public BitField<bool, 1, 1> {};
+  class StoreModeField : public BitField<StoreFieldOrKeyedMode, 2, 1> {};
+  class ElementsKindField : public BitField<ElementsKind, 3, 5> {};
+
+  uint32_t base_offset_;
+  uint32_t bit_field_;
+  HValue* dominator_;
+};
+
+
+class HStoreKeyedGeneric final : public HTemplateInstruction<4> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(HStoreKeyedGeneric, HValue*,
+                                              HValue*, HValue*, LanguageMode,
+                                              InlineCacheState);
+
+  HValue* object() const { return OperandAt(0); }
+  HValue* key() const { return OperandAt(1); }
+  HValue* value() const { return OperandAt(2); }
+  HValue* context() const { return OperandAt(3); }
+  LanguageMode language_mode() const { return language_mode_; }
+  InlineCacheState initialization_state() const {
+    return initialization_state_;
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    // tagged[tagged] = tagged
+    return Representation::Tagged();
+  }
+
+  FeedbackVectorSlot slot() const { return slot_; }
+  Handle<TypeFeedbackVector> feedback_vector() const {
+    return feedback_vector_;
+  }
+  bool HasVectorAndSlot() const {
+    return !feedback_vector_.is_null();
+  }
+  void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
+                        FeedbackVectorSlot slot) {
+    feedback_vector_ = vector;
+    slot_ = slot;
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
+
+ private:
+  HStoreKeyedGeneric(HValue* context, HValue* object, HValue* key,
+                     HValue* value, LanguageMode language_mode,
+                     InlineCacheState initialization_state)
+      : language_mode_(language_mode),
+        initialization_state_(initialization_state) {
+    SetOperandAt(0, object);
+    SetOperandAt(1, key);
+    SetOperandAt(2, value);
+    SetOperandAt(3, context);
+    SetAllSideEffects();
+  }
+
+  Handle<TypeFeedbackVector> feedback_vector_;
+  FeedbackVectorSlot slot_;
+  LanguageMode language_mode_;
+  InlineCacheState initialization_state_;
+};
+
+
+class HTransitionElementsKind final : public HTemplateInstruction<2> {
+ public:
+  inline static HTransitionElementsKind* New(Isolate* isolate, Zone* zone,
+                                             HValue* context, HValue* object,
+                                             Handle<Map> original_map,
+                                             Handle<Map> transitioned_map) {
+    return new(zone) HTransitionElementsKind(context, object,
+                                             original_map, transitioned_map);
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  HValue* object() const { return OperandAt(0); }
+  HValue* context() const { return OperandAt(1); }
+  Unique<Map> original_map() const { return original_map_; }
+  Unique<Map> transitioned_map() const { return transitioned_map_; }
+  ElementsKind from_kind() const {
+    return FromElementsKindField::decode(bit_field_);
+  }
+  ElementsKind to_kind() const {
+    return ToElementsKindField::decode(bit_field_);
+  }
+  bool map_is_stable() const { return MapIsStableField::decode(bit_field_); }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
+    return original_map_ == instr->original_map_ &&
+           transitioned_map_ == instr->transitioned_map_;
+  }
+
+  int RedefinedOperandIndex() override { return 0; }
+
+ private:
+  HTransitionElementsKind(HValue* context, HValue* object,
+                          Handle<Map> original_map,
+                          Handle<Map> transitioned_map)
+      : original_map_(Unique<Map>(original_map)),
+        transitioned_map_(Unique<Map>(transitioned_map)),
+        bit_field_(
+            FromElementsKindField::encode(original_map->elements_kind()) |
+            ToElementsKindField::encode(transitioned_map->elements_kind()) |
+            MapIsStableField::encode(transitioned_map->is_stable())) {
+    SetOperandAt(0, object);
+    SetOperandAt(1, context);
+    SetFlag(kUseGVN);
+    SetChangesFlag(kElementsKind);
+    if (!IsSimpleMapChangeTransition(from_kind(), to_kind())) {
+      SetChangesFlag(kElementsPointer);
+      SetChangesFlag(kNewSpacePromotion);
+    }
+    set_representation(Representation::Tagged());
+  }
+
+  class FromElementsKindField : public BitField<ElementsKind, 0, 5> {};
+  class ToElementsKindField : public BitField<ElementsKind, 5, 5> {};
+  class MapIsStableField : public BitField<bool, 10, 1> {};
+
+  Unique<Map> original_map_;
+  Unique<Map> transitioned_map_;
+  uint32_t bit_field_;
+};
+
+
+class HStringAdd final : public HBinaryOperation {
+ public:
+  static HInstruction* New(
+      Isolate* isolate, Zone* zone, HValue* context, HValue* left,
+      HValue* right, PretenureFlag pretenure_flag = NOT_TENURED,
+      StringAddFlags flags = STRING_ADD_CHECK_BOTH,
+      Handle<AllocationSite> allocation_site = Handle<AllocationSite>::null());
+
+  StringAddFlags flags() const { return flags_; }
+  PretenureFlag pretenure_flag() const { return pretenure_flag_; }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    return flags_ == HStringAdd::cast(other)->flags_ &&
+        pretenure_flag_ == HStringAdd::cast(other)->pretenure_flag_;
+  }
+
+ private:
+  HStringAdd(HValue* context, HValue* left, HValue* right,
+             PretenureFlag pretenure_flag, StringAddFlags flags,
+             Handle<AllocationSite> allocation_site)
+      : HBinaryOperation(context, left, right, Strength::WEAK, HType::String()),
+        flags_(flags),
+        pretenure_flag_(pretenure_flag) {
+    set_representation(Representation::Tagged());
+    if ((flags & STRING_ADD_CONVERT) == STRING_ADD_CONVERT) {
+      SetAllSideEffects();
+      ClearFlag(kUseGVN);
+    } else {
+      SetChangesFlag(kNewSpacePromotion);
+      SetFlag(kUseGVN);
+    }
+    SetDependsOnFlag(kMaps);
+    if (FLAG_trace_pretenuring) {
+      PrintF("HStringAdd with AllocationSite %p %s\n",
+             allocation_site.is_null()
+                 ? static_cast<void*>(NULL)
+                 : static_cast<void*>(*allocation_site),
+             pretenure_flag == TENURED ? "tenured" : "not tenured");
+    }
+  }
+
+  bool IsDeletable() const final {
+    return (flags_ & STRING_ADD_CONVERT) != STRING_ADD_CONVERT;
+  }
+
+  const StringAddFlags flags_;
+  const PretenureFlag pretenure_flag_;
+};
+
+
+class HStringCharCodeAt final : public HTemplateInstruction<3> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HStringCharCodeAt,
+                                              HValue*,
+                                              HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    // The index is supposed to be Integer32.
+    return index == 2
+        ? Representation::Integer32()
+        : Representation::Tagged();
+  }
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* string() const { return OperandAt(1); }
+  HValue* index() const { return OperandAt(2); }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+  Range* InferRange(Zone* zone) override {
+    return new(zone) Range(0, String::kMaxUtf16CodeUnit);
+  }
+
+ private:
+  HStringCharCodeAt(HValue* context, HValue* string, HValue* index) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, string);
+    SetOperandAt(2, index);
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+    SetDependsOnFlag(kMaps);
+    SetDependsOnFlag(kStringChars);
+    SetChangesFlag(kNewSpacePromotion);
+  }
+
+  // No side effects: runtime function assumes string + number inputs.
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HStringCharFromCode final : public HTemplateInstruction<2> {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           HValue* char_code);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return index == 0
+        ? Representation::Tagged()
+        : Representation::Integer32();
+  }
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
+
+  bool DataEquals(HValue* other) override { return true; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
+
+ private:
+  HStringCharFromCode(HValue* context, HValue* char_code)
+      : HTemplateInstruction<2>(HType::String()) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, char_code);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetChangesFlag(kNewSpacePromotion);
+  }
+
+  bool IsDeletable() const override {
+    return !value()->ToNumberCanBeObserved();
+  }
+};
+
+
+class HTypeof final : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*);
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof)
+
+ private:
+  HTypeof(HValue* context, HValue* value) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, value);
+    set_representation(Representation::Tagged());
+  }
+
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HTrapAllocationMemento final : public HTemplateInstruction<1> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HTrapAllocationMemento, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  HValue* object() { return OperandAt(0); }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento)
+
+ private:
+  explicit HTrapAllocationMemento(HValue* obj) {
+    SetOperandAt(0, obj);
+  }
+};
+
+
+class HMaybeGrowElements final : public HTemplateInstruction<5> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P6(HMaybeGrowElements, HValue*,
+                                              HValue*, HValue*, HValue*, bool,
+                                              ElementsKind);
+
+  Representation RequiredInputRepresentation(int index) override {
+    if (index < 3) {
+      return Representation::Tagged();
+    }
+    DCHECK(index == 3 || index == 4);
+    return Representation::Integer32();
+  }
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* object() const { return OperandAt(1); }
+  HValue* elements() const { return OperandAt(2); }
+  HValue* key() const { return OperandAt(3); }
+  HValue* current_capacity() const { return OperandAt(4); }
+
+  bool is_js_array() const { return is_js_array_; }
+  ElementsKind kind() const { return kind_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements)
+
+ protected:
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  HMaybeGrowElements(HValue* context, HValue* object, HValue* elements,
+                     HValue* key, HValue* current_capacity,
+                     bool is_js_array, ElementsKind kind) {
+    is_js_array_ = is_js_array;
+    kind_ = kind;
+
+    SetOperandAt(0, context);
+    SetOperandAt(1, object);
+    SetOperandAt(2, elements);
+    SetOperandAt(3, key);
+    SetOperandAt(4, current_capacity);
+
+    SetFlag(kUseGVN);
+    SetChangesFlag(kElementsPointer);
+    SetChangesFlag(kNewSpacePromotion);
+    set_representation(Representation::Tagged());
+  }
+
+  bool is_js_array_;
+  ElementsKind kind_;
+};
+
+
+class HToFastProperties final : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HToFastProperties, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties)
+
+ private:
+  explicit HToFastProperties(HValue* value) : HUnaryOperation(value) {
+    set_representation(Representation::Tagged());
+    SetChangesFlag(kNewSpacePromotion);
+
+    // This instruction is not marked as kChangesMaps, but does
+    // change the map of the input operand. Use it only when creating
+    // object literals via a runtime call.
+    DCHECK(value->IsCallRuntime());
+#ifdef DEBUG
+    const Runtime::Function* function = HCallRuntime::cast(value)->function();
+    DCHECK(function->function_id == Runtime::kCreateObjectLiteral);
+#endif
+  }
+
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HSeqStringGetChar final : public HTemplateInstruction<2> {
+ public:
+  static HInstruction* New(Isolate* isolate, Zone* zone, HValue* context,
+                           String::Encoding encoding, HValue* string,
+                           HValue* index);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return (index == 0) ? Representation::Tagged()
+                        : Representation::Integer32();
+  }
+
+  String::Encoding encoding() const { return encoding_; }
+  HValue* string() const { return OperandAt(0); }
+  HValue* index() const { return OperandAt(1); }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar)
+
+ protected:
+  bool DataEquals(HValue* other) override {
+    return encoding() == HSeqStringGetChar::cast(other)->encoding();
+  }
+
+  Range* InferRange(Zone* zone) override {
+    if (encoding() == String::ONE_BYTE_ENCODING) {
+      return new(zone) Range(0, String::kMaxOneByteCharCode);
+    } else {
+      DCHECK_EQ(String::TWO_BYTE_ENCODING, encoding());
+      return new(zone) Range(0, String::kMaxUtf16CodeUnit);
+    }
+  }
+
+ private:
+  HSeqStringGetChar(String::Encoding encoding,
+                    HValue* string,
+                    HValue* index) : encoding_(encoding) {
+    SetOperandAt(0, string);
+    SetOperandAt(1, index);
+    set_representation(Representation::Integer32());
+    SetFlag(kUseGVN);
+    SetDependsOnFlag(kStringChars);
+  }
+
+  bool IsDeletable() const override { return true; }
+
+  String::Encoding encoding_;
+};
+
+
+class HSeqStringSetChar final : public HTemplateInstruction<4> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(
+      HSeqStringSetChar, String::Encoding,
+      HValue*, HValue*, HValue*);
+
+  String::Encoding encoding() { return encoding_; }
+  HValue* context() { return OperandAt(0); }
+  HValue* string() { return OperandAt(1); }
+  HValue* index() { return OperandAt(2); }
+  HValue* value() { return OperandAt(3); }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return (index <= 1) ? Representation::Tagged()
+                        : Representation::Integer32();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar)
+
+ private:
+  HSeqStringSetChar(HValue* context,
+                    String::Encoding encoding,
+                    HValue* string,
+                    HValue* index,
+                    HValue* value) : encoding_(encoding) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, string);
+    SetOperandAt(2, index);
+    SetOperandAt(3, value);
+    set_representation(Representation::Tagged());
+    SetChangesFlag(kStringChars);
+  }
+
+  String::Encoding encoding_;
+};
+
+
+class HCheckMapValue final : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(HCheckMapValue, HValue*, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  HType CalculateInferredType() override {
+    if (value()->type().IsHeapObject()) return value()->type();
+    return HType::HeapObject();
+  }
+
+  HValue* value() const { return OperandAt(0); }
+  HValue* map() const { return OperandAt(1); }
+
+  HValue* Canonicalize() override;
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
+
+ protected:
+  int RedefinedOperandIndex() override { return 0; }
+
+  bool DataEquals(HValue* other) override { return true; }
+
+ private:
+  HCheckMapValue(HValue* value, HValue* map)
+      : HTemplateInstruction<2>(HType::HeapObject()) {
+    SetOperandAt(0, value);
+    SetOperandAt(1, map);
+    set_representation(Representation::Tagged());
+    SetFlag(kUseGVN);
+    SetDependsOnFlag(kMaps);
+    SetDependsOnFlag(kElementsKind);
+  }
+};
+
+
+class HForInPrepareMap final : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HForInPrepareMap, HValue*);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* enumerable() const { return OperandAt(1); }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  HType CalculateInferredType() override { return HType::Tagged(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap);
+
+ private:
+  HForInPrepareMap(HValue* context,
+                   HValue* object) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, object);
+    set_representation(Representation::Tagged());
+    SetAllSideEffects();
+  }
+};
+
+
+class HForInCacheArray final : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P3(HForInCacheArray, HValue*, HValue*, int);
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  HValue* enumerable() const { return OperandAt(0); }
+  HValue* map() const { return OperandAt(1); }
+  int idx() const { return idx_; }
+
+  HForInCacheArray* index_cache() {
+    return index_cache_;
+  }
+
+  void set_index_cache(HForInCacheArray* index_cache) {
+    index_cache_ = index_cache;
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  HType CalculateInferredType() override { return HType::Tagged(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray);
+
+ private:
+  HForInCacheArray(HValue* enumerable,
+                   HValue* keys,
+                   int idx) : idx_(idx) {
+    SetOperandAt(0, enumerable);
+    SetOperandAt(1, keys);
+    set_representation(Representation::Tagged());
+  }
+
+  int idx_;
+  HForInCacheArray* index_cache_;
+};
+
+
+class HLoadFieldByIndex final : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P2(HLoadFieldByIndex, HValue*, HValue*);
+
+  HLoadFieldByIndex(HValue* object,
+                    HValue* index) {
+    SetOperandAt(0, object);
+    SetOperandAt(1, index);
+    SetChangesFlag(kNewSpacePromotion);
+    set_representation(Representation::Tagged());
+  }
+
+  Representation RequiredInputRepresentation(int index) override {
+    if (index == 1) {
+      return Representation::Smi();
+    } else {
+      return Representation::Tagged();
+    }
+  }
+
+  HValue* object() const { return OperandAt(0); }
+  HValue* index() const { return OperandAt(1); }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  HType CalculateInferredType() override { return HType::Tagged(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
+
+ private:
+  bool IsDeletable() const override { return true; }
+};
+
+
+class HStoreFrameContext : public HUnaryOperation {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P1(HStoreFrameContext, HValue*);
+
+  HValue* context() { return OperandAt(0); }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext)
+ private:
+  explicit HStoreFrameContext(HValue* context)
+      : HUnaryOperation(context) {
+    set_representation(Representation::Tagged());
+    SetChangesFlag(kContextSlots);
+  }
+};
+
+
+class HAllocateBlockContext : public HTemplateInstruction<2> {
+ public:
+  DECLARE_INSTRUCTION_FACTORY_P3(HAllocateBlockContext, HValue*,
+                                 HValue*, Handle<ScopeInfo>);
+  HValue* context() const { return OperandAt(0); }
+  HValue* function() const { return OperandAt(1); }
+  Handle<ScopeInfo> scope_info() const { return scope_info_; }
+
+  Representation RequiredInputRepresentation(int index) override {
+    return Representation::Tagged();
+  }
+
+  std::ostream& PrintDataTo(std::ostream& os) const override;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext)
+
+ private:
+  HAllocateBlockContext(HValue* context,
+                        HValue* function,
+                        Handle<ScopeInfo> scope_info)
+      : scope_info_(scope_info) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, function);
+    set_representation(Representation::Tagged());
+  }
+
+  Handle<ScopeInfo> scope_info_;
+};
+
+
+
+#undef DECLARE_INSTRUCTION
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_INSTRUCTIONS_H_
diff --git a/src/crankshaft/hydrogen-load-elimination.cc b/src/crankshaft/hydrogen-load-elimination.cc
new file mode 100644
index 0000000..da8d186
--- /dev/null
+++ b/src/crankshaft/hydrogen-load-elimination.cc
@@ -0,0 +1,513 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-load-elimination.h"
+
+#include "src/crankshaft/hydrogen-alias-analysis.h"
+#include "src/crankshaft/hydrogen-flow-engine.h"
+#include "src/crankshaft/hydrogen-instructions.h"
+
+namespace v8 {
+namespace internal {
+
+#define GLOBAL true
+#define TRACE(x) if (FLAG_trace_load_elimination) PrintF x
+
+static const int kMaxTrackedFields = 16;
+static const int kMaxTrackedObjects = 5;
+
+// An element in the field approximation list.
+class HFieldApproximation : public ZoneObject {
+ public:  // Just a data blob.
+  HValue* object_;
+  HValue* last_value_;
+  HFieldApproximation* next_;
+
+  // Recursively copy the entire linked list of field approximations.
+  HFieldApproximation* Copy(Zone* zone) {
+    HFieldApproximation* copy = new(zone) HFieldApproximation();
+    copy->object_ = this->object_;
+    copy->last_value_ = this->last_value_;
+    copy->next_ = this->next_ == NULL ? NULL : this->next_->Copy(zone);
+    return copy;
+  }
+};
+
+
+// The main data structure used during load/store elimination. Each in-object
+// field is tracked separately. For each field, store a list of known field
+// values for known objects.
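+//
+// A hypothetical example of the table contents: after processing
+//   s1: StoreNamedField o1.@field0 = v1
+// field 0 holds the approximation [o1 = v1], so a later
+//   l1: LoadNamedField o1.@field0
+// can be replaced with v1, and an identical repeated store of v1 to
+// o1.@field0 is removed as redundant.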
+class HLoadEliminationTable : public ZoneObject {
+ public:
+  HLoadEliminationTable(Zone* zone, HAliasAnalyzer* aliasing)
+    : zone_(zone), fields_(kMaxTrackedFields, zone), aliasing_(aliasing) { }
+
+  // The main processing of instructions.
+  HLoadEliminationTable* Process(HInstruction* instr, Zone* zone) {
+    switch (instr->opcode()) {
+      case HValue::kLoadNamedField: {
+        HLoadNamedField* l = HLoadNamedField::cast(instr);
+        TRACE((" process L%d field %d (o%d)\n",
+               instr->id(),
+               FieldOf(l->access()),
+               l->object()->ActualValue()->id()));
+        HValue* result = load(l);
+        if (result != instr && l->CanBeReplacedWith(result)) {
+          // The load can be replaced with a previous load or a value.
+          TRACE(("  replace L%d -> v%d\n", instr->id(), result->id()));
+          instr->DeleteAndReplaceWith(result);
+        }
+        break;
+      }
+      case HValue::kStoreNamedField: {
+        HStoreNamedField* s = HStoreNamedField::cast(instr);
+        TRACE((" process S%d field %d (o%d) = v%d\n",
+               instr->id(),
+               FieldOf(s->access()),
+               s->object()->ActualValue()->id(),
+               s->value()->id()));
+        HValue* result = store(s);
+        if (result == NULL) {
+          // The store is redundant. Remove it.
+          TRACE(("  remove S%d\n", instr->id()));
+          instr->DeleteAndReplaceWith(NULL);
+        }
+        break;
+      }
+      case HValue::kTransitionElementsKind: {
+        HTransitionElementsKind* t = HTransitionElementsKind::cast(instr);
+        HValue* object = t->object()->ActualValue();
+        KillFieldInternal(object, FieldOf(JSArray::kElementsOffset), NULL);
+        KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+        break;
+      }
+      default: {
+        if (instr->CheckChangesFlag(kInobjectFields)) {
+          TRACE((" kill-all i%d\n", instr->id()));
+          Kill();
+          break;
+        }
+        if (instr->CheckChangesFlag(kMaps)) {
+          TRACE((" kill-maps i%d\n", instr->id()));
+          KillOffset(JSObject::kMapOffset);
+        }
+        if (instr->CheckChangesFlag(kElementsKind)) {
+          TRACE((" kill-elements-kind i%d\n", instr->id()));
+          KillOffset(JSObject::kMapOffset);
+          KillOffset(JSObject::kElementsOffset);
+        }
+        if (instr->CheckChangesFlag(kElementsPointer)) {
+          TRACE((" kill-elements i%d\n", instr->id()));
+          KillOffset(JSObject::kElementsOffset);
+        }
+        if (instr->CheckChangesFlag(kOsrEntries)) {
+          TRACE((" kill-osr i%d\n", instr->id()));
+          Kill();
+        }
+      }
+      // Improvements possible:
+      // - learn from HCheckMaps for field 0
+      // - remove unobservable stores (write-after-write)
+      // - track cells
+      // - track globals
+      // - track roots
+    }
+    return this;
+  }
+
+  // Support for global analysis with HFlowEngine: Merge given state with
+  // the other incoming state.
+  static HLoadEliminationTable* Merge(HLoadEliminationTable* succ_state,
+                                      HBasicBlock* succ_block,
+                                      HLoadEliminationTable* pred_state,
+                                      HBasicBlock* pred_block,
+                                      Zone* zone) {
+    DCHECK(pred_state != NULL);
+    if (succ_state == NULL) {
+      return pred_state->Copy(succ_block, pred_block, zone);
+    } else {
+      return succ_state->Merge(succ_block, pred_state, pred_block, zone);
+    }
+  }
+
+  // Support for global analysis with HFlowEngine: Given state merged with all
+  // the other incoming states, prepare it for use.
+  static HLoadEliminationTable* Finish(HLoadEliminationTable* state,
+                                       HBasicBlock* block,
+                                       Zone* zone) {
+    DCHECK(state != NULL);
+    return state;
+  }
+
+ private:
+  // Copy state to successor block.
+  HLoadEliminationTable* Copy(HBasicBlock* succ, HBasicBlock* from_block,
+                              Zone* zone) {
+    HLoadEliminationTable* copy =
+        new(zone) HLoadEliminationTable(zone, aliasing_);
+    copy->EnsureFields(fields_.length());
+    for (int i = 0; i < fields_.length(); i++) {
+      copy->fields_[i] = fields_[i] == NULL ? NULL : fields_[i]->Copy(zone);
+    }
+    if (FLAG_trace_load_elimination) {
+      TRACE((" copy-to B%d\n", succ->block_id()));
+      copy->Print();
+    }
+    return copy;
+  }
+
+  // Merge this state with the other incoming state.
+  HLoadEliminationTable* Merge(HBasicBlock* succ, HLoadEliminationTable* that,
+                               HBasicBlock* that_block, Zone* zone) {
+    if (that->fields_.length() < fields_.length()) {
+      // Drop fields not in the other table.
+      fields_.Rewind(that->fields_.length());
+    }
+    for (int i = 0; i < fields_.length(); i++) {
+      // Merge the field approximations for like fields.
+      HFieldApproximation* approx = fields_[i];
+      HFieldApproximation* prev = NULL;
+      while (approx != NULL) {
+        // TODO(titzer): Merging is O(N * M); sort?
+        HFieldApproximation* other = that->Find(approx->object_, i);
+        if (other == NULL || !Equal(approx->last_value_, other->last_value_)) {
+          // Kill an entry that doesn't agree with the other value.
+          if (prev != NULL) {
+            prev->next_ = approx->next_;
+          } else {
+            fields_[i] = approx->next_;
+          }
+          approx = approx->next_;
+          continue;
+        }
+        prev = approx;
+        approx = approx->next_;
+      }
+    }
+    if (FLAG_trace_load_elimination) {
+      TRACE((" merge-to B%d\n", succ->block_id()));
+      Print();
+    }
+    return this;
+  }
+
+  friend class HLoadEliminationEffects;  // Calls Kill() and others.
+  friend class HLoadEliminationPhase;
+
+ private:
+  // Process a load instruction, updating internal table state. If a previous
+  // load or store for this object and field exists, return the new value with
+  // which the load should be replaced. Otherwise, return {instr}.
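+  // Illustrative case: if v7 was last stored to o1.@field2 in a block that
+  // dominates this load of o1.@field2, the load can be replaced by v7.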
+  HValue* load(HLoadNamedField* instr) {
+    // There must be no loads from non-observable in-object properties.
+    DCHECK(!instr->access().IsInobject() ||
+           instr->access().existing_inobject_property());
+
+    int field = FieldOf(instr->access());
+    if (field < 0) return instr;
+
+    HValue* object = instr->object()->ActualValue();
+    HFieldApproximation* approx = FindOrCreate(object, field);
+
+    if (approx->last_value_ == NULL) {
+      // Load is not redundant. Fill out a new entry.
+      approx->last_value_ = instr;
+      return instr;
+    } else if (approx->last_value_->block()->EqualToOrDominates(
+        instr->block())) {
+      // Eliminate the load. Reuse previously stored value or load instruction.
+      return approx->last_value_;
+    } else {
+      return instr;
+    }
+  }
+
+  // Process a store instruction, updating internal table state. If a previous
+  // store to the same object and field makes this store unnecessary (e.g.
+  // because the stored values are the same), return NULL to indicate that
+  // this store is redundant. Otherwise, return {instr}.
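+  // Illustrative case: storing v3 to o1.@field0 twice in a row makes the
+  // second store redundant; storing a different value first kills the
+  // non-agreeing may-alias entries and then records the new approximation.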
+  HValue* store(HStoreNamedField* instr) {
+    if (instr->access().IsInobject() &&
+        !instr->access().existing_inobject_property()) {
+      TRACE(("  skipping non existing property initialization store\n"));
+      return instr;
+    }
+
+    int field = FieldOf(instr->access());
+    if (field < 0) return KillIfMisaligned(instr);
+
+    HValue* object = instr->object()->ActualValue();
+    HValue* value = instr->value();
+
+    if (instr->has_transition()) {
+      // A transition introduces a new field and alters the map of the object.
+      // Since the field in the object is new, it cannot alias existing entries.
+      // TODO(titzer): introduce a constant for the new map and remember it.
+      KillFieldInternal(object, FieldOf(JSObject::kMapOffset), NULL);
+    } else {
+      // Kill non-equivalent may-alias entries.
+      KillFieldInternal(object, field, value);
+    }
+    HFieldApproximation* approx = FindOrCreate(object, field);
+
+    if (Equal(approx->last_value_, value)) {
+      // The store is redundant because the field already has this value.
+      return NULL;
+    } else {
+      // The store is not redundant. Update the entry.
+      approx->last_value_ = value;
+      return instr;
+    }
+  }
+
+  // Kill everything in this table.
+  void Kill() {
+    fields_.Rewind(0);
+  }
+
+  // Kill all entries matching the given offset.
+  void KillOffset(int offset) {
+    int field = FieldOf(offset);
+    if (field >= 0 && field < fields_.length()) {
+      fields_[field] = NULL;
+    }
+  }
+
+  // Kill all entries aliasing the given store.
+  void KillStore(HStoreNamedField* s) {
+    int field = FieldOf(s->access());
+    if (field >= 0) {
+      KillFieldInternal(s->object()->ActualValue(), field, s->value());
+    } else {
+      KillIfMisaligned(s);
+    }
+  }
+
+  // Kill multiple entries in the case of a misaligned store.
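+  // Worked example, assuming kPointerSize == 4: a 4-byte store at in-object
+  // offset 6 covers bytes 6..9, so field 1 (offset / kPointerSize) and the
+  // overlapping field 2 ((offset + size - 1) / kPointerSize) are both killed.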
+  HValue* KillIfMisaligned(HStoreNamedField* instr) {
+    HObjectAccess access = instr->access();
+    if (access.IsInobject()) {
+      int offset = access.offset();
+      if ((offset % kPointerSize) != 0) {
+        // Kill the field containing the first word of the access.
+        HValue* object = instr->object()->ActualValue();
+        int field = offset / kPointerSize;
+        KillFieldInternal(object, field, NULL);
+
+        // Kill the next field in case of overlap.
+        int size = access.representation().size();
+        int next_field = (offset + size - 1) / kPointerSize;
+        if (next_field != field) KillFieldInternal(object, next_field, NULL);
+      }
+    }
+    return instr;
+  }
+
+  // Find an entry for the given object and field pair.
+  HFieldApproximation* Find(HValue* object, int field) {
+    // Search for a field approximation for this object.
+    HFieldApproximation* approx = fields_[field];
+    while (approx != NULL) {
+      if (aliasing_->MustAlias(object, approx->object_)) return approx;
+      approx = approx->next_;
+    }
+    return NULL;
+  }
+
+  // Find or create an entry for the given object and field pair.
+  HFieldApproximation* FindOrCreate(HValue* object, int field) {
+    EnsureFields(field + 1);
+
+    // Search for a field approximation for this object.
+    HFieldApproximation* approx = fields_[field];
+    int count = 0;
+    while (approx != NULL) {
+      if (aliasing_->MustAlias(object, approx->object_)) return approx;
+      count++;
+      approx = approx->next_;
+    }
+
+    if (count >= kMaxTrackedObjects) {
+      // Pull the last entry off the end and repurpose it for this object.
+      approx = ReuseLastApproximation(field);
+    } else {
+      // Allocate a new entry.
+      approx = new(zone_) HFieldApproximation();
+    }
+
+    // Insert the entry at the head of the list.
+    approx->object_ = object;
+    approx->last_value_ = NULL;
+    approx->next_ = fields_[field];
+    fields_[field] = approx;
+
+    return approx;
+  }
+
+  // Kill all entries for a given field that _may_ alias the given object
+  // and do _not_ have the given value.
+  void KillFieldInternal(HValue* object, int field, HValue* value) {
+    if (field >= fields_.length()) return;  // Nothing to do.
+
+    HFieldApproximation* approx = fields_[field];
+    HFieldApproximation* prev = NULL;
+    while (approx != NULL) {
+      if (aliasing_->MayAlias(object, approx->object_)) {
+        if (!Equal(approx->last_value_, value)) {
+          // Kill an aliasing entry that doesn't agree on the value.
+          if (prev != NULL) {
+            prev->next_ = approx->next_;
+          } else {
+            fields_[field] = approx->next_;
+          }
+          approx = approx->next_;
+          continue;
+        }
+      }
+      prev = approx;
+      approx = approx->next_;
+    }
+  }
+
+  bool Equal(HValue* a, HValue* b) {
+    if (a == b) return true;
+    if (a != NULL && b != NULL && a->CheckFlag(HValue::kUseGVN)) {
+      return a->Equals(b);
+    }
+    return false;
+  }
+
+  // Remove the last approximation for a field so that it can be reused.
+  // We reuse the last entry because it was the first inserted and is thus
+  // farthest away from the current instruction.
+  HFieldApproximation* ReuseLastApproximation(int field) {
+    HFieldApproximation* approx = fields_[field];
+    DCHECK(approx != NULL);
+
+    HFieldApproximation* prev = NULL;
+    while (approx->next_ != NULL) {
+      prev = approx;
+      approx = approx->next_;
+    }
+    if (prev != NULL) prev->next_ = NULL;
+    return approx;
+  }
+
+  // Compute the field index for the given object access; -1 if not tracked.
+  int FieldOf(HObjectAccess access) {
+    return access.IsInobject() ? FieldOf(access.offset()) : -1;
+  }
+
+  // Compute the field index for the given in-object offset; -1 if not tracked.
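+  // E.g., assuming kPointerSize == 8: offset 16 maps to field 2, offset 12 is
+  // misaligned (-1), and offsets >= kMaxTrackedFields * kPointerSize are
+  // untracked (-1).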
+  int FieldOf(int offset) {
+    if (offset >= kMaxTrackedFields * kPointerSize) return -1;
+    // TODO(titzer): track misaligned loads in a separate list?
+    if ((offset % kPointerSize) != 0) return -1;  // Ignore misaligned accesses.
+    return offset / kPointerSize;
+  }
+
+  // Ensure internal storage for the given number of fields.
+  void EnsureFields(int num_fields) {
+    if (fields_.length() < num_fields) {
+      fields_.AddBlock(NULL, num_fields - fields_.length(), zone_);
+    }
+  }
+
+  // Print this table to stdout.
+  void Print() {
+    for (int i = 0; i < fields_.length(); i++) {
+      PrintF("  field %d: ", i);
+      for (HFieldApproximation* a = fields_[i]; a != NULL; a = a->next_) {
+        PrintF("[o%d =", a->object_->id());
+        if (a->last_value_ != NULL) PrintF(" v%d", a->last_value_->id());
+        PrintF("] ");
+      }
+      PrintF("\n");
+    }
+  }
+
+  Zone* zone_;
+  ZoneList<HFieldApproximation*> fields_;
+  HAliasAnalyzer* aliasing_;
+};
+
+
+// Support for HFlowEngine: collect store effects within loops.
+class HLoadEliminationEffects : public ZoneObject {
+ public:
+  explicit HLoadEliminationEffects(Zone* zone)
+    : zone_(zone), stores_(5, zone) { }
+
+  inline bool Disabled() {
+    return false;  // Effects are _not_ disabled.
+  }
+
+  // Process a possibly side-effecting instruction.
+  void Process(HInstruction* instr, Zone* zone) {
+    if (instr->IsStoreNamedField()) {
+      stores_.Add(HStoreNamedField::cast(instr), zone_);
+    } else {
+      flags_.Add(instr->ChangesFlags());
+    }
+  }
+
+  // Apply these effects to the given load elimination table.
+  void Apply(HLoadEliminationTable* table) {
+    // Loads must not be hoisted past the OSR entry; therefore, we kill
+    // everything if we see an OSR entry.
+    if (flags_.Contains(kInobjectFields) || flags_.Contains(kOsrEntries)) {
+      table->Kill();
+      return;
+    }
+    if (flags_.Contains(kElementsKind) || flags_.Contains(kMaps)) {
+      table->KillOffset(JSObject::kMapOffset);
+    }
+    if (flags_.Contains(kElementsKind) || flags_.Contains(kElementsPointer)) {
+      table->KillOffset(JSObject::kElementsOffset);
+    }
+
+    // Kill non-agreeing fields for each store contained in these effects.
+    for (int i = 0; i < stores_.length(); i++) {
+      table->KillStore(stores_[i]);
+    }
+  }
+
+  // Union these effects with the other effects.
+  void Union(HLoadEliminationEffects* that, Zone* zone) {
+    flags_.Add(that->flags_);
+    for (int i = 0; i < that->stores_.length(); i++) {
+      stores_.Add(that->stores_[i], zone);
+    }
+  }
+
+ private:
+  Zone* zone_;
+  GVNFlagSet flags_;
+  ZoneList<HStoreNamedField*> stores_;
+};
+
+
+// The main routine of the analysis phase. Use the HFlowEngine for either a
+// local or a global analysis.
+void HLoadEliminationPhase::Run() {
+  HFlowEngine<HLoadEliminationTable, HLoadEliminationEffects>
+    engine(graph(), zone());
+  HAliasAnalyzer aliasing;
+  HLoadEliminationTable* table =
+      new(zone()) HLoadEliminationTable(zone(), &aliasing);
+
+  if (GLOBAL) {
+    // Perform a global analysis.
+    engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), table);
+  } else {
+    // Perform only local analysis.
+    for (int i = 0; i < graph()->blocks()->length(); i++) {
+      table->Kill();
+      engine.AnalyzeOneBlock(graph()->blocks()->at(i), table);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-load-elimination.h b/src/crankshaft/hydrogen-load-elimination.h
new file mode 100644
index 0000000..e565645
--- /dev/null
+++ b/src/crankshaft/hydrogen-load-elimination.h
@@ -0,0 +1,28 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+class HLoadEliminationPhase : public HPhase {
+ public:
+  explicit HLoadEliminationPhase(HGraph* graph)
+      : HPhase("H_Load elimination", graph) { }
+
+  void Run();
+
+ private:
+  void EliminateLoads(HBasicBlock* block);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_LOAD_ELIMINATION_H_
diff --git a/src/crankshaft/hydrogen-mark-deoptimize.cc b/src/crankshaft/hydrogen-mark-deoptimize.cc
new file mode 100644
index 0000000..a706d91
--- /dev/null
+++ b/src/crankshaft/hydrogen-mark-deoptimize.cc
@@ -0,0 +1,62 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-mark-deoptimize.h"
+
+namespace v8 {
+namespace internal {
+
+void HMarkDeoptimizeOnUndefinedPhase::Run() {
+  const ZoneList<HPhi*>* phi_list = graph()->phi_list();
+  for (int i = 0; i < phi_list->length(); i++) {
+    HPhi* phi = phi_list->at(i);
+    if (phi->CheckFlag(HValue::kAllowUndefinedAsNaN) &&
+        !phi->CheckUsesForFlag(HValue::kAllowUndefinedAsNaN)) {
+      ProcessPhi(phi);
+    }
+  }
+}
+
+
+void HMarkDeoptimizeOnUndefinedPhase::ProcessPhi(HPhi* phi) {
+  DCHECK(phi->CheckFlag(HValue::kAllowUndefinedAsNaN));
+  DCHECK(worklist_.is_empty());
+
+  // Push the phi onto the worklist
+  phi->ClearFlag(HValue::kAllowUndefinedAsNaN);
+  worklist_.Add(phi, zone());
+
+  // Process all phis that can reach this phi
+  while (!worklist_.is_empty()) {
+    phi = worklist_.RemoveLast();
+    for (int i = phi->OperandCount() - 1; i >= 0; --i) {
+      HValue* input = phi->OperandAt(i);
+      if (input->IsPhi() && input->CheckFlag(HValue::kAllowUndefinedAsNaN)) {
+        input->ClearFlag(HValue::kAllowUndefinedAsNaN);
+        worklist_.Add(HPhi::cast(input), zone());
+      }
+    }
+  }
+}
+
+
+void HComputeChangeUndefinedToNaN::Run() {
+  const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+  for (int i = 0; i < blocks->length(); ++i) {
+    const HBasicBlock* block(blocks->at(i));
+    for (HInstruction* current = block->first(); current != NULL; ) {
+      HInstruction* next = current->next();
+      if (current->IsChange()) {
+        if (HChange::cast(current)->can_convert_undefined_to_nan()) {
+          current->SetFlag(HValue::kAllowUndefinedAsNaN);
+        }
+      }
+      current = next;
+    }
+  }
+}
+
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-mark-deoptimize.h b/src/crankshaft/hydrogen-mark-deoptimize.h
new file mode 100644
index 0000000..45d40ac
--- /dev/null
+++ b/src/crankshaft/hydrogen-mark-deoptimize.h
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
+#define V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Compute DeoptimizeOnUndefined flag for phis.  Any phi that can reach a use
+// with DeoptimizeOnUndefined set must have DeoptimizeOnUndefined set.
+// Currently only HCompareNumericAndBranch, with double input representation,
+// has this flag set.  The flag is used by HChange tagged->double, which must
+// deoptimize if one of its uses has this flag set.
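+//
+// The code below tracks the property through its inverse flag,
+// kAllowUndefinedAsNaN: a sketch of the propagation is that a phi with a use
+// that does not allow undefined-as-NaN clears the flag on itself, and the
+// clearing then spreads transitively to every phi feeding into it.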
+class HMarkDeoptimizeOnUndefinedPhase : public HPhase {
+ public:
+  explicit HMarkDeoptimizeOnUndefinedPhase(HGraph* graph)
+      : HPhase("H_Mark deoptimize on undefined", graph),
+        worklist_(16, zone()) {}
+
+  void Run();
+
+ private:
+  void ProcessPhi(HPhi* phi);
+
+  // Preallocated worklist used as an optimization so we don't have
+  // to allocate a new ZoneList for every ProcessPhi() invocation.
+  ZoneList<HPhi*> worklist_;
+
+  DISALLOW_COPY_AND_ASSIGN(HMarkDeoptimizeOnUndefinedPhase);
+};
+
+
+class HComputeChangeUndefinedToNaN : public HPhase {
+ public:
+  explicit HComputeChangeUndefinedToNaN(HGraph* graph)
+      : HPhase("H_Compute change undefined to nan", graph) {}
+
+  void Run();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HComputeChangeUndefinedToNaN);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_MARK_DEOPTIMIZE_H_
diff --git a/src/crankshaft/hydrogen-mark-unreachable.cc b/src/crankshaft/hydrogen-mark-unreachable.cc
new file mode 100644
index 0000000..4e1dd68
--- /dev/null
+++ b/src/crankshaft/hydrogen-mark-unreachable.cc
@@ -0,0 +1,55 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-mark-unreachable.h"
+
+namespace v8 {
+namespace internal {
+
+
+void HMarkUnreachableBlocksPhase::MarkUnreachableBlocks() {
+  // If there is unreachable code in the graph, propagate the unreachable marks
+  // using a fixed-point iteration.
+  bool changed = true;
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  while (changed) {
+    changed = false;
+    for (int i = 0; i < blocks->length(); i++) {
+      HBasicBlock* block = blocks->at(i);
+      if (!block->IsReachable()) continue;
+      bool is_reachable = blocks->at(0) == block;
+      for (HPredecessorIterator it(block); !it.Done(); it.Advance()) {
+        HBasicBlock* predecessor = it.Current();
+        // A block is reachable if one of its predecessors is reachable,
+        // doesn't deoptimize and either is known to transfer control to the
+        // block or has a control flow instruction for which the next block
+        // cannot be determined.
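+        // Example: a predecessor ending in a branch whose condition is not
+        // statically known keeps both successors reachable; if the successor
+        // is known, only that one block is kept reachable via this edge.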
+        if (predecessor->IsReachable() && !predecessor->IsDeoptimizing()) {
+          HBasicBlock* pred_succ;
+          bool known_pred_succ =
+              predecessor->end()->KnownSuccessorBlock(&pred_succ);
+          if (!known_pred_succ || pred_succ == block) {
+            is_reachable = true;
+            break;
+          }
+        }
+        if (block->is_osr_entry()) {
+          is_reachable = true;
+        }
+      }
+      if (!is_reachable) {
+        block->MarkUnreachable();
+        changed = true;
+      }
+    }
+  }
+}
+
+
+void HMarkUnreachableBlocksPhase::Run() {
+  MarkUnreachableBlocks();
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-mark-unreachable.h b/src/crankshaft/hydrogen-mark-unreachable.h
new file mode 100644
index 0000000..1243b1f
--- /dev/null
+++ b/src/crankshaft/hydrogen-mark-unreachable.h
@@ -0,0 +1,31 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
+#define V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HMarkUnreachableBlocksPhase : public HPhase {
+ public:
+  explicit HMarkUnreachableBlocksPhase(HGraph* graph)
+      : HPhase("H_Mark unreachable blocks", graph) { }
+
+  void Run();
+
+ private:
+  void MarkUnreachableBlocks();
+
+  DISALLOW_COPY_AND_ASSIGN(HMarkUnreachableBlocksPhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_MARK_UNREACHABLE_H_
diff --git a/src/crankshaft/hydrogen-osr.cc b/src/crankshaft/hydrogen-osr.cc
new file mode 100644
index 0000000..c98bbf6
--- /dev/null
+++ b/src/crankshaft/hydrogen-osr.cc
@@ -0,0 +1,104 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-osr.h"
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+// True iff we are compiling for OSR and the statement is the OSR entry.
+bool HOsrBuilder::HasOsrEntryAt(IterationStatement* statement) {
+  return statement->OsrEntryId() == builder_->current_info()->osr_ast_id();
+}
+
+
+HBasicBlock* HOsrBuilder::BuildOsrLoopEntry(IterationStatement* statement) {
+  DCHECK(HasOsrEntryAt(statement));
+
+  Zone* zone = builder_->zone();
+  HGraph* graph = builder_->graph();
+
+  // Only one OSR point per compile is allowed.
+  DCHECK(graph->osr() == NULL);
+
+  // Remember this builder as the one OSR builder in the graph.
+  graph->set_osr(this);
+
+  HBasicBlock* non_osr_entry = graph->CreateBasicBlock();
+  osr_entry_ = graph->CreateBasicBlock();
+  HValue* true_value = graph->GetConstantTrue();
+  HBranch* test = builder_->New<HBranch>(true_value, ToBooleanStub::Types(),
+                                         non_osr_entry, osr_entry_);
+  builder_->FinishCurrentBlock(test);
+
+  HBasicBlock* loop_predecessor = graph->CreateBasicBlock();
+  builder_->Goto(non_osr_entry, loop_predecessor);
+
+  builder_->set_current_block(osr_entry_);
+  osr_entry_->set_osr_entry();
+  BailoutId osr_entry_id = statement->OsrEntryId();
+
+  HEnvironment* environment = builder_->environment();
+  int first_expression_index = environment->first_expression_index();
+  int length = environment->length();
+  osr_values_ = new(zone) ZoneList<HUnknownOSRValue*>(length, zone);
+
+  for (int i = 0; i < first_expression_index; ++i) {
+    HUnknownOSRValue* osr_value
+        = builder_->Add<HUnknownOSRValue>(environment, i);
+    environment->Bind(i, osr_value);
+    osr_values_->Add(osr_value, zone);
+  }
+
+  if (first_expression_index != length) {
+    environment->Drop(length - first_expression_index);
+    for (int i = first_expression_index; i < length; ++i) {
+      HUnknownOSRValue* osr_value
+          = builder_->Add<HUnknownOSRValue>(environment, i);
+      environment->Push(osr_value);
+      osr_values_->Add(osr_value, zone);
+    }
+  }
+
+  unoptimized_frame_slots_ =
+      environment->local_count() + environment->push_count();
+
+  // Keep a copy of the old environment, since the OSR values need it
+  // to figure out where exactly they are located in the unoptimized frame.
+  environment = environment->Copy();
+  builder_->current_block()->UpdateEnvironment(environment);
+
+  builder_->Add<HSimulate>(osr_entry_id);
+  builder_->Add<HOsrEntry>(osr_entry_id);
+  HContext* context = builder_->Add<HContext>();
+  environment->BindContext(context);
+  builder_->Goto(loop_predecessor);
+  loop_predecessor->SetJoinId(statement->EntryId());
+  builder_->set_current_block(loop_predecessor);
+
+  // Create the final loop entry.
+  osr_loop_entry_ = builder_->BuildLoopEntry();
+  return osr_loop_entry_;
+}
+
+
+void HOsrBuilder::FinishGraph() {
+  // Do nothing for now.
+}
+
+
+void HOsrBuilder::FinishOsrValues() {
+  const ZoneList<HPhi*>* phis = osr_loop_entry_->phis();
+  for (int j = 0; j < phis->length(); j++) {
+    HPhi* phi = phis->at(j);
+    if (phi->HasMergedIndex()) {
+      osr_values_->at(phi->merged_index())->set_incoming_value(phi);
+    }
+  }
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-osr.h b/src/crankshaft/hydrogen-osr.h
new file mode 100644
index 0000000..e2cfd30
--- /dev/null
+++ b/src/crankshaft/hydrogen-osr.h
@@ -0,0 +1,55 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_OSR_H_
+#define V8_CRANKSHAFT_HYDROGEN_OSR_H_
+
+#include "src/ast/ast.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Responsible for building graph parts related to OSR and otherwise
+// setting up the graph to do an OSR compile.
+class HOsrBuilder : public ZoneObject {
+ public:
+  explicit HOsrBuilder(HOptimizedGraphBuilder* builder)
+    : unoptimized_frame_slots_(0),
+      builder_(builder),
+      osr_entry_(NULL),
+      osr_loop_entry_(NULL),
+      osr_values_(NULL) { }
+
+  // Creates the loop entry block for the given statement, setting up OSR
+  // entries as necessary, and sets the current block to the new block.
+  HBasicBlock* BuildOsrLoopEntry(IterationStatement* statement);
+
+  // Process the hydrogen graph after it has been completed, performing
+  // any OSR-specific cleanups or changes.
+  void FinishGraph();
+
+  // Process the OSR values and phis after initial graph optimization.
+  void FinishOsrValues();
+
+  // Return the number of slots in the unoptimized frame at the entry to OSR.
+  int UnoptimizedFrameSlots() const {
+    return unoptimized_frame_slots_;
+  }
+
+  bool HasOsrEntryAt(IterationStatement* statement);
+
+ private:
+  int unoptimized_frame_slots_;
+  HOptimizedGraphBuilder* builder_;
+  HBasicBlock* osr_entry_;
+  HBasicBlock* osr_loop_entry_;
+  ZoneList<HUnknownOSRValue*>* osr_values_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_OSR_H_
diff --git a/src/crankshaft/hydrogen-range-analysis.cc b/src/crankshaft/hydrogen-range-analysis.cc
new file mode 100644
index 0000000..f5eba5e
--- /dev/null
+++ b/src/crankshaft/hydrogen-range-analysis.cc
@@ -0,0 +1,291 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-range-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+
+class Pending {
+ public:
+  Pending(HBasicBlock* block, int last_changed_range)
+      : block_(block), last_changed_range_(last_changed_range) {}
+
+  HBasicBlock* block() const { return block_; }
+  int last_changed_range() const { return last_changed_range_; }
+
+ private:
+  HBasicBlock* block_;
+  int last_changed_range_;
+};
+
+
+void HRangeAnalysisPhase::TraceRange(const char* msg, ...) {
+  if (FLAG_trace_range) {
+    va_list arguments;
+    va_start(arguments, msg);
+    base::OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+void HRangeAnalysisPhase::Run() {
+  HBasicBlock* block(graph()->entry_block());
+  ZoneList<Pending> stack(graph()->blocks()->length(), zone());
+  while (block != NULL) {
+    TraceRange("Analyzing block B%d\n", block->block_id());
+
+    // Infer range based on control flow.
+    if (block->predecessors()->length() == 1) {
+      HBasicBlock* pred = block->predecessors()->first();
+      if (pred->end()->IsCompareNumericAndBranch()) {
+        InferControlFlowRange(HCompareNumericAndBranch::cast(pred->end()),
+                              block);
+      }
+    }
+
+    // Process phi instructions.
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      InferRange(phi);
+    }
+
+    // Go through all instructions of the current block.
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HValue* value = it.Current();
+      InferRange(value);
+
+      // Compute the bailout-on-minus-zero flag.
+      if (value->IsChange()) {
+        HChange* instr = HChange::cast(value);
+        // Propagate flags for negative zero checks upwards from
+        // int32-to-tagged and int32-to-double conversions.
+        Representation from = instr->value()->representation();
+        DCHECK(from.Equals(instr->from()));
+        if (from.IsSmiOrInteger32()) {
+          DCHECK(instr->to().IsTagged() ||
+                 instr->to().IsDouble() ||
+                 instr->to().IsSmiOrInteger32());
+          PropagateMinusZeroChecks(instr->value());
+        }
+      } else if (value->IsCompareMinusZeroAndBranch()) {
+        HCompareMinusZeroAndBranch* instr =
+            HCompareMinusZeroAndBranch::cast(value);
+        if (instr->value()->representation().IsSmiOrInteger32()) {
+          PropagateMinusZeroChecks(instr->value());
+        }
+      }
+    }
+
+    // Continue analysis in all dominated blocks.
+    const ZoneList<HBasicBlock*>* dominated_blocks(block->dominated_blocks());
+    if (!dominated_blocks->is_empty()) {
+      // Continue with first dominated block, and push the
+      // remaining blocks on the stack (in reverse order).
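+      // This implements a depth-first walk over the dominator tree: the
+      // recorded changed_ranges_ length lets RollBackTo() undo the
+      // control-flow-derived ranges when backtracking to a sibling subtree.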
+      int last_changed_range = changed_ranges_.length();
+      for (int i = dominated_blocks->length() - 1; i > 0; --i) {
+        stack.Add(Pending(dominated_blocks->at(i), last_changed_range), zone());
+      }
+      block = dominated_blocks->at(0);
+    } else if (!stack.is_empty()) {
+      // Pop next pending block from stack.
+      Pending pending = stack.RemoveLast();
+      RollBackTo(pending.last_changed_range());
+      block = pending.block();
+    } else {
+      // All blocks done.
+      block = NULL;
+    }
+  }
+
+  // The ranges are not valid anymore: they were refined per control-flow
+  // path (SSI-style), while the graph remains in plain SSA form.
+  PoisonRanges();
+}
+
+
+void HRangeAnalysisPhase::PoisonRanges() {
+#ifdef DEBUG
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      if (instr->HasRange()) instr->PoisonRange();
+    }
+  }
+#endif
+}
+
+
+void HRangeAnalysisPhase::InferControlFlowRange(HCompareNumericAndBranch* test,
+                                                HBasicBlock* dest) {
+  DCHECK((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
+  if (test->representation().IsSmiOrInteger32()) {
+    Token::Value op = test->token();
+    if (test->SecondSuccessor() == dest) {
+      op = Token::NegateCompareOp(op);
+    }
+    Token::Value inverted_op = Token::ReverseCompareOp(op);
+    UpdateControlFlowRange(op, test->left(), test->right());
+    UpdateControlFlowRange(inverted_op, test->right(), test->left());
+  }
+}
+
+
+// We know that value [op] other. Use this information to update the range on
+// value.
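+//
+// Illustrative case: from "value < other" with other in [0, 10], the range of
+// value gets its lower bound cleared and its upper bound capped at 9
+// (CopyClearLower followed by AddConstant(-1)).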
+void HRangeAnalysisPhase::UpdateControlFlowRange(Token::Value op,
+                                                 HValue* value,
+                                                 HValue* other) {
+  Range temp_range;
+  Range* range = other->range() != NULL ? other->range() : &temp_range;
+  Range* new_range = NULL;
+
+  TraceRange("Control flow range infer %d %s %d\n",
+             value->id(),
+             Token::Name(op),
+             other->id());
+
+  if (op == Token::EQ || op == Token::EQ_STRICT) {
+    // The same range has to apply for value.
+    new_range = range->Copy(graph()->zone());
+  } else if (op == Token::LT || op == Token::LTE) {
+    new_range = range->CopyClearLower(graph()->zone());
+    if (op == Token::LT) {
+      new_range->AddConstant(-1);
+    }
+  } else if (op == Token::GT || op == Token::GTE) {
+    new_range = range->CopyClearUpper(graph()->zone());
+    if (op == Token::GT) {
+      new_range->AddConstant(1);
+    }
+  }
+
+  if (new_range != NULL && !new_range->IsMostGeneric()) {
+    AddRange(value, new_range);
+  }
+}
+
+
+void HRangeAnalysisPhase::InferRange(HValue* value) {
+  DCHECK(!value->HasRange());
+  if (!value->representation().IsNone()) {
+    value->ComputeInitialRange(graph()->zone());
+    Range* range = value->range();
+    TraceRange("Initial inferred range of %d (%s) set to [%d,%d]\n",
+               value->id(),
+               value->Mnemonic(),
+               range->lower(),
+               range->upper());
+  }
+}
+
+
+void HRangeAnalysisPhase::RollBackTo(int index) {
+  DCHECK(index <= changed_ranges_.length());
+  for (int i = index; i < changed_ranges_.length(); ++i) {
+    changed_ranges_[i]->RemoveLastAddedRange();
+  }
+  changed_ranges_.Rewind(index);
+}
+
+
+void HRangeAnalysisPhase::AddRange(HValue* value, Range* range) {
+  Range* original_range = value->range();
+  value->AddNewRange(range, graph()->zone());
+  changed_ranges_.Add(value, zone());
+  Range* new_range = value->range();
+  TraceRange("Updated range of %d set to [%d,%d]\n",
+             value->id(),
+             new_range->lower(),
+             new_range->upper());
+  if (original_range != NULL) {
+    TraceRange("Original range was [%d,%d]\n",
+               original_range->lower(),
+               original_range->upper());
+  }
+  TraceRange("New information was [%d,%d]\n",
+             range->lower(),
+             range->upper());
+}
+
+
+void HRangeAnalysisPhase::PropagateMinusZeroChecks(HValue* value) {
+  DCHECK(worklist_.is_empty());
+  DCHECK(in_worklist_.IsEmpty());
+
+  AddToWorklist(value);
+  while (!worklist_.is_empty()) {
+    value = worklist_.RemoveLast();
+
+    if (value->IsPhi()) {
+      // For phis, we must propagate the check to all of its inputs.
+      HPhi* phi = HPhi::cast(value);
+      for (int i = 0; i < phi->OperandCount(); ++i) {
+        AddToWorklist(phi->OperandAt(i));
+      }
+    } else if (value->IsUnaryMathOperation()) {
+      HUnaryMathOperation* instr = HUnaryMathOperation::cast(value);
+      if (instr->representation().IsSmiOrInteger32() &&
+          !instr->value()->representation().Equals(instr->representation())) {
+        if (instr->value()->range() == NULL ||
+            instr->value()->range()->CanBeMinusZero()) {
+          instr->SetFlag(HValue::kBailoutOnMinusZero);
+        }
+      }
+      if (instr->RequiredInputRepresentation(0).IsSmiOrInteger32() &&
+          instr->representation().Equals(
+              instr->RequiredInputRepresentation(0))) {
+        AddToWorklist(instr->value());
+      }
+    } else if (value->IsChange()) {
+      HChange* instr = HChange::cast(value);
+      if (!instr->from().IsSmiOrInteger32() &&
+          !instr->CanTruncateToInt32() &&
+          (instr->value()->range() == NULL ||
+           instr->value()->range()->CanBeMinusZero())) {
+        instr->SetFlag(HValue::kBailoutOnMinusZero);
+      }
+    } else if (value->IsForceRepresentation()) {
+      HForceRepresentation* instr = HForceRepresentation::cast(value);
+      AddToWorklist(instr->value());
+    } else if (value->IsMod()) {
+      HMod* instr = HMod::cast(value);
+      if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+        instr->SetFlag(HValue::kBailoutOnMinusZero);
+        AddToWorklist(instr->left());
+      }
+    } else if (value->IsDiv() || value->IsMul()) {
+      HBinaryOperation* instr = HBinaryOperation::cast(value);
+      if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+        instr->SetFlag(HValue::kBailoutOnMinusZero);
+      }
+      AddToWorklist(instr->right());
+      AddToWorklist(instr->left());
+    } else if (value->IsMathFloorOfDiv()) {
+      HMathFloorOfDiv* instr = HMathFloorOfDiv::cast(value);
+      instr->SetFlag(HValue::kBailoutOnMinusZero);
+    } else if (value->IsAdd() || value->IsSub()) {
+      HBinaryOperation* instr = HBinaryOperation::cast(value);
+      if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+        // Propagate to the left argument. If the left argument cannot be -0,
+        // then the result of the add/sub operation cannot be either.
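+        // E.g. for a - b, the result is -0 only when a is -0 (and b is +0),
+        // so checking the left operand suffices.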
+        AddToWorklist(instr->left());
+      }
+    } else if (value->IsMathMinMax()) {
+      HMathMinMax* instr = HMathMinMax::cast(value);
+      AddToWorklist(instr->right());
+      AddToWorklist(instr->left());
+    }
+  }
+
+  in_worklist_.Clear();
+  DCHECK(in_worklist_.IsEmpty());
+  DCHECK(worklist_.is_empty());
+}
+
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-range-analysis.h b/src/crankshaft/hydrogen-range-analysis.h
new file mode 100644
index 0000000..cff7026
--- /dev/null
+++ b/src/crankshaft/hydrogen-range-analysis.h
@@ -0,0 +1,51 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HRangeAnalysisPhase : public HPhase {
+ public:
+  explicit HRangeAnalysisPhase(HGraph* graph)
+      : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()),
+        in_worklist_(graph->GetMaximumValueID(), zone()),
+        worklist_(32, zone()) {}
+
+  void Run();
+
+ private:
+  void TraceRange(const char* msg, ...);
+  void InferControlFlowRange(HCompareNumericAndBranch* test,
+                             HBasicBlock* dest);
+  void UpdateControlFlowRange(Token::Value op, HValue* value, HValue* other);
+  void InferRange(HValue* value);
+  void RollBackTo(int index);
+  void AddRange(HValue* value, Range* range);
+  void AddToWorklist(HValue* value) {
+    if (in_worklist_.Contains(value->id())) return;
+    in_worklist_.Add(value->id());
+    worklist_.Add(value, zone());
+  }
+  void PropagateMinusZeroChecks(HValue* value);
+  void PoisonRanges();
+
+  ZoneList<HValue*> changed_ranges_;
+
+  BitVector in_worklist_;
+  ZoneList<HValue*> worklist_;
+
+  DISALLOW_COPY_AND_ASSIGN(HRangeAnalysisPhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_RANGE_ANALYSIS_H_
diff --git a/src/crankshaft/hydrogen-redundant-phi.cc b/src/crankshaft/hydrogen-redundant-phi.cc
new file mode 100644
index 0000000..ef8b291
--- /dev/null
+++ b/src/crankshaft/hydrogen-redundant-phi.cc
@@ -0,0 +1,66 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-redundant-phi.h"
+
+namespace v8 {
+namespace internal {
+
+void HRedundantPhiEliminationPhase::Run() {
+  // Gather all phis from all blocks first.
+  const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+  ZoneList<HPhi*> all_phis(blocks->length(), zone());
+  for (int i = 0; i < blocks->length(); ++i) {
+    HBasicBlock* block = blocks->at(i);
+    for (int j = 0; j < block->phis()->length(); j++) {
+      all_phis.Add(block->phis()->at(j), zone());
+    }
+  }
+
+  // Iteratively reduce all phis in the list.
+  ProcessPhis(&all_phis);
+
+#ifdef DEBUG
+  // Make sure that we *really* removed all redundant phis.
+  for (int i = 0; i < blocks->length(); ++i) {
+    for (int j = 0; j < blocks->at(i)->phis()->length(); j++) {
+      DCHECK(blocks->at(i)->phis()->at(j)->GetRedundantReplacement() == NULL);
+    }
+  }
+#endif
+}
+
+
+void HRedundantPhiEliminationPhase::ProcessBlock(HBasicBlock* block) {
+  ProcessPhis(block->phis());
+}
+
+
+void HRedundantPhiEliminationPhase::ProcessPhis(const ZoneList<HPhi*>* phis) {
+  bool updated;
+  do {
+    // Iteratively replace all redundant phis in the given list.
+    updated = false;
+    for (int i = 0; i < phis->length(); i++) {
+      HPhi* phi = phis->at(i);
+      if (phi->CheckFlag(HValue::kIsDead)) continue;  // Already replaced.
+
+      HValue* replacement = phi->GetRedundantReplacement();
+      if (replacement != NULL) {
+        phi->SetFlag(HValue::kIsDead);
+        for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
+          HValue* value = it.value();
+          value->SetOperandAt(it.index(), replacement);
+          // Iterate again if used in another non-dead phi.
+          updated |= value->IsPhi() && !value->CheckFlag(HValue::kIsDead);
+        }
+        phi->block()->RemovePhi(phi);
+      }
+    }
+  } while (updated);
+}
+
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-redundant-phi.h b/src/crankshaft/hydrogen-redundant-phi.h
new file mode 100644
index 0000000..e8735c8
--- /dev/null
+++ b/src/crankshaft/hydrogen-redundant-phi.h
@@ -0,0 +1,34 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
+#define V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Replace all phis consisting of a single non-loop operand plus any number of
+// loop operands by that single non-loop operand.
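+//
+// Sketch: for a loop-header phi p = phi(x, p), every loop operand is p
+// itself, so p is redundant and all of its uses are rewritten to x before
+// p is removed from its block.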
+class HRedundantPhiEliminationPhase : public HPhase {
+ public:
+  explicit HRedundantPhiEliminationPhase(HGraph* graph)
+      : HPhase("H_Redundant phi elimination", graph) { }
+
+  void Run();
+  void ProcessBlock(HBasicBlock* block);
+
+ private:
+  void ProcessPhis(const ZoneList<HPhi*>* phis);
+
+  DISALLOW_COPY_AND_ASSIGN(HRedundantPhiEliminationPhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_REDUNDANT_PHI_H_
diff --git a/src/crankshaft/hydrogen-removable-simulates.cc b/src/crankshaft/hydrogen-removable-simulates.cc
new file mode 100644
index 0000000..ceef743
--- /dev/null
+++ b/src/crankshaft/hydrogen-removable-simulates.cc
@@ -0,0 +1,189 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-removable-simulates.h"
+
+#include "src/crankshaft/hydrogen-flow-engine.h"
+#include "src/crankshaft/hydrogen-instructions.h"
+
+namespace v8 {
+namespace internal {
+
+class State : public ZoneObject {
+ public:
+  explicit State(Zone* zone)
+      : zone_(zone), mergelist_(2, zone), first_(true), mode_(NORMAL) { }
+
+  State* Process(HInstruction* instr, Zone* zone) {
+    if (FLAG_trace_removable_simulates) {
+      PrintF("[%s with state %p in B%d: #%d %s]\n",
+             mode_ == NORMAL ? "processing" : "collecting",
+             reinterpret_cast<void*>(this), instr->block()->block_id(),
+             instr->id(), instr->Mnemonic());
+    }
+    // Forward-merge "trains" of simulates after an instruction with observable
+    // side effects to keep live ranges short.
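+    // Illustrative shape: after "call; simulate A; simulate B; simulate C",
+    // A and B are folded forward into C on the flush, so only a single
+    // environment stays live across the train.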
+    if (mode_ == COLLECT_CONSECUTIVE_SIMULATES) {
+      if (instr->IsSimulate()) {
+        HSimulate* current_simulate = HSimulate::cast(instr);
+        if (current_simulate->is_candidate_for_removal() &&
+            !current_simulate->ast_id().IsNone()) {
+          Remember(current_simulate);
+          return this;
+        }
+      }
+      FlushSimulates();
+      mode_ = NORMAL;
+    }
+    // Ensure there's a non-foldable HSimulate before an HEnterInlined to avoid
+    // folding across HEnterInlined.
+    DCHECK(!(instr->IsEnterInlined() &&
+             HSimulate::cast(instr->previous())->is_candidate_for_removal()));
+    if (instr->IsLeaveInlined() || instr->IsReturn()) {
+      // Never fold simulates from inlined environments into simulates in the
+      // outer environment. Simply remove all accumulated simulates without
+      // merging. This is safe because simulates after instructions with side
+      // effects are never added to the merge list. The same reasoning holds for
+      // return instructions.
+      RemoveSimulates();
+      return this;
+    }
+    if (instr->IsControlInstruction()) {
+      // Merge the accumulated simulates at the end of the block.
+      FlushSimulates();
+      return this;
+    }
+    if (instr->IsCapturedObject()) {
+      // Do not merge simulates across captured objects - captured objects
+      // change environments during environment replay, and such changes
+      // would not be reflected in the simulate.
+      FlushSimulates();
+      return this;
+    }
+    // Skip the non-simulates and the first simulate.
+    if (!instr->IsSimulate()) return this;
+    if (first_) {
+      first_ = false;
+      return this;
+    }
+    HSimulate* current_simulate = HSimulate::cast(instr);
+    if (!current_simulate->is_candidate_for_removal()) {
+      Remember(current_simulate);
+      FlushSimulates();
+    } else if (current_simulate->ast_id().IsNone()) {
+      DCHECK(current_simulate->next()->IsEnterInlined());
+      FlushSimulates();
+    } else if (current_simulate->previous()->HasObservableSideEffects()) {
+      Remember(current_simulate);
+      mode_ = COLLECT_CONSECUTIVE_SIMULATES;
+    } else {
+      Remember(current_simulate);
+    }
+
+    return this;
+  }
+
+  static State* Merge(State* succ_state,
+                      HBasicBlock* succ_block,
+                      State* pred_state,
+                      HBasicBlock* pred_block,
+                      Zone* zone) {
+    return (succ_state == NULL)
+        ? pred_state->Copy(succ_block, pred_block, zone)
+        : succ_state->Merge(succ_block, pred_state, pred_block, zone);
+  }
+
+  static State* Finish(State* state, HBasicBlock* block, Zone* zone) {
+    if (FLAG_trace_removable_simulates) {
+      PrintF("[preparing state %p for B%d]\n", reinterpret_cast<void*>(state),
+             block->block_id());
+    }
+    // For our current local analysis, we should not remember simulates across
+    // block boundaries.
+    DCHECK(!state->HasRememberedSimulates());
+    // Nasty heuristic: Never remove the first simulate in a block. This
+    // just so happens to have a beneficial effect on register allocation.
+    state->first_ = true;
+    return state;
+  }
+
+ private:
+  explicit State(const State& other)
+      : zone_(other.zone_),
+        mergelist_(other.mergelist_, other.zone_),
+        first_(other.first_),
+        mode_(other.mode_) { }
+
+  enum Mode { NORMAL, COLLECT_CONSECUTIVE_SIMULATES };
+
+  bool HasRememberedSimulates() const { return !mergelist_.is_empty(); }
+
+  void Remember(HSimulate* sim) {
+    mergelist_.Add(sim, zone_);
+  }
+
+  void FlushSimulates() {
+    if (HasRememberedSimulates()) {
+      mergelist_.RemoveLast()->MergeWith(&mergelist_);
+    }
+  }
+
+  void RemoveSimulates() {
+    while (HasRememberedSimulates()) {
+      mergelist_.RemoveLast()->DeleteAndReplaceWith(NULL);
+    }
+  }
+
+  State* Copy(HBasicBlock* succ_block, HBasicBlock* pred_block, Zone* zone) {
+    State* copy = new(zone) State(*this);
+    if (FLAG_trace_removable_simulates) {
+      PrintF("[copy state %p from B%d to new state %p for B%d]\n",
+             reinterpret_cast<void*>(this), pred_block->block_id(),
+             reinterpret_cast<void*>(copy), succ_block->block_id());
+    }
+    return copy;
+  }
+
+  State* Merge(HBasicBlock* succ_block,
+               State* pred_state,
+               HBasicBlock* pred_block,
+               Zone* zone) {
+    // For our current local analysis, we should not remember simulates across
+    // block boundaries.
+    DCHECK(!pred_state->HasRememberedSimulates());
+    DCHECK(!HasRememberedSimulates());
+    if (FLAG_trace_removable_simulates) {
+      PrintF("[merge state %p from B%d into %p for B%d]\n",
+             reinterpret_cast<void*>(pred_state), pred_block->block_id(),
+             reinterpret_cast<void*>(this), succ_block->block_id());
+    }
+    return this;
+  }
+
+  Zone* zone_;
+  ZoneList<HSimulate*> mergelist_;
+  bool first_;
+  Mode mode_;
+};
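+
+// A schematic example (annotation only; the instruction sequence below is
+// hypothetical, not real graph output): given a "train" of candidate
+// simulates following a side-effecting instruction,
+//
+//   i1: StoreNamedField ...       // observable side effect
+//   s2: Simulate ast_id=3         // candidate for removal
+//   s3: Simulate ast_id=4         // candidate for removal
+//   s4: Simulate ast_id=5         // candidate for removal
+//
+// Process(s2) switches the state to COLLECT_CONSECUTIVE_SIMULATES (assuming
+// s2 is not the first simulate of its block) and Remember()s s2..s4. The
+// next non-simulate instruction then triggers FlushSimulates(), which calls
+// s4->MergeWith([s2, s3]) so that a single simulate survives and live ranges
+// stay short.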
+
+
+// We don't use effects here.
+class Effects : public ZoneObject {
+ public:
+  explicit Effects(Zone* zone) { }
+  bool Disabled() { return true; }
+  void Process(HInstruction* instr, Zone* zone) { }
+  void Apply(State* state) { }
+  void Union(Effects* that, Zone* zone) { }
+};
+
+
+void HMergeRemovableSimulatesPhase::Run() {
+  HFlowEngine<State, Effects> engine(graph(), zone());
+  State* state = new(zone()) State(zone());
+  engine.AnalyzeDominatedBlocks(graph()->blocks()->at(0), state);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-removable-simulates.h b/src/crankshaft/hydrogen-removable-simulates.h
new file mode 100644
index 0000000..3450001
--- /dev/null
+++ b/src/crankshaft/hydrogen-removable-simulates.h
@@ -0,0 +1,29 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
+#define V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HMergeRemovableSimulatesPhase : public HPhase {
+ public:
+  explicit HMergeRemovableSimulatesPhase(HGraph* graph)
+      : HPhase("H_Merge removable simulates", graph) { }
+
+  void Run();
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HMergeRemovableSimulatesPhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_REMOVABLE_SIMULATES_H_
diff --git a/src/crankshaft/hydrogen-representation-changes.cc b/src/crankshaft/hydrogen-representation-changes.cc
new file mode 100644
index 0000000..32b614c
--- /dev/null
+++ b/src/crankshaft/hydrogen-representation-changes.cc
@@ -0,0 +1,214 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-representation-changes.h"
+
+namespace v8 {
+namespace internal {
+
+void HRepresentationChangesPhase::InsertRepresentationChangeForUse(
+    HValue* value, HValue* use_value, int use_index, Representation to) {
+  // Insert the representation change right before its use. For phi-uses we
+  // insert at the end of the corresponding predecessor.
+  HInstruction* next = NULL;
+  if (use_value->IsPhi()) {
+    next = use_value->block()->predecessors()->at(use_index)->end();
+  } else {
+    next = HInstruction::cast(use_value);
+  }
+  // For constants we try to make the representation change at compile
+  // time. When a representation change is not possible without loss of
+  // information, we treat constants like normal instructions and insert the
+  // change instructions for them.
+  HInstruction* new_value = NULL;
+  bool is_truncating_to_smi = use_value->CheckFlag(HValue::kTruncatingToSmi);
+  bool is_truncating_to_int = use_value->CheckFlag(HValue::kTruncatingToInt32);
+  if (value->IsConstant()) {
+    HConstant* constant = HConstant::cast(value);
+    // Try to create a new copy of the constant with the new representation.
+    if (is_truncating_to_int && to.IsInteger32()) {
+      Maybe<HConstant*> res = constant->CopyToTruncatedInt32(graph()->zone());
+      if (res.IsJust()) new_value = res.FromJust();
+    } else {
+      new_value = constant->CopyToRepresentation(to, graph()->zone());
+    }
+  }
+
+  if (new_value == NULL) {
+    new_value = new(graph()->zone()) HChange(
+        value, to, is_truncating_to_smi, is_truncating_to_int);
+    if (!use_value->operand_position(use_index).IsUnknown()) {
+      new_value->set_position(use_value->operand_position(use_index));
+    } else {
+      DCHECK(!FLAG_hydrogen_track_positions ||
+             !graph()->info()->IsOptimizing());
+    }
+  }
+
+  new_value->InsertBefore(next);
+  use_value->SetOperandAt(use_index, new_value);
+}
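+
+// A schematic example (annotation only): if a Tagged HConstant(1) flows into
+// a use that requires Integer32, the code above first tries
+// CopyToRepresentation() / CopyToTruncatedInt32(), so the change happens at
+// compile time and no instruction is emitted. Only when that fails, or the
+// value is not a constant, is an explicit conversion
+//
+//   HChange(value, to, is_truncating_to_smi, is_truncating_to_int)
+//
+// inserted directly before the using instruction, or at the end of the
+// corresponding predecessor block when the use is a phi.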
+
+
+static bool IsNonDeoptingIntToSmiChange(HChange* change) {
+  Representation from_rep = change->from();
+  Representation to_rep = change->to();
+  // Flags indicating Uint32 operations are set in a later Hydrogen phase.
+  DCHECK(!change->CheckFlag(HValue::kUint32));
+  return from_rep.IsInteger32() && to_rep.IsSmi() && SmiValuesAre32Bits();
+}
+
+
+void HRepresentationChangesPhase::InsertRepresentationChangesForValue(
+    HValue* value) {
+  Representation r = value->representation();
+  if (r.IsNone()) {
+#ifdef DEBUG
+    for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+      HValue* use_value = it.value();
+      int use_index = it.index();
+      Representation req = use_value->RequiredInputRepresentation(use_index);
+      DCHECK(req.IsNone());
+    }
+#endif
+    return;
+  }
+  if (value->HasNoUses()) {
+    if (value->IsForceRepresentation()) value->DeleteAndReplaceWith(NULL);
+    return;
+  }
+
+  for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+    HValue* use_value = it.value();
+    int use_index = it.index();
+    Representation req = use_value->RequiredInputRepresentation(use_index);
+    if (req.IsNone() || req.Equals(r)) continue;
+
+    // If this is an HForceRepresentation instruction, and an HChange has been
+    // inserted above it, examine the input representation of the HChange. If
+    // that's int32, and this HForceRepresentation use is int32, and int32 to
+    // smi changes can't cause deoptimisation, set the input of the use to the
+    // input of the HChange.
+    if (value->IsForceRepresentation()) {
+      HValue* input = HForceRepresentation::cast(value)->value();
+      if (input->IsChange()) {
+        HChange* change = HChange::cast(input);
+        if (change->from().Equals(req) && IsNonDeoptingIntToSmiChange(change)) {
+          use_value->SetOperandAt(use_index, change->value());
+          continue;
+        }
+      }
+    }
+    InsertRepresentationChangeForUse(value, use_value, use_index, req);
+  }
+  if (value->HasNoUses()) {
+    DCHECK(value->IsConstant() || value->IsForceRepresentation());
+    value->DeleteAndReplaceWith(NULL);
+  } else {
+    // The only purpose of a HForceRepresentation is to represent the value
+    // after the (possible) HChange instruction.  We make it disappear.
+    if (value->IsForceRepresentation()) {
+      value->DeleteAndReplaceWith(HForceRepresentation::cast(value)->value());
+    }
+  }
+}
+
+
+void HRepresentationChangesPhase::Run() {
+  // Compute truncation flag for phis: Initially assume that all
+  // int32-phis allow truncation and iteratively remove the ones that
+  // are used in an operation that does not allow a truncating
+  // conversion.
+  ZoneList<HPhi*> int_worklist(8, zone());
+  ZoneList<HPhi*> smi_worklist(8, zone());
+
+  const ZoneList<HPhi*>* phi_list(graph()->phi_list());
+  for (int i = 0; i < phi_list->length(); i++) {
+    HPhi* phi = phi_list->at(i);
+    if (phi->representation().IsInteger32()) {
+      phi->SetFlag(HValue::kTruncatingToInt32);
+    } else if (phi->representation().IsSmi()) {
+      phi->SetFlag(HValue::kTruncatingToSmi);
+      phi->SetFlag(HValue::kTruncatingToInt32);
+    }
+  }
+
+  for (int i = 0; i < phi_list->length(); i++) {
+    HPhi* phi = phi_list->at(i);
+    HValue* value = NULL;
+    if (phi->representation().IsSmiOrInteger32() &&
+        !phi->CheckUsesForFlag(HValue::kTruncatingToInt32, &value)) {
+      int_worklist.Add(phi, zone());
+      phi->ClearFlag(HValue::kTruncatingToInt32);
+      if (FLAG_trace_representation) {
+        PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
+               phi->id(), value->id(), value->Mnemonic());
+      }
+    }
+
+    if (phi->representation().IsSmi() &&
+        !phi->CheckUsesForFlag(HValue::kTruncatingToSmi, &value)) {
+      smi_worklist.Add(phi, zone());
+      phi->ClearFlag(HValue::kTruncatingToSmi);
+      if (FLAG_trace_representation) {
+        PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
+               phi->id(), value->id(), value->Mnemonic());
+      }
+    }
+  }
+
+  while (!int_worklist.is_empty()) {
+    HPhi* current = int_worklist.RemoveLast();
+    for (int i = 0; i < current->OperandCount(); ++i) {
+      HValue* input = current->OperandAt(i);
+      if (input->IsPhi() &&
+          input->representation().IsSmiOrInteger32() &&
+          input->CheckFlag(HValue::kTruncatingToInt32)) {
+        if (FLAG_trace_representation) {
+          PrintF("#%d Phi is not truncating Int32 because of #%d %s\n",
+                 input->id(), current->id(), current->Mnemonic());
+        }
+        input->ClearFlag(HValue::kTruncatingToInt32);
+        int_worklist.Add(HPhi::cast(input), zone());
+      }
+    }
+  }
+
+  while (!smi_worklist.is_empty()) {
+    HPhi* current = smi_worklist.RemoveLast();
+    for (int i = 0; i < current->OperandCount(); ++i) {
+      HValue* input = current->OperandAt(i);
+      if (input->IsPhi() &&
+          input->representation().IsSmi() &&
+          input->CheckFlag(HValue::kTruncatingToSmi)) {
+        if (FLAG_trace_representation) {
+          PrintF("#%d Phi is not truncating Smi because of #%d %s\n",
+                 input->id(), current->id(), current->Mnemonic());
+        }
+        input->ClearFlag(HValue::kTruncatingToSmi);
+        smi_worklist.Add(HPhi::cast(input), zone());
+      }
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
+  for (int i = 0; i < blocks->length(); ++i) {
+    // Process phi instructions first.
+    const HBasicBlock* block(blocks->at(i));
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int j = 0; j < phis->length(); j++) {
+      InsertRepresentationChangesForValue(phis->at(j));
+    }
+
+    // Process normal instructions.
+    for (HInstruction* current = block->first(); current != NULL; ) {
+      HInstruction* next = current->next();
+      InsertRepresentationChangesForValue(current);
+      current = next;
+    }
+  }
+}
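+
+// A schematic example (annotation only; phi names are hypothetical): the
+// truncation analysis above is a worklist fixed point. All int32/smi phis
+// start out marked as truncating; given
+//
+//   P1 = Phi(P2, x)
+//   P2 = Phi(P1, y)
+//   z  = Use(P1)    // a use that does not allow truncating int32
+//
+// P1 loses kTruncatingToInt32 and enters int_worklist; draining the worklist
+// clears the flag on truncating operand phis (here P2) transitively, until
+// no more flags change.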
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-representation-changes.h b/src/crankshaft/hydrogen-representation-changes.h
new file mode 100644
index 0000000..d840394
--- /dev/null
+++ b/src/crankshaft/hydrogen-representation-changes.h
@@ -0,0 +1,33 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
+#define V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HRepresentationChangesPhase : public HPhase {
+ public:
+  explicit HRepresentationChangesPhase(HGraph* graph)
+      : HPhase("H_Representation changes", graph) { }
+
+  void Run();
+
+ private:
+  void InsertRepresentationChangeForUse(HValue* value,
+                                        HValue* use_value,
+                                        int use_index,
+                                        Representation to);
+  void InsertRepresentationChangesForValue(HValue* value);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_REPRESENTATION_CHANGES_H_
diff --git a/src/crankshaft/hydrogen-sce.cc b/src/crankshaft/hydrogen-sce.cc
new file mode 100644
index 0000000..91e91d2
--- /dev/null
+++ b/src/crankshaft/hydrogen-sce.cc
@@ -0,0 +1,39 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-sce.h"
+
+namespace v8 {
+namespace internal {
+
+void HStackCheckEliminationPhase::Run() {
+  // For each loop block, walk the dominator tree from the backwards branch to
+  // the loop header. If a call instruction is encountered, the backwards
+  // branch is dominated by a call and the stack check in the backwards branch
+  // can be removed.
+  for (int i = 0; i < graph()->blocks()->length(); i++) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    if (block->IsLoopHeader()) {
+      HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
+      HBasicBlock* dominator = back_edge;
+      while (true) {
+        for (HInstructionIterator it(dominator); !it.Done(); it.Advance()) {
+          if (it.Current()->HasStackCheck()) {
+            block->loop_information()->stack_check()->Eliminate();
+            break;
+          }
+        }
+
+        // Done when the loop header is processed.
+        if (dominator == block) break;
+
+        // Move up the dominator tree.
+        dominator = dominator->dominator();
+      }
+    }
+  }
+}
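+
+// A schematic example (annotation only; block ids are hypothetical): for a
+// loop
+//
+//   B2 (header, stack check) -> ... -> B5 (contains a call) -> ... -> B7
+//   (back edge to B2)
+//
+// the walk starts at B7 and moves up the dominator tree; at B5 an
+// instruction with HasStackCheck() is found, so the loop's stack check is
+// eliminated: the call already checks the stack on every iteration.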
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-sce.h b/src/crankshaft/hydrogen-sce.h
new file mode 100644
index 0000000..bb896ba
--- /dev/null
+++ b/src/crankshaft/hydrogen-sce.h
@@ -0,0 +1,26 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_SCE_H_
+#define V8_CRANKSHAFT_HYDROGEN_SCE_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+class HStackCheckEliminationPhase : public HPhase {
+ public:
+  explicit HStackCheckEliminationPhase(HGraph* graph)
+      : HPhase("H_Stack check elimination", graph) { }
+
+  void Run();
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_SCE_H_
diff --git a/src/crankshaft/hydrogen-store-elimination.cc b/src/crankshaft/hydrogen-store-elimination.cc
new file mode 100644
index 0000000..ba32c8a
--- /dev/null
+++ b/src/crankshaft/hydrogen-store-elimination.cc
@@ -0,0 +1,123 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-store-elimination.h"
+
+#include "src/crankshaft/hydrogen-instructions.h"
+
+namespace v8 {
+namespace internal {
+
+#define TRACE(x) if (FLAG_trace_store_elimination) PrintF x
+
+// Performs a block-by-block local analysis for removable stores.
+void HStoreEliminationPhase::Run() {
+  GVNFlagSet flags;  // Use GVN flags as an approximation for some instructions.
+  flags.RemoveAll();
+
+  flags.Add(kArrayElements);
+  flags.Add(kArrayLengths);
+  flags.Add(kStringLengths);
+  flags.Add(kBackingStoreFields);
+  flags.Add(kDoubleArrayElements);
+  flags.Add(kDoubleFields);
+  flags.Add(kElementsPointer);
+  flags.Add(kInobjectFields);
+  flags.Add(kExternalMemory);
+  flags.Add(kStringChars);
+  flags.Add(kTypedArrayElements);
+
+  for (int i = 0; i < graph()->blocks()->length(); i++) {
+    unobserved_.Rewind(0);
+    HBasicBlock* block = graph()->blocks()->at(i);
+    if (!block->IsReachable()) continue;
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* instr = it.Current();
+      if (instr->CheckFlag(HValue::kIsDead)) continue;
+
+      // TODO(titzer): eliminate unobserved HStoreKeyed instructions too.
+      switch (instr->opcode()) {
+        case HValue::kStoreNamedField:
+          // Remove any unobserved stores overwritten by this store.
+          ProcessStore(HStoreNamedField::cast(instr));
+          break;
+        case HValue::kLoadNamedField:
+          // Observe any unobserved stores on this object + field.
+          ProcessLoad(HLoadNamedField::cast(instr));
+          break;
+        default:
+          ProcessInstr(instr, flags);
+          break;
+      }
+    }
+  }
+}
+
+
+void HStoreEliminationPhase::ProcessStore(HStoreNamedField* store) {
+  HValue* object = store->object()->ActualValue();
+  int i = 0;
+  while (i < unobserved_.length()) {
+    HStoreNamedField* prev = unobserved_.at(i);
+    if (aliasing_->MustAlias(object, prev->object()->ActualValue()) &&
+        prev->CanBeReplacedWith(store)) {
+      // This store is guaranteed to overwrite the previous store.
+      prev->DeleteAndReplaceWith(NULL);
+      TRACE(("++ Unobserved store S%d overwritten by S%d\n",
+             prev->id(), store->id()));
+      unobserved_.Remove(i);
+    } else {
+      // TODO(titzer): remove map word clearing from folded allocations.
+      i++;
+    }
+  }
+  // Only non-transitioning stores are removable.
+  if (!store->has_transition()) {
+    TRACE(("-- Might remove store S%d\n", store->id()));
+    unobserved_.Add(store, zone());
+  }
+}
+
+
+void HStoreEliminationPhase::ProcessLoad(HLoadNamedField* load) {
+  HValue* object = load->object()->ActualValue();
+  int i = 0;
+  while (i < unobserved_.length()) {
+    HStoreNamedField* prev = unobserved_.at(i);
+    if (aliasing_->MayAlias(object, prev->object()->ActualValue()) &&
+        load->access().Equals(prev->access())) {
+      TRACE(("-- Observed store S%d by load L%d\n", prev->id(), load->id()));
+      unobserved_.Remove(i);
+    } else {
+      i++;
+    }
+  }
+}
+
+
+void HStoreEliminationPhase::ProcessInstr(HInstruction* instr,
+    GVNFlagSet flags) {
+  if (unobserved_.length() == 0) return;  // Nothing to do.
+  if (instr->CanDeoptimize()) {
+    TRACE(("-- Observed stores at I%d (%s might deoptimize)\n",
+           instr->id(), instr->Mnemonic()));
+    unobserved_.Rewind(0);
+    return;
+  }
+  if (instr->CheckChangesFlag(kNewSpacePromotion)) {
+    TRACE(("-- Observed stores at I%d (%s might GC)\n",
+           instr->id(), instr->Mnemonic()));
+    unobserved_.Rewind(0);
+    return;
+  }
+  if (instr->DependsOnFlags().ContainsAnyOf(flags)) {
+    TRACE(("-- Observed stores at I%d (GVN flags of %s)\n",
+           instr->id(), instr->Mnemonic()));
+    unobserved_.Rewind(0);
+    return;
+  }
+}
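+
+// A schematic example (annotation only; the stores below are hypothetical):
+//
+//   S1: StoreNamedField obj.x = a
+//   S2: StoreNamedField obj.x = b    // same object, same field access
+//
+// When S2 is processed, S1 is still in unobserved_, MustAlias() holds and
+// S1->CanBeReplacedWith(S2) succeeds, so S1 is deleted. A LoadNamedField of
+// obj.x between the two stores would instead observe S1 and remove it from
+// unobserved_, and an instruction that can deoptimize or GC would flush the
+// whole list; either way S1 would survive.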
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-store-elimination.h b/src/crankshaft/hydrogen-store-elimination.h
new file mode 100644
index 0000000..2a9e0c1
--- /dev/null
+++ b/src/crankshaft/hydrogen-store-elimination.h
@@ -0,0 +1,35 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
+#define V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/hydrogen-alias-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+class HStoreEliminationPhase : public HPhase {
+ public:
+  explicit HStoreEliminationPhase(HGraph* graph)
+    : HPhase("H_Store elimination", graph),
+      unobserved_(10, zone()),
+      aliasing_() { }
+
+  void Run();
+ private:
+  ZoneList<HStoreNamedField*> unobserved_;
+  HAliasAnalyzer* aliasing_;
+
+  void ProcessStore(HStoreNamedField* store);
+  void ProcessLoad(HLoadNamedField* load);
+  void ProcessInstr(HInstruction* instr, GVNFlagSet flags);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_STORE_ELIMINATION_H_
diff --git a/src/crankshaft/hydrogen-types.cc b/src/crankshaft/hydrogen-types.cc
new file mode 100644
index 0000000..9c5e341
--- /dev/null
+++ b/src/crankshaft/hydrogen-types.cc
@@ -0,0 +1,74 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-types.h"
+
+#include "src/ostreams.h"
+#include "src/types-inl.h"
+
+
+namespace v8 {
+namespace internal {
+
+// static
+template <class T>
+HType HType::FromType(typename T::TypeHandle type) {
+  if (T::Any()->Is(type)) return HType::Any();
+  if (!type->IsInhabited()) return HType::None();
+  if (type->Is(T::SignedSmall())) return HType::Smi();
+  if (type->Is(T::Number())) return HType::TaggedNumber();
+  if (type->Is(T::Null())) return HType::Null();
+  if (type->Is(T::String())) return HType::String();
+  if (type->Is(T::Boolean())) return HType::Boolean();
+  if (type->Is(T::Undefined())) return HType::Undefined();
+  if (type->Is(T::Object())) return HType::JSObject();
+  if (type->Is(T::Receiver())) return HType::JSReceiver();
+  return HType::Tagged();
+}
+
+
+// static
+template
+HType HType::FromType<Type>(Type* type);
+
+
+// static
+template
+HType HType::FromType<HeapType>(Handle<HeapType> type);
+
+
+// static
+HType HType::FromValue(Handle<Object> value) {
+  if (value->IsSmi()) return HType::Smi();
+  if (value->IsNull()) return HType::Null();
+  if (value->IsHeapNumber()) {
+    double n = Handle<v8::internal::HeapNumber>::cast(value)->value();
+    return IsSmiDouble(n) ? HType::Smi() : HType::HeapNumber();
+  }
+  if (value->IsString()) return HType::String();
+  if (value->IsBoolean()) return HType::Boolean();
+  if (value->IsUndefined()) return HType::Undefined();
+  if (value->IsJSArray()) return HType::JSArray();
+  if (value->IsJSObject()) return HType::JSObject();
+  DCHECK(value->IsHeapObject());
+  return HType::HeapObject();
+}
+
+
+std::ostream& operator<<(std::ostream& os, const HType& t) {
+  // Note: The c1visualizer syntax for locals allows only a sequence of the
+  // following characters: A-Za-z0-9_-|:
+  switch (t.kind_) {
+#define DEFINE_CASE(Name, mask) \
+  case HType::k##Name:          \
+    return os << #Name;
+    HTYPE_LIST(DEFINE_CASE)
+#undef DEFINE_CASE
+  }
+  UNREACHABLE();
+  return os;
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-types.h b/src/crankshaft/hydrogen-types.h
new file mode 100644
index 0000000..87148ee
--- /dev/null
+++ b/src/crankshaft/hydrogen-types.h
@@ -0,0 +1,92 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_TYPES_H_
+#define V8_CRANKSHAFT_HYDROGEN_TYPES_H_
+
+#include <climits>
+#include <iosfwd>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+template <typename T> class Handle;
+class Object;
+
+#define HTYPE_LIST(V)                               \
+  V(Any, 0x0)             /* 0000 0000 0000 0000 */ \
+  V(Tagged, 0x1)          /* 0000 0000 0000 0001 */ \
+  V(TaggedPrimitive, 0x5) /* 0000 0000 0000 0101 */ \
+  V(TaggedNumber, 0xd)    /* 0000 0000 0000 1101 */ \
+  V(Smi, 0x1d)            /* 0000 0000 0001 1101 */ \
+  V(HeapObject, 0x21)     /* 0000 0000 0010 0001 */ \
+  V(HeapPrimitive, 0x25)  /* 0000 0000 0010 0101 */ \
+  V(Null, 0x27)           /* 0000 0000 0010 0111 */ \
+  V(HeapNumber, 0x2d)     /* 0000 0000 0010 1101 */ \
+  V(String, 0x65)         /* 0000 0000 0110 0101 */ \
+  V(Boolean, 0xa5)        /* 0000 0000 1010 0101 */ \
+  V(Undefined, 0x125)     /* 0000 0001 0010 0101 */ \
+  V(JSReceiver, 0x221)    /* 0000 0010 0010 0001 */ \
+  V(JSObject, 0x621)      /* 0000 0110 0010 0001 */ \
+  V(JSArray, 0xe21)       /* 0000 1110 0010 0001 */ \
+  V(None, 0xfff)          /* 0000 1111 1111 1111 */
+
+class HType final {
+ public:
+  #define DECLARE_CONSTRUCTOR(Name, mask) \
+    static HType Name() WARN_UNUSED_RESULT { return HType(k##Name); }
+  HTYPE_LIST(DECLARE_CONSTRUCTOR)
+  #undef DECLARE_CONSTRUCTOR
+
+  // Return the weakest (least precise) common type.
+  HType Combine(HType other) const WARN_UNUSED_RESULT {
+    return HType(static_cast<Kind>(kind_ & other.kind_));
+  }
+
+  bool Equals(HType other) const WARN_UNUSED_RESULT {
+    return kind_ == other.kind_;
+  }
+
+  bool IsSubtypeOf(HType other) const WARN_UNUSED_RESULT {
+    return Combine(other).Equals(other);
+  }
+
+  #define DECLARE_IS_TYPE(Name, mask)               \
+    bool Is##Name() const WARN_UNUSED_RESULT {      \
+      return IsSubtypeOf(HType::Name());            \
+    }
+  HTYPE_LIST(DECLARE_IS_TYPE)
+  #undef DECLARE_IS_TYPE
+
+  template <class T>
+  static HType FromType(typename T::TypeHandle type) WARN_UNUSED_RESULT;
+  static HType FromValue(Handle<Object> value) WARN_UNUSED_RESULT;
+
+  friend std::ostream& operator<<(std::ostream& os, const HType& t);
+
+ private:
+  enum Kind {
+    #define DECLARE_TYPE(Name, mask) k##Name = mask,
+    HTYPE_LIST(DECLARE_TYPE)
+    #undef DECLARE_TYPE
+    LAST_KIND = kNone
+  };
+
+  // Make sure type fits in int16.
+  STATIC_ASSERT(LAST_KIND < (1 << (CHAR_BIT * sizeof(int16_t))));
+
+  explicit HType(Kind kind) : kind_(kind) { }
+
+  int16_t kind_;
+};
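+
+// A schematic example (annotation only): the masks above encode the type
+// lattice so that Combine(), a bitwise AND, yields the least precise common
+// type and IsSubtypeOf() is a mask-containment test:
+//
+//   HType::Smi().IsSubtypeOf(HType::TaggedNumber());  // 0x1d & 0xd == 0xd
+//   HType::Smi().Combine(HType::String());            // 0x1d & 0x65 == 0x5,
+//                                                     // i.e. TaggedPrimitive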
+
+
+std::ostream& operator<<(std::ostream& os, const HType& t);
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_TYPES_H_
diff --git a/src/crankshaft/hydrogen-uint32-analysis.cc b/src/crankshaft/hydrogen-uint32-analysis.cc
new file mode 100644
index 0000000..ac4a63f
--- /dev/null
+++ b/src/crankshaft/hydrogen-uint32-analysis.cc
@@ -0,0 +1,237 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen-uint32-analysis.h"
+
+namespace v8 {
+namespace internal {
+
+
+static bool IsUnsignedLoad(HLoadKeyed* instr) {
+  switch (instr->elements_kind()) {
+    case UINT8_ELEMENTS:
+    case UINT16_ELEMENTS:
+    case UINT32_ELEMENTS:
+    case UINT8_CLAMPED_ELEMENTS:
+      return true;
+    default:
+      return false;
+  }
+}
+
+
+static bool IsUint32Operation(HValue* instr) {
+  return instr->IsShr() ||
+      (instr->IsLoadKeyed() && IsUnsignedLoad(HLoadKeyed::cast(instr))) ||
+      (instr->IsInteger32Constant() && instr->GetInteger32Constant() >= 0);
+}
+
+
+bool HUint32AnalysisPhase::IsSafeUint32Use(HValue* val, HValue* use) {
+  // Operations that operate on bits are safe.
+  if (use->IsBitwise() || use->IsShl() || use->IsSar() || use->IsShr()) {
+    return true;
+  } else if (use->IsSimulate() || use->IsArgumentsObject()) {
+    // Deoptimization has special support for uint32.
+    return true;
+  } else if (use->IsChange()) {
+    // Conversions have special support for uint32.
+    // This DCHECK guards that the conversion in question is actually
+    // implemented. Do not extend the whitelist without adding
+    // support to LChunkBuilder::DoChange().
+    DCHECK(HChange::cast(use)->to().IsDouble() ||
+           HChange::cast(use)->to().IsSmi() ||
+           HChange::cast(use)->to().IsTagged());
+    return true;
+  } else if (use->IsStoreKeyed()) {
+    HStoreKeyed* store = HStoreKeyed::cast(use);
+    if (store->is_fixed_typed_array()) {
+      // Storing a value into an external integer array is a bit-level
+      // operation.
+      if (store->value() == val) {
+        // Clamping or a conversion to double should have been inserted.
+        DCHECK(store->elements_kind() != UINT8_CLAMPED_ELEMENTS);
+        DCHECK(store->elements_kind() != FLOAT32_ELEMENTS);
+        DCHECK(store->elements_kind() != FLOAT64_ELEMENTS);
+        return true;
+      }
+    }
+  } else if (use->IsCompareNumericAndBranch()) {
+    HCompareNumericAndBranch* c = HCompareNumericAndBranch::cast(use);
+    return IsUint32Operation(c->left()) && IsUint32Operation(c->right());
+  }
+
+  return false;
+}
+
+
+// Iterate over all uses and verify that they are uint32 safe: either don't
+// distinguish between int32 and uint32 due to their bitwise nature or
+// have special support for uint32 values.
+// Encountered phis are optimistically treated as safe uint32 uses,
+// marked with the kUint32 flag and collected in the phis_ list. A separate
+// pass, UnmarkUnsafePhis, is performed later to clear kUint32 from phis that
+// are not actually uint32-safe (this requires fixed-point iteration).
+bool HUint32AnalysisPhase::Uint32UsesAreSafe(HValue* uint32val) {
+  bool collect_phi_uses = false;
+  for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
+    HValue* use = it.value();
+
+    if (use->IsPhi()) {
+      if (!use->CheckFlag(HInstruction::kUint32)) {
+        // There is a phi use of this value from a phi that is not yet
+        // collected in the phis_ array; a separate pass is required.
+        collect_phi_uses = true;
+      }
+
+      // Optimistically treat phis as uint32 safe.
+      continue;
+    }
+
+    if (!IsSafeUint32Use(uint32val, use)) {
+      return false;
+    }
+  }
+
+  if (collect_phi_uses) {
+    for (HUseIterator it(uint32val->uses()); !it.Done(); it.Advance()) {
+      HValue* use = it.value();
+
+      // There is a phi use of this value from a phi that is not yet
+      // collected in the phis_ array; a separate pass is required.
+      if (use->IsPhi() && !use->CheckFlag(HInstruction::kUint32)) {
+        use->SetFlag(HInstruction::kUint32);
+        phis_.Add(HPhi::cast(use), zone());
+      }
+    }
+  }
+
+  return true;
+}
+
+
+// Check if all operands to the given phi are marked with kUint32 flag.
+bool HUint32AnalysisPhase::CheckPhiOperands(HPhi* phi) {
+  if (!phi->CheckFlag(HInstruction::kUint32)) {
+    // This phi is not uint32 safe. No need to check operands.
+    return false;
+  }
+
+  for (int j = 0; j < phi->OperandCount(); j++) {
+    HValue* operand = phi->OperandAt(j);
+    if (!operand->CheckFlag(HInstruction::kUint32)) {
+      // Lazily mark constants that fit into uint32 range with kUint32 flag.
+      if (operand->IsInteger32Constant() &&
+          operand->GetInteger32Constant() >= 0) {
+        operand->SetFlag(HInstruction::kUint32);
+        continue;
+      }
+
+      // This phi is not safe, some operands are not uint32 values.
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+// Remove the kUint32 flag from the phi itself and its operands. If any
+// operand was a phi marked with kUint32, place it into a worklist for
+// transitive clearing of the kUint32 flag.
+void HUint32AnalysisPhase::UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist) {
+  phi->ClearFlag(HInstruction::kUint32);
+  for (int j = 0; j < phi->OperandCount(); j++) {
+    HValue* operand = phi->OperandAt(j);
+    if (operand->CheckFlag(HInstruction::kUint32)) {
+      operand->ClearFlag(HInstruction::kUint32);
+      if (operand->IsPhi()) {
+        worklist->Add(HPhi::cast(operand), zone());
+      }
+    }
+  }
+}
+
+
+void HUint32AnalysisPhase::UnmarkUnsafePhis() {
+  // No phis were collected. Nothing to do.
+  if (phis_.length() == 0) return;
+
+  // Worklist used to transitively clear kUint32 from phis that
+  // are used as arguments to other phis.
+  ZoneList<HPhi*> worklist(phis_.length(), zone());
+
+  // A phi can be used as a uint32 value if and only if
+  // all of its operands are uint32 values and all of its
+  // uses are uint32 safe.
+
+  // Iterate over the collected phis and unmark those that
+  // are unsafe. When unmarking a phi, unmark its operands as
+  // well, and add each operand that is itself a phi to the worklist.
+  // Phis that are still marked as safe are shifted down
+  // so that all safe phis form a prefix of the phis_ array.
+  int phi_count = 0;
+  for (int i = 0; i < phis_.length(); i++) {
+    HPhi* phi = phis_[i];
+
+    if (CheckPhiOperands(phi) && Uint32UsesAreSafe(phi)) {
+      phis_[phi_count++] = phi;
+    } else {
+      UnmarkPhi(phi, &worklist);
+    }
+  }
+
+  // Now the phis_ array contains only those phis that have safe
+  // non-phi uses. Start transitively clearing the kUint32 flag
+  // from the phi operands of discovered unsafe phis until
+  // only safe phis are left.
+  while (!worklist.is_empty()) {
+    while (!worklist.is_empty()) {
+      HPhi* phi = worklist.RemoveLast();
+      UnmarkPhi(phi, &worklist);
+    }
+
+    // Check if any operands to safe phis were unmarked
+    // turning a safe phi into unsafe. The same value
+    // can flow into several phis.
+    int new_phi_count = 0;
+    for (int i = 0; i < phi_count; i++) {
+      HPhi* phi = phis_[i];
+
+      if (CheckPhiOperands(phi)) {
+        phis_[new_phi_count++] = phi;
+      } else {
+        UnmarkPhi(phi, &worklist);
+      }
+    }
+    phi_count = new_phi_count;
+  }
+}
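+
+// A schematic example (annotation only; phi names are hypothetical): for
+//
+//   P1 = Phi(shr, P2)    // shr is an HShr, a uint32 producer
+//   P2 = Phi(P1, y)      // y is a plain signed int32 value
+//
+// both phis are first marked kUint32 optimistically. CheckPhiOperands(P2)
+// then fails because of y, so UnmarkPhi(P2) clears the flag on P2 and on its
+// operand P1, and the re-check loop shrinks the safe prefix of phis_ until a
+// fixed point is reached.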
+
+
+void HUint32AnalysisPhase::Run() {
+  if (!graph()->has_uint32_instructions()) return;
+
+  ZoneList<HInstruction*>* uint32_instructions = graph()->uint32_instructions();
+  for (int i = 0; i < uint32_instructions->length(); ++i) {
+    // Analyze instruction and mark it with kUint32 if all
+    // its uses are uint32 safe.
+    HInstruction* current = uint32_instructions->at(i);
+    if (current->IsLinked() &&
+        current->representation().IsInteger32() &&
+        Uint32UsesAreSafe(current)) {
+      current->SetFlag(HInstruction::kUint32);
+    }
+  }
+
+  // Some phis might have been optimistically marked with the kUint32 flag.
+  // Remove this flag from those phis that are unsafe and propagate this
+  // information transitively, potentially clearing the kUint32 flag from
+  // some non-phi operations that are used as operands of unsafe phis.
+  UnmarkUnsafePhis();
+}
+
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen-uint32-analysis.h b/src/crankshaft/hydrogen-uint32-analysis.h
new file mode 100644
index 0000000..0d959b5
--- /dev/null
+++ b/src/crankshaft/hydrogen-uint32-analysis.h
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
+#define V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
+
+#include "src/crankshaft/hydrogen.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Discover instructions that can be marked with kUint32 flag allowing
+// them to produce full range uint32 values.
+class HUint32AnalysisPhase : public HPhase {
+ public:
+  explicit HUint32AnalysisPhase(HGraph* graph)
+      : HPhase("H_Compute safe UInt32 operations", graph), phis_(4, zone()) { }
+
+  void Run();
+
+ private:
+  INLINE(bool IsSafeUint32Use(HValue* val, HValue* use));
+  INLINE(bool Uint32UsesAreSafe(HValue* uint32val));
+  INLINE(bool CheckPhiOperands(HPhi* phi));
+  INLINE(void UnmarkPhi(HPhi* phi, ZoneList<HPhi*>* worklist));
+  INLINE(void UnmarkUnsafePhis());
+
+  ZoneList<HPhi*> phis_;
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_UINT32_ANALYSIS_H_
diff --git a/src/crankshaft/hydrogen.cc b/src/crankshaft/hydrogen.cc
new file mode 100644
index 0000000..98337be
--- /dev/null
+++ b/src/crankshaft/hydrogen.cc
@@ -0,0 +1,13598 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/hydrogen.h"
+
+#include <sstream>
+
+#include "src/allocation-site-scopes.h"
+#include "src/ast/ast-numbering.h"
+#include "src/ast/scopeinfo.h"
+#include "src/code-factory.h"
+#include "src/crankshaft/hydrogen-bce.h"
+#include "src/crankshaft/hydrogen-bch.h"
+#include "src/crankshaft/hydrogen-canonicalize.h"
+#include "src/crankshaft/hydrogen-check-elimination.h"
+#include "src/crankshaft/hydrogen-dce.h"
+#include "src/crankshaft/hydrogen-dehoist.h"
+#include "src/crankshaft/hydrogen-environment-liveness.h"
+#include "src/crankshaft/hydrogen-escape-analysis.h"
+#include "src/crankshaft/hydrogen-gvn.h"
+#include "src/crankshaft/hydrogen-infer-representation.h"
+#include "src/crankshaft/hydrogen-infer-types.h"
+#include "src/crankshaft/hydrogen-load-elimination.h"
+#include "src/crankshaft/hydrogen-mark-deoptimize.h"
+#include "src/crankshaft/hydrogen-mark-unreachable.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/hydrogen-range-analysis.h"
+#include "src/crankshaft/hydrogen-redundant-phi.h"
+#include "src/crankshaft/hydrogen-removable-simulates.h"
+#include "src/crankshaft/hydrogen-representation-changes.h"
+#include "src/crankshaft/hydrogen-sce.h"
+#include "src/crankshaft/hydrogen-store-elimination.h"
+#include "src/crankshaft/hydrogen-uint32-analysis.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/crankshaft/typing.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/ic.h"
+// GetRootConstructor
+#include "src/ic/ic-inl.h"
+#include "src/isolate-inl.h"
+#include "src/parsing/parser.h"
+#include "src/runtime/runtime.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"  // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/crankshaft/x64/lithium-codegen-x64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM
+#include "src/crankshaft/arm/lithium-codegen-arm.h"  // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/crankshaft/mips/lithium-codegen-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_X87
+#include "src/crankshaft/x87/lithium-codegen-x87.h"  // NOLINT
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+HBasicBlock::HBasicBlock(HGraph* graph)
+    : block_id_(graph->GetNextBlockID()),
+      graph_(graph),
+      phis_(4, graph->zone()),
+      first_(NULL),
+      last_(NULL),
+      end_(NULL),
+      loop_information_(NULL),
+      predecessors_(2, graph->zone()),
+      dominator_(NULL),
+      dominated_blocks_(4, graph->zone()),
+      last_environment_(NULL),
+      argument_count_(-1),
+      first_instruction_index_(-1),
+      last_instruction_index_(-1),
+      deleted_phis_(4, graph->zone()),
+      parent_loop_header_(NULL),
+      inlined_entry_block_(NULL),
+      is_inline_return_target_(false),
+      is_reachable_(true),
+      dominates_loop_successors_(false),
+      is_osr_entry_(false),
+      is_ordered_(false) { }
+
+
+Isolate* HBasicBlock::isolate() const {
+  return graph_->isolate();
+}
+
+
+void HBasicBlock::MarkUnreachable() {
+  is_reachable_ = false;
+}
+
+
+void HBasicBlock::AttachLoopInformation() {
+  DCHECK(!IsLoopHeader());
+  loop_information_ = new(zone()) HLoopInformation(this, zone());
+}
+
+
+void HBasicBlock::DetachLoopInformation() {
+  DCHECK(IsLoopHeader());
+  loop_information_ = NULL;
+}
+
+
+void HBasicBlock::AddPhi(HPhi* phi) {
+  DCHECK(!IsStartBlock());
+  phis_.Add(phi, zone());
+  phi->SetBlock(this);
+}
+
+
+void HBasicBlock::RemovePhi(HPhi* phi) {
+  DCHECK(phi->block() == this);
+  DCHECK(phis_.Contains(phi));
+  phi->Kill();
+  phis_.RemoveElement(phi);
+  phi->SetBlock(NULL);
+}
+
+
+void HBasicBlock::AddInstruction(HInstruction* instr, SourcePosition position) {
+  DCHECK(!IsStartBlock() || !IsFinished());
+  DCHECK(!instr->IsLinked());
+  DCHECK(!IsFinished());
+
+  if (!position.IsUnknown()) {
+    instr->set_position(position);
+  }
+  if (first_ == NULL) {
+    DCHECK(last_environment() != NULL);
+    DCHECK(!last_environment()->ast_id().IsNone());
+    HBlockEntry* entry = new(zone()) HBlockEntry();
+    entry->InitializeAsFirst(this);
+    if (!position.IsUnknown()) {
+      entry->set_position(position);
+    } else {
+      DCHECK(!FLAG_hydrogen_track_positions ||
+             !graph()->info()->IsOptimizing() || instr->IsAbnormalExit());
+    }
+    first_ = last_ = entry;
+  }
+  instr->InsertAfter(last_);
+}
+
+
+HPhi* HBasicBlock::AddNewPhi(int merged_index) {
+  if (graph()->IsInsideNoSideEffectsScope()) {
+    merged_index = HPhi::kInvalidMergedIndex;
+  }
+  HPhi* phi = new(zone()) HPhi(merged_index, zone());
+  AddPhi(phi);
+  return phi;
+}
+
+
+HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
+                                       RemovableSimulate removable) {
+  DCHECK(HasEnvironment());
+  HEnvironment* environment = last_environment();
+  DCHECK(ast_id.IsNone() ||
+         ast_id == BailoutId::StubEntry() ||
+         environment->closure()->shared()->VerifyBailoutId(ast_id));
+
+  int push_count = environment->push_count();
+  int pop_count = environment->pop_count();
+
+  HSimulate* instr =
+      new(zone()) HSimulate(ast_id, pop_count, zone(), removable);
+#ifdef DEBUG
+  instr->set_closure(environment->closure());
+#endif
+  // Order of pushed values: newest (top of stack) first. This allows
+  // HSimulate::MergeWith() to easily append additional pushed values
+  // that are older (from further down the stack).
+  for (int i = 0; i < push_count; ++i) {
+    instr->AddPushedValue(environment->ExpressionStackAt(i));
+  }
+  for (GrowableBitVector::Iterator it(environment->assigned_variables(),
+                                      zone());
+       !it.Done();
+       it.Advance()) {
+    int index = it.Current();
+    instr->AddAssignedValue(index, environment->Lookup(index));
+  }
+  environment->ClearHistory();
+  return instr;
+}
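+
+// A schematic example (annotation only): with an expression stack [a, b, c],
+// where c was pushed last, and push_count == 3, the loop above records the
+// values as c, b, a, because ExpressionStackAt(0) is the top of the stack.
+// MergeWith() can then append still older values from a merged simulate
+// without any reordering.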
+
+
+void HBasicBlock::Finish(HControlInstruction* end, SourcePosition position) {
+  DCHECK(!IsFinished());
+  AddInstruction(end, position);
+  end_ = end;
+  for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
+    it.Current()->RegisterPredecessor(this);
+  }
+}
+
+
+void HBasicBlock::Goto(HBasicBlock* block, SourcePosition position,
+                       FunctionState* state, bool add_simulate) {
+  bool drop_extra = state != NULL &&
+      state->inlining_kind() == NORMAL_RETURN;
+
+  if (block->IsInlineReturnTarget()) {
+    HEnvironment* env = last_environment();
+    int argument_count = env->arguments_environment()->parameter_count();
+    AddInstruction(new(zone())
+                   HLeaveInlined(state->entry(), argument_count),
+                   position);
+    UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
+  }
+
+  if (add_simulate) AddNewSimulate(BailoutId::None(), position);
+  HGoto* instr = new(zone()) HGoto(block);
+  Finish(instr, position);
+}
+
+
+void HBasicBlock::AddLeaveInlined(HValue* return_value, FunctionState* state,
+                                  SourcePosition position) {
+  HBasicBlock* target = state->function_return();
+  bool drop_extra = state->inlining_kind() == NORMAL_RETURN;
+
+  DCHECK(target->IsInlineReturnTarget());
+  DCHECK(return_value != NULL);
+  HEnvironment* env = last_environment();
+  int argument_count = env->arguments_environment()->parameter_count();
+  AddInstruction(new(zone()) HLeaveInlined(state->entry(), argument_count),
+                 position);
+  UpdateEnvironment(last_environment()->DiscardInlined(drop_extra));
+  last_environment()->Push(return_value);
+  AddNewSimulate(BailoutId::None(), position);
+  HGoto* instr = new(zone()) HGoto(target);
+  Finish(instr, position);
+}
+
+
+void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
+  DCHECK(!HasEnvironment());
+  DCHECK(first() == NULL);
+  UpdateEnvironment(env);
+}
+
+
+void HBasicBlock::UpdateEnvironment(HEnvironment* env) {
+  last_environment_ = env;
+  graph()->update_maximum_environment_size(env->first_expression_index());
+}
+
+
+void HBasicBlock::SetJoinId(BailoutId ast_id) {
+  int length = predecessors_.length();
+  DCHECK(length > 0);
+  for (int i = 0; i < length; i++) {
+    HBasicBlock* predecessor = predecessors_[i];
+    DCHECK(predecessor->end()->IsGoto());
+    HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
+    DCHECK(i != 0 ||
+           (predecessor->last_environment()->closure().is_null() ||
+            predecessor->last_environment()->closure()->shared()
+              ->VerifyBailoutId(ast_id)));
+    simulate->set_ast_id(ast_id);
+    predecessor->last_environment()->set_ast_id(ast_id);
+  }
+}
+
+
+bool HBasicBlock::Dominates(HBasicBlock* other) const {
+  HBasicBlock* current = other->dominator();
+  while (current != NULL) {
+    if (current == this) return true;
+    current = current->dominator();
+  }
+  return false;
+}
+
+
+bool HBasicBlock::EqualToOrDominates(HBasicBlock* other) const {
+  if (this == other) return true;
+  return Dominates(other);
+}
+
+
+int HBasicBlock::LoopNestingDepth() const {
+  const HBasicBlock* current = this;
+  int result = (current->IsLoopHeader()) ? 1 : 0;
+  while (current->parent_loop_header() != NULL) {
+    current = current->parent_loop_header();
+    result++;
+  }
+  return result;
+}
+
+
+void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
+  DCHECK(IsLoopHeader());
+
+  SetJoinId(stmt->EntryId());
+  if (predecessors()->length() == 1) {
+    // This is a degenerate loop.
+    DetachLoopInformation();
+    return;
+  }
+
+  // Only the first entry into the loop is from outside the loop. All other
+  // entries must be back edges.
+  for (int i = 1; i < predecessors()->length(); ++i) {
+    loop_information()->RegisterBackEdge(predecessors()->at(i));
+  }
+}
+
+
+void HBasicBlock::MarkSuccEdgeUnreachable(int succ) {
+  DCHECK(IsFinished());
+  HBasicBlock* succ_block = end()->SuccessorAt(succ);
+
+  DCHECK(succ_block->predecessors()->length() == 1);
+  succ_block->MarkUnreachable();
+}
+
+
+void HBasicBlock::RegisterPredecessor(HBasicBlock* pred) {
+  if (HasPredecessor()) {
+    // Only loop header blocks can have a predecessor added after
+    // instructions have been added to the block (they have phis for all
+    // values in the environment; these phis may be eliminated later).
+    DCHECK(IsLoopHeader() || first_ == NULL);
+    HEnvironment* incoming_env = pred->last_environment();
+    if (IsLoopHeader()) {
+      DCHECK_EQ(phis()->length(), incoming_env->length());
+      for (int i = 0; i < phis_.length(); ++i) {
+        phis_[i]->AddInput(incoming_env->values()->at(i));
+      }
+    } else {
+      last_environment()->AddIncomingEdge(this, pred->last_environment());
+    }
+  } else if (!HasEnvironment() && !IsFinished()) {
+    DCHECK(!IsLoopHeader());
+    SetInitialEnvironment(pred->last_environment()->Copy());
+  }
+
+  predecessors_.Add(pred, zone());
+}
+
+
+void HBasicBlock::AddDominatedBlock(HBasicBlock* block) {
+  DCHECK(!dominated_blocks_.Contains(block));
+  // Keep the list of dominated blocks sorted such that if there are two
+  // successive blocks in this list, the predecessor comes before the
+  // successor.
+  int index = 0;
+  while (index < dominated_blocks_.length() &&
+         dominated_blocks_[index]->block_id() < block->block_id()) {
+    ++index;
+  }
+  dominated_blocks_.InsertAt(index, block, zone());
+}
+
+
+void HBasicBlock::AssignCommonDominator(HBasicBlock* other) {
+  if (dominator_ == NULL) {
+    dominator_ = other;
+    other->AddDominatedBlock(this);
+  } else if (other->dominator() != NULL) {
+    HBasicBlock* first = dominator_;
+    HBasicBlock* second = other;
+
+    while (first != second) {
+      if (first->block_id() > second->block_id()) {
+        first = first->dominator();
+      } else {
+        second = second->dominator();
+      }
+      DCHECK(first != NULL && second != NULL);
+    }
+
+    if (dominator_ != first) {
+      DCHECK(dominator_->dominated_blocks_.Contains(this));
+      dominator_->dominated_blocks_.RemoveElement(this);
+      dominator_ = first;
+      first->AddDominatedBlock(this);
+    }
+  }
+}
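+
+// A schematic example (annotation only; block ids are hypothetical): the
+// loop above is the classic dominator-intersection walk over block ids.
+// With dominator_ == B6 and other == B4, where both B6 and B4 are dominated
+// by B3, the block with the larger id is repeatedly replaced by its
+// dominator until the two walks meet at B3, the new common dominator.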
+
+
+void HBasicBlock::AssignLoopSuccessorDominators() {
+  // Mark blocks that dominate all subsequent reachable blocks inside their
+  // loop. Exploit the fact that blocks are sorted in reverse post order. When
+  // the loop is visited in increasing block id order, if the number of
+  // non-loop-exiting successor edges at the dominator_candidate block doesn't
+  // exceed the number of previously encountered predecessor edges, there is no
+  // path from the loop header to any block with higher id that doesn't go
+  // through the dominator_candidate block. In this case, the
+  // dominator_candidate block is guaranteed to dominate all blocks reachable
+  // from it with higher ids.
+  HBasicBlock* last = loop_information()->GetLastBackEdge();
+  int outstanding_successors = 1;  // one edge from the pre-header
+  // Header always dominates everything.
+  MarkAsLoopSuccessorDominator();
+  for (int j = block_id(); j <= last->block_id(); ++j) {
+    HBasicBlock* dominator_candidate = graph_->blocks()->at(j);
+    for (HPredecessorIterator it(dominator_candidate); !it.Done();
+         it.Advance()) {
+      HBasicBlock* predecessor = it.Current();
+      // Don't count back edges.
+      if (predecessor->block_id() < dominator_candidate->block_id()) {
+        outstanding_successors--;
+      }
+    }
+
+    // If more successors than predecessors have been seen in the loop up to
+    // now, it's not possible to guarantee that the current block dominates
+    // all of the blocks with higher IDs. In this case, assume conservatively
+    // that those paths through the loop that don't go through the current
+    // block contain all of the loop's dependencies. Also be careful to record
+    // contain all of the loop's dependencies. Also be careful to record
+    // dominator information about the current loop that's being processed,
+    // and not nested loops, which will be processed when
+    // AssignLoopSuccessorDominators gets called on their header.
+    DCHECK(outstanding_successors >= 0);
+    HBasicBlock* parent_loop_header = dominator_candidate->parent_loop_header();
+    if (outstanding_successors == 0 &&
+        (parent_loop_header == this && !dominator_candidate->IsLoopHeader())) {
+      dominator_candidate->MarkAsLoopSuccessorDominator();
+    }
+    HControlInstruction* end = dominator_candidate->end();
+    for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
+      HBasicBlock* successor = it.Current();
+      // Only count successors that remain inside the loop and don't loop back
+      // to a loop header.
+      if (successor->block_id() > dominator_candidate->block_id() &&
+          successor->block_id() <= last->block_id()) {
+        // Backwards edges must land on loop headers.
+        DCHECK(successor->block_id() > dominator_candidate->block_id() ||
+               successor->IsLoopHeader());
+        outstanding_successors++;
+      }
+    }
+  }
+}
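+
+// A schematic note (annotation only): outstanding_successors counts in-loop
+// edges that have been opened by an already-visited block but whose target
+// has not been visited yet; it starts at 1 for the pre-header edge into the
+// header. Visiting a block first consumes its incoming forward edges (one
+// each) and later opens one edge per in-loop successor. If the count is 0
+// right after block B consumed its incoming edges, every opened edge led
+// into B, so every path to a higher-id block in the loop passes through B
+// and B dominates the remainder of the loop.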
+
+
+int HBasicBlock::PredecessorIndexOf(HBasicBlock* predecessor) const {
+  for (int i = 0; i < predecessors_.length(); ++i) {
+    if (predecessors_[i] == predecessor) return i;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+#ifdef DEBUG
+void HBasicBlock::Verify() {
+  // Check that every block is finished.
+  DCHECK(IsFinished());
+  DCHECK(block_id() >= 0);
+
+  // Check that the incoming edges are in edge split form.
+  if (predecessors_.length() > 1) {
+    for (int i = 0; i < predecessors_.length(); ++i) {
+      DCHECK(predecessors_[i]->end()->SecondSuccessor() == NULL);
+    }
+  }
+}
+#endif
+
+
+void HLoopInformation::RegisterBackEdge(HBasicBlock* block) {
+  this->back_edges_.Add(block, block->zone());
+  AddBlock(block);
+}
+
+
+HBasicBlock* HLoopInformation::GetLastBackEdge() const {
+  int max_id = -1;
+  HBasicBlock* result = NULL;
+  for (int i = 0; i < back_edges_.length(); ++i) {
+    HBasicBlock* cur = back_edges_[i];
+    if (cur->block_id() > max_id) {
+      max_id = cur->block_id();
+      result = cur;
+    }
+  }
+  return result;
+}
+
+
+void HLoopInformation::AddBlock(HBasicBlock* block) {
+  if (block == loop_header()) return;
+  if (block->parent_loop_header() == loop_header()) return;
+  if (block->parent_loop_header() != NULL) {
+    AddBlock(block->parent_loop_header());
+  } else {
+    block->set_parent_loop_header(loop_header());
+    blocks_.Add(block, block->zone());
+    for (int i = 0; i < block->predecessors()->length(); ++i) {
+      AddBlock(block->predecessors()->at(i));
+    }
+  }
+}
+
+
+#ifdef DEBUG
+
+// Checks reachability of the blocks in this graph and stores a bit in
+// the BitVector "reachable()" for every block that can be reached
+// from the start block of the graph. If "dont_visit" is non-null, the given
+// block is treated as if it were not part of the graph. "visited_count()"
+// returns the number of reachable blocks.
+class ReachabilityAnalyzer BASE_EMBEDDED {
+ public:
+  ReachabilityAnalyzer(HBasicBlock* entry_block,
+                       int block_count,
+                       HBasicBlock* dont_visit)
+      : visited_count_(0),
+        stack_(16, entry_block->zone()),
+        reachable_(block_count, entry_block->zone()),
+        dont_visit_(dont_visit) {
+    PushBlock(entry_block);
+    Analyze();
+  }
+
+  int visited_count() const { return visited_count_; }
+  const BitVector* reachable() const { return &reachable_; }
+
+ private:
+  void PushBlock(HBasicBlock* block) {
+    if (block != NULL && block != dont_visit_ &&
+        !reachable_.Contains(block->block_id())) {
+      reachable_.Add(block->block_id());
+      stack_.Add(block, block->zone());
+      visited_count_++;
+    }
+  }
+
+  void Analyze() {
+    while (!stack_.is_empty()) {
+      HControlInstruction* end = stack_.RemoveLast()->end();
+      for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
+        PushBlock(it.Current());
+      }
+    }
+  }
+
+  int visited_count_;
+  ZoneList<HBasicBlock*> stack_;
+  BitVector reachable_;
+  HBasicBlock* dont_visit_;
+};
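+
+// Illustrative usage (annotation only), mirroring the dominator check in
+// HGraph::Verify() below:
+//
+//   ReachabilityAnalyzer without_dominator(entry_block_, blocks_.length(),
+//                                          block->dominator());
+//   DCHECK(!without_dominator.reachable()->Contains(block->block_id()));
+//
+// Excluding a block's dominator from the traversal must make the block
+// itself unreachable.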
+
+
+void HGraph::Verify(bool do_full_verify) const {
+  Heap::RelocationLock relocation_lock(isolate()->heap());
+  AllowHandleDereference allow_deref;
+  AllowDeferredHandleDereference allow_deferred_deref;
+  for (int i = 0; i < blocks_.length(); i++) {
+    HBasicBlock* block = blocks_.at(i);
+
+    block->Verify();
+
+    // Check that every block contains at least one node and that only the last
+    // node is a control instruction.
+    HInstruction* current = block->first();
+    DCHECK(current != NULL && current->IsBlockEntry());
+    while (current != NULL) {
+      DCHECK((current->next() == NULL) == current->IsControlInstruction());
+      DCHECK(current->block() == block);
+      current->Verify();
+      current = current->next();
+    }
+
+    // Check that successors are correctly set.
+    HBasicBlock* first = block->end()->FirstSuccessor();
+    HBasicBlock* second = block->end()->SecondSuccessor();
+    DCHECK(second == NULL || first != NULL);
+
+    // Check that the predecessor array is correct.
+    if (first != NULL) {
+      DCHECK(first->predecessors()->Contains(block));
+      if (second != NULL) {
+        DCHECK(second->predecessors()->Contains(block));
+      }
+    }
+
+    // Check that phis have correct arguments.
+    for (int j = 0; j < block->phis()->length(); j++) {
+      HPhi* phi = block->phis()->at(j);
+      phi->Verify();
+    }
+
+    // Check that all join blocks have predecessors that end with an
+    // unconditional goto and agree on their environment node id.
+    if (block->predecessors()->length() >= 2) {
+      BailoutId id =
+          block->predecessors()->first()->last_environment()->ast_id();
+      for (int k = 0; k < block->predecessors()->length(); k++) {
+        HBasicBlock* predecessor = block->predecessors()->at(k);
+        DCHECK(predecessor->end()->IsGoto() ||
+               predecessor->end()->IsDeoptimize());
+        DCHECK(predecessor->last_environment()->ast_id() == id);
+      }
+    }
+  }
+
+  // Check the special property that the first block has no predecessors.
+  DCHECK(blocks_.at(0)->predecessors()->is_empty());
+
+  if (do_full_verify) {
+    // Check that the graph is fully connected.
+    ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
+    DCHECK(analyzer.visited_count() == blocks_.length());
+
+    // Check that entry block dominator is NULL.
+    DCHECK(entry_block_->dominator() == NULL);
+
+    // Check dominators.
+    for (int i = 0; i < blocks_.length(); ++i) {
+      HBasicBlock* block = blocks_.at(i);
+      if (block->dominator() == NULL) {
+        // Only the start block may have no dominator assigned.
+        DCHECK(i == 0);
+      } else {
+        // Assert that the block is unreachable if its dominator is excluded
+        // from the graph.
+        ReachabilityAnalyzer dominator_analyzer(entry_block_,
+                                                blocks_.length(),
+                                                block->dominator());
+        DCHECK(!dominator_analyzer.reachable()->Contains(block->block_id()));
+      }
+    }
+  }
+}
+
+#endif
+
+
+HConstant* HGraph::GetConstant(SetOncePointer<HConstant>* pointer,
+                               int32_t value) {
+  if (!pointer->is_set()) {
+    // Can't pass GetInvalidContext() to HConstant::New, because that would
+    // recursively call GetConstant() and re-enter this method.
+    HConstant* constant = HConstant::New(isolate(), zone(), NULL, value);
+    constant->InsertAfter(entry_block()->first());
+    pointer->set(constant);
+    return constant;
+  }
+  return ReinsertConstantIfNecessary(pointer->get());
+}
+
+
+HConstant* HGraph::ReinsertConstantIfNecessary(HConstant* constant) {
+  if (!constant->IsLinked()) {
+    // The constant was removed from the graph. Reinsert.
+    constant->ClearFlag(HValue::kIsDead);
+    constant->InsertAfter(entry_block()->first());
+  }
+  return constant;
+}
+
+
+HConstant* HGraph::GetConstant0() {
+  return GetConstant(&constant_0_, 0);
+}
+
+
+HConstant* HGraph::GetConstant1() {
+  return GetConstant(&constant_1_, 1);
+}
+
+
+HConstant* HGraph::GetConstantMinus1() {
+  return GetConstant(&constant_minus1_, -1);
+}
+
+
+HConstant* HGraph::GetConstantBool(bool value) {
+  return value ? GetConstantTrue() : GetConstantFalse();
+}
+
+
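+// Defines the lazy accessor HGraph::GetConstant<Name>() for a singleton
+// oddball constant. The HConstant is created on first use from the factory
+// value and map, inserted right after the graph entry, and re-inserted by
+// ReinsertConstantIfNecessary() if a previous pass removed it as dead.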
+#define DEFINE_GET_CONSTANT(Name, name, type, htype, boolean_value)            \
+HConstant* HGraph::GetConstant##Name() {                                       \
+  if (!constant_##name##_.is_set()) {                                          \
+    HConstant* constant = new(zone()) HConstant(                               \
+        Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
+        Unique<Map>::CreateImmovable(isolate()->factory()->type##_map()),      \
+        false,                                                                 \
+        Representation::Tagged(),                                              \
+        htype,                                                                 \
+        true,                                                                  \
+        boolean_value,                                                         \
+        false,                                                                 \
+        ODDBALL_TYPE);                                                         \
+    constant->InsertAfter(entry_block()->first());                             \
+    constant_##name##_.set(constant);                                          \
+  }                                                                            \
+  return ReinsertConstantIfNecessary(constant_##name##_.get());                \
+}
+
+
+DEFINE_GET_CONSTANT(Undefined, undefined, undefined, HType::Undefined(), false)
+DEFINE_GET_CONSTANT(True, true, boolean, HType::Boolean(), true)
+DEFINE_GET_CONSTANT(False, false, boolean, HType::Boolean(), false)
+DEFINE_GET_CONSTANT(Hole, the_hole, the_hole, HType::None(), false)
+DEFINE_GET_CONSTANT(Null, null, null, HType::Null(), false)
+
+
+#undef DEFINE_GET_CONSTANT
+
+#define DEFINE_IS_CONSTANT(Name, name)                                         \
+bool HGraph::IsConstant##Name(HConstant* constant) {                           \
+  return constant_##name##_.is_set() && constant == constant_##name##_.get();  \
+}
+DEFINE_IS_CONSTANT(Undefined, undefined)
+DEFINE_IS_CONSTANT(0, 0)
+DEFINE_IS_CONSTANT(1, 1)
+DEFINE_IS_CONSTANT(Minus1, minus1)
+DEFINE_IS_CONSTANT(True, true)
+DEFINE_IS_CONSTANT(False, false)
+DEFINE_IS_CONSTANT(Hole, the_hole)
+DEFINE_IS_CONSTANT(Null, null)
+
+#undef DEFINE_IS_CONSTANT
+
+
+HConstant* HGraph::GetInvalidContext() {
+  return GetConstant(&constant_invalid_context_, 0xFFFFC0C7);
+}
+
+
+bool HGraph::IsStandardConstant(HConstant* constant) {
+  if (IsConstantUndefined(constant)) return true;
+  if (IsConstant0(constant)) return true;
+  if (IsConstant1(constant)) return true;
+  if (IsConstantMinus1(constant)) return true;
+  if (IsConstantTrue(constant)) return true;
+  if (IsConstantFalse(constant)) return true;
+  if (IsConstantHole(constant)) return true;
+  if (IsConstantNull(constant)) return true;
+  return false;
+}
+
+
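+// IfBuilder emits the basic-block diamond for an if/then/else while the
+// graph is being built. A typical use, following the patterns elsewhere in
+// this file (names are illustrative):
+//
+//   IfBuilder cond(this);
+//   cond.If<HCompareNumericAndBranch>(lhs, rhs, Token::EQ);
+//   cond.Then();
+//   ... emit the "then" code ...
+//   cond.Else();
+//   ... emit the "else" code ...
+//   cond.End();
+//
+// Or()/And() chain extra compares with short-circuit control flow, and
+// CaptureContinuation()/JoinContinuation() expose or reuse the true/false
+// blocks instead of merging them here.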
+HGraphBuilder::IfBuilder::IfBuilder() : builder_(NULL), needs_compare_(true) {}
+
+
+HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder)
+    : needs_compare_(true) {
+  Initialize(builder);
+}
+
+
+HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder,
+                                    HIfContinuation* continuation)
+    : needs_compare_(false), first_true_block_(NULL), first_false_block_(NULL) {
+  InitializeDontCreateBlocks(builder);
+  continuation->Continue(&first_true_block_, &first_false_block_);
+}
+
+
+void HGraphBuilder::IfBuilder::InitializeDontCreateBlocks(
+    HGraphBuilder* builder) {
+  builder_ = builder;
+  finished_ = false;
+  did_then_ = false;
+  did_else_ = false;
+  did_else_if_ = false;
+  did_and_ = false;
+  did_or_ = false;
+  captured_ = false;
+  pending_merge_block_ = false;
+  split_edge_merge_block_ = NULL;
+  merge_at_join_blocks_ = NULL;
+  normal_merge_at_join_block_count_ = 0;
+  deopt_merge_at_join_block_count_ = 0;
+}
+
+
+void HGraphBuilder::IfBuilder::Initialize(HGraphBuilder* builder) {
+  InitializeDontCreateBlocks(builder);
+  HEnvironment* env = builder->environment();
+  first_true_block_ = builder->CreateBasicBlock(env->Copy());
+  first_false_block_ = builder->CreateBasicBlock(env->Copy());
+}
+
+
+HControlInstruction* HGraphBuilder::IfBuilder::AddCompare(
+    HControlInstruction* compare) {
+  DCHECK(did_then_ == did_else_);
+  if (did_else_) {
+    // Handle if-then-elseif
+    did_else_if_ = true;
+    did_else_ = false;
+    did_then_ = false;
+    did_and_ = false;
+    did_or_ = false;
+    pending_merge_block_ = false;
+    split_edge_merge_block_ = NULL;
+    HEnvironment* env = builder()->environment();
+    first_true_block_ = builder()->CreateBasicBlock(env->Copy());
+    first_false_block_ = builder()->CreateBasicBlock(env->Copy());
+  }
+  if (split_edge_merge_block_ != NULL) {
+    HEnvironment* env = first_false_block_->last_environment();
+    HBasicBlock* split_edge = builder()->CreateBasicBlock(env->Copy());
+    if (did_or_) {
+      compare->SetSuccessorAt(0, split_edge);
+      compare->SetSuccessorAt(1, first_false_block_);
+    } else {
+      compare->SetSuccessorAt(0, first_true_block_);
+      compare->SetSuccessorAt(1, split_edge);
+    }
+    builder()->GotoNoSimulate(split_edge, split_edge_merge_block_);
+  } else {
+    compare->SetSuccessorAt(0, first_true_block_);
+    compare->SetSuccessorAt(1, first_false_block_);
+  }
+  builder()->FinishCurrentBlock(compare);
+  needs_compare_ = false;
+  return compare;
+}
+
+
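+// Or() and And() below implement short-circuit evaluation for chained
+// compares: Or() parks satisfied (true) edges in a shared split-edge merge
+// block and continues compiling the next compare on the false edge; And()
+// is the mirror image, collecting failed (false) edges instead.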
+void HGraphBuilder::IfBuilder::Or() {
+  DCHECK(!needs_compare_);
+  DCHECK(!did_and_);
+  did_or_ = true;
+  HEnvironment* env = first_false_block_->last_environment();
+  if (split_edge_merge_block_ == NULL) {
+    split_edge_merge_block_ = builder()->CreateBasicBlock(env->Copy());
+    builder()->GotoNoSimulate(first_true_block_, split_edge_merge_block_);
+    first_true_block_ = split_edge_merge_block_;
+  }
+  builder()->set_current_block(first_false_block_);
+  first_false_block_ = builder()->CreateBasicBlock(env->Copy());
+}
+
+
+void HGraphBuilder::IfBuilder::And() {
+  DCHECK(!needs_compare_);
+  DCHECK(!did_or_);
+  did_and_ = true;
+  HEnvironment* env = first_false_block_->last_environment();
+  if (split_edge_merge_block_ == NULL) {
+    split_edge_merge_block_ = builder()->CreateBasicBlock(env->Copy());
+    builder()->GotoNoSimulate(first_false_block_, split_edge_merge_block_);
+    first_false_block_ = split_edge_merge_block_;
+  }
+  builder()->set_current_block(first_true_block_);
+  first_true_block_ = builder()->CreateBasicBlock(env->Copy());
+}
+
+
+void HGraphBuilder::IfBuilder::CaptureContinuation(
+    HIfContinuation* continuation) {
+  DCHECK(!did_else_if_);
+  DCHECK(!finished_);
+  DCHECK(!captured_);
+
+  HBasicBlock* true_block = NULL;
+  HBasicBlock* false_block = NULL;
+  Finish(&true_block, &false_block);
+  DCHECK(true_block != NULL);
+  DCHECK(false_block != NULL);
+  continuation->Capture(true_block, false_block);
+  captured_ = true;
+  builder()->set_current_block(NULL);
+  End();
+}
+
+
+void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) {
+  DCHECK(!did_else_if_);
+  DCHECK(!finished_);
+  DCHECK(!captured_);
+  HBasicBlock* true_block = NULL;
+  HBasicBlock* false_block = NULL;
+  Finish(&true_block, &false_block);
+  merge_at_join_blocks_ = NULL;
+  if (true_block != NULL && !true_block->IsFinished()) {
+    DCHECK(continuation->IsTrueReachable());
+    builder()->GotoNoSimulate(true_block, continuation->true_branch());
+  }
+  if (false_block != NULL && !false_block->IsFinished()) {
+    DCHECK(continuation->IsFalseReachable());
+    builder()->GotoNoSimulate(false_block, continuation->false_branch());
+  }
+  captured_ = true;
+  End();
+}
+
+
+void HGraphBuilder::IfBuilder::Then() {
+  DCHECK(!captured_);
+  DCHECK(!finished_);
+  did_then_ = true;
+  if (needs_compare_) {
+    // Handle ifs without any expressions: they jump directly to the "else"
+    // branch. However, we must pretend that the "then" branch is reachable,
+    // so that the graph builder visits it and sees any live range extending
+    // constructs within it.
+    HConstant* constant_false = builder()->graph()->GetConstantFalse();
+    ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
+    boolean_type.Add(ToBooleanStub::BOOLEAN);
+    HBranch* branch = builder()->New<HBranch>(
+        constant_false, boolean_type, first_true_block_, first_false_block_);
+    builder()->FinishCurrentBlock(branch);
+  }
+  builder()->set_current_block(first_true_block_);
+  pending_merge_block_ = true;
+}
+
+
+void HGraphBuilder::IfBuilder::Else() {
+  DCHECK(did_then_);
+  DCHECK(!captured_);
+  DCHECK(!finished_);
+  AddMergeAtJoinBlock(false);
+  builder()->set_current_block(first_false_block_);
+  pending_merge_block_ = true;
+  did_else_ = true;
+}
+
+
+void HGraphBuilder::IfBuilder::Deopt(Deoptimizer::DeoptReason reason) {
+  DCHECK(did_then_);
+  builder()->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
+  AddMergeAtJoinBlock(true);
+}
+
+
+void HGraphBuilder::IfBuilder::Return(HValue* value) {
+  HValue* parameter_count = builder()->graph()->GetConstantMinus1();
+  builder()->FinishExitCurrentBlock(
+      builder()->New<HReturn>(value, parameter_count));
+  AddMergeAtJoinBlock(false);
+}
+
+
+void HGraphBuilder::IfBuilder::AddMergeAtJoinBlock(bool deopt) {
+  if (!pending_merge_block_) return;
+  HBasicBlock* block = builder()->current_block();
+  DCHECK(block == NULL || !block->IsFinished());
+  MergeAtJoinBlock* record = new (builder()->zone())
+      MergeAtJoinBlock(block, deopt, merge_at_join_blocks_);
+  merge_at_join_blocks_ = record;
+  if (block != NULL) {
+    DCHECK(block->end() == NULL);
+    if (deopt) {
+      deopt_merge_at_join_block_count_++;
+    } else {
+      normal_merge_at_join_block_count_++;
+    }
+  }
+  builder()->set_current_block(NULL);
+  pending_merge_block_ = false;
+}
+
+
+void HGraphBuilder::IfBuilder::Finish() {
+  DCHECK(!finished_);
+  if (!did_then_) {
+    Then();
+  }
+  AddMergeAtJoinBlock(false);
+  if (!did_else_) {
+    Else();
+    AddMergeAtJoinBlock(false);
+  }
+  finished_ = true;
+}
+
+
+void HGraphBuilder::IfBuilder::Finish(HBasicBlock** then_continuation,
+                                      HBasicBlock** else_continuation) {
+  Finish();
+
+  MergeAtJoinBlock* else_record = merge_at_join_blocks_;
+  if (else_continuation != NULL) {
+    *else_continuation = else_record->block_;
+  }
+  MergeAtJoinBlock* then_record = else_record->next_;
+  if (then_continuation != NULL) {
+    *then_continuation = then_record->block_;
+  }
+  DCHECK(then_record->next_ == NULL);
+}
+
+
+void HGraphBuilder::IfBuilder::EndUnreachable() {
+  if (captured_) return;
+  Finish();
+  builder()->set_current_block(nullptr);
+}
+
+
+void HGraphBuilder::IfBuilder::End() {
+  if (captured_) return;
+  Finish();
+
+  int total_merged_blocks = normal_merge_at_join_block_count_ +
+    deopt_merge_at_join_block_count_;
+  DCHECK(total_merged_blocks >= 1);
+  HBasicBlock* merge_block =
+      total_merged_blocks == 1 ? NULL : builder()->graph()->CreateBasicBlock();
+
+  // Merge non-deopt blocks first to ensure environment has right size for
+  // padding.
+  MergeAtJoinBlock* current = merge_at_join_blocks_;
+  while (current != NULL) {
+    if (!current->deopt_ && current->block_ != NULL) {
+      // If there is only one block that makes it through to the end of the
+      // if, then just set it as the current block and continue rather than
+      // creating an unnecessary merge block.
+      if (total_merged_blocks == 1) {
+        builder()->set_current_block(current->block_);
+        return;
+      }
+      builder()->GotoNoSimulate(current->block_, merge_block);
+    }
+    current = current->next_;
+  }
+
+  // Merge deopt blocks, padding when necessary.
+  current = merge_at_join_blocks_;
+  while (current != NULL) {
+    if (current->deopt_ && current->block_ != NULL) {
+      current->block_->FinishExit(
+          HAbnormalExit::New(builder()->isolate(), builder()->zone(), NULL),
+          SourcePosition::Unknown());
+    }
+    current = current->next_;
+  }
+  builder()->set_current_block(merge_block);
+}
+
+
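+// LoopBuilder emits counted loops (and, via the one-argument constructor,
+// while-true loops) during graph building. A sketch of the counted form,
+// with illustrative names:
+//
+//   LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
+//   HValue* index = loop.BeginBody(start, limit, Token::LT);
+//   ... loop body; loop.Break() exits early ...
+//   loop.EndBody();
+//
+// BeginBody() installs the loop phi for the induction variable and the
+// compare against the limit; EndBody() adds the increment (for the
+// post-increment/decrement modes), the back edge, and leaves the builder
+// positioned at the exit block.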
+HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder) {
+  Initialize(builder, NULL, kWhileTrue, NULL);
+}
+
+
+HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, HValue* context,
+                                        LoopBuilder::Direction direction) {
+  Initialize(builder, context, direction, builder->graph()->GetConstant1());
+}
+
+
+HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, HValue* context,
+                                        LoopBuilder::Direction direction,
+                                        HValue* increment_amount) {
+  Initialize(builder, context, direction, increment_amount);
+  increment_amount_ = increment_amount;
+}
+
+
+void HGraphBuilder::LoopBuilder::Initialize(HGraphBuilder* builder,
+                                            HValue* context,
+                                            Direction direction,
+                                            HValue* increment_amount) {
+  builder_ = builder;
+  context_ = context;
+  direction_ = direction;
+  increment_amount_ = increment_amount;
+
+  finished_ = false;
+  header_block_ = builder->CreateLoopHeaderBlock();
+  body_block_ = NULL;
+  exit_block_ = NULL;
+  exit_trampoline_block_ = NULL;
+}
+
+
+HValue* HGraphBuilder::LoopBuilder::BeginBody(
+    HValue* initial,
+    HValue* terminating,
+    Token::Value token) {
+  DCHECK(direction_ != kWhileTrue);
+  HEnvironment* env = builder_->environment();
+  phi_ = header_block_->AddNewPhi(env->values()->length());
+  phi_->AddInput(initial);
+  env->Push(initial);
+  builder_->GotoNoSimulate(header_block_);
+
+  HEnvironment* body_env = env->Copy();
+  HEnvironment* exit_env = env->Copy();
+  // Remove the phi from the expression stack
+  body_env->Pop();
+  exit_env->Pop();
+  body_block_ = builder_->CreateBasicBlock(body_env);
+  exit_block_ = builder_->CreateBasicBlock(exit_env);
+
+  builder_->set_current_block(header_block_);
+  env->Pop();
+  builder_->FinishCurrentBlock(builder_->New<HCompareNumericAndBranch>(
+          phi_, terminating, token, body_block_, exit_block_));
+
+  builder_->set_current_block(body_block_);
+  if (direction_ == kPreIncrement || direction_ == kPreDecrement) {
+    Isolate* isolate = builder_->isolate();
+    HValue* one = builder_->graph()->GetConstant1();
+    if (direction_ == kPreIncrement) {
+      increment_ = HAdd::New(isolate, zone(), context_, phi_, one);
+    } else {
+      increment_ = HSub::New(isolate, zone(), context_, phi_, one);
+    }
+    increment_->ClearFlag(HValue::kCanOverflow);
+    builder_->AddInstruction(increment_);
+    return increment_;
+  } else {
+    return phi_;
+  }
+}
+
+
+void HGraphBuilder::LoopBuilder::BeginBody(int drop_count) {
+  DCHECK(direction_ == kWhileTrue);
+  HEnvironment* env = builder_->environment();
+  builder_->GotoNoSimulate(header_block_);
+  builder_->set_current_block(header_block_);
+  env->Drop(drop_count);
+}
+
+
+void HGraphBuilder::LoopBuilder::Break() {
+  if (exit_trampoline_block_ == NULL) {
+    // It's the first time we've seen a break.
+    if (direction_ == kWhileTrue) {
+      HEnvironment* env = builder_->environment()->Copy();
+      exit_trampoline_block_ = builder_->CreateBasicBlock(env);
+    } else {
+      HEnvironment* env = exit_block_->last_environment()->Copy();
+      exit_trampoline_block_ = builder_->CreateBasicBlock(env);
+      builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_);
+    }
+  }
+
+  builder_->GotoNoSimulate(exit_trampoline_block_);
+  builder_->set_current_block(NULL);
+}
+
+
+void HGraphBuilder::LoopBuilder::EndBody() {
+  DCHECK(!finished_);
+
+  if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
+    Isolate* isolate = builder_->isolate();
+    if (direction_ == kPostIncrement) {
+      increment_ =
+          HAdd::New(isolate, zone(), context_, phi_, increment_amount_);
+    } else {
+      increment_ =
+          HSub::New(isolate, zone(), context_, phi_, increment_amount_);
+    }
+    increment_->ClearFlag(HValue::kCanOverflow);
+    builder_->AddInstruction(increment_);
+  }
+
+  if (direction_ != kWhileTrue) {
+    // Push the new increment value on the expression stack to merge into
+    // the phi.
+    builder_->environment()->Push(increment_);
+  }
+  HBasicBlock* last_block = builder_->current_block();
+  builder_->GotoNoSimulate(last_block, header_block_);
+  header_block_->loop_information()->RegisterBackEdge(last_block);
+
+  if (exit_trampoline_block_ != NULL) {
+    builder_->set_current_block(exit_trampoline_block_);
+  } else {
+    builder_->set_current_block(exit_block_);
+  }
+  finished_ = true;
+}
+
+
+HGraph* HGraphBuilder::CreateGraph() {
+  graph_ = new(zone()) HGraph(info_);
+  if (FLAG_hydrogen_stats) isolate()->GetHStatistics()->Initialize(info_);
+  CompilationPhase phase("H_Block building", info_);
+  set_current_block(graph()->entry_block());
+  if (!BuildGraph()) return NULL;
+  graph()->FinalizeUniqueness();
+  return graph_;
+}
+
+
+HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
+  DCHECK(current_block() != NULL);
+  DCHECK(!FLAG_hydrogen_track_positions ||
+         !position_.IsUnknown() ||
+         !info_->IsOptimizing());
+  current_block()->AddInstruction(instr, source_position());
+  if (graph()->IsInsideNoSideEffectsScope()) {
+    instr->SetFlag(HValue::kHasNoObservableSideEffects);
+  }
+  return instr;
+}
+
+
+void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
+  DCHECK(!FLAG_hydrogen_track_positions ||
+         !info_->IsOptimizing() ||
+         !position_.IsUnknown());
+  current_block()->Finish(last, source_position());
+  if (last->IsReturn() || last->IsAbnormalExit()) {
+    set_current_block(NULL);
+  }
+}
+
+
+void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
+  DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
+         !position_.IsUnknown());
+  current_block()->FinishExit(instruction, source_position());
+  if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
+    set_current_block(NULL);
+  }
+}
+
+
+void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    HValue* reference = Add<HConstant>(ExternalReference(counter));
+    HValue* old_value =
+        Add<HLoadNamedField>(reference, nullptr, HObjectAccess::ForCounter());
+    HValue* new_value = AddUncasted<HAdd>(old_value, graph()->GetConstant1());
+    new_value->ClearFlag(HValue::kCanOverflow);  // Ignore counter overflow
+    Add<HStoreNamedField>(reference, HObjectAccess::ForCounter(),
+                          new_value, STORE_TO_INITIALIZED_ENTRY);
+  }
+}
+
+
+void HGraphBuilder::AddSimulate(BailoutId id,
+                                RemovableSimulate removable) {
+  DCHECK(current_block() != NULL);
+  DCHECK(!graph()->IsInsideNoSideEffectsScope());
+  current_block()->AddNewSimulate(id, source_position(), removable);
+}
+
+
+HBasicBlock* HGraphBuilder::CreateBasicBlock(HEnvironment* env) {
+  HBasicBlock* b = graph()->CreateBasicBlock();
+  b->SetInitialEnvironment(env);
+  return b;
+}
+
+
+HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
+  HBasicBlock* header = graph()->CreateBasicBlock();
+  HEnvironment* entry_env = environment()->CopyAsLoopHeader(header);
+  header->SetInitialEnvironment(entry_env);
+  header->AttachLoopInformation();
+  return header;
+}
+
+
+HValue* HGraphBuilder::BuildGetElementsKind(HValue* object) {
+  HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
+
+  HValue* bit_field2 =
+      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
+  return BuildDecodeField<Map::ElementsKindBits>(bit_field2);
+}
+
+
+HValue* HGraphBuilder::BuildCheckHeapObject(HValue* obj) {
+  if (obj->type().IsHeapObject()) return obj;
+  return Add<HCheckHeapObject>(obj);
+}
+
+
+void HGraphBuilder::FinishExitWithHardDeoptimization(
+    Deoptimizer::DeoptReason reason) {
+  Add<HDeoptimize>(reason, Deoptimizer::EAGER);
+  FinishExitCurrentBlock(New<HAbnormalExit>());
+}
+
+
+HValue* HGraphBuilder::BuildCheckString(HValue* string) {
+  if (!string->type().IsString()) {
+    DCHECK(!string->IsConstant() ||
+           !HConstant::cast(string)->HasStringValue());
+    BuildCheckHeapObject(string);
+    return Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
+  }
+  return string;
+}
+
+
+HValue* HGraphBuilder::BuildWrapReceiver(HValue* object, HValue* function) {
+  if (object->type().IsJSObject()) return object;
+  if (function->IsConstant() &&
+      HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+    Handle<JSFunction> f = Handle<JSFunction>::cast(
+        HConstant::cast(function)->handle(isolate()));
+    SharedFunctionInfo* shared = f->shared();
+    if (is_strict(shared->language_mode()) || shared->native()) return object;
+  }
+  return Add<HWrapReceiver>(object, function);
+}
+
+
+HValue* HGraphBuilder::BuildCheckAndGrowElementsCapacity(
+    HValue* object, HValue* elements, ElementsKind kind, HValue* length,
+    HValue* capacity, HValue* key) {
+  HValue* max_gap = Add<HConstant>(static_cast<int32_t>(JSObject::kMaxGap));
+  HValue* max_capacity = AddUncasted<HAdd>(capacity, max_gap);
+  Add<HBoundsCheck>(key, max_capacity);
+
+  HValue* new_capacity = BuildNewElementsCapacity(key);
+  HValue* new_elements = BuildGrowElementsCapacity(object, elements, kind, kind,
+                                                   length, new_capacity);
+  return new_elements;
+}
+
+
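+// Emits the out-of-bounds store path for keyed stores: when |key| is at or
+// past |length|, grow the backing store if the key also exceeds the current
+// capacity (inline when compiling a stub, via HMaybeGrowElements otherwise),
+// bump a JSArray's length to key + 1, and pre-initialize the new smi element
+// with zero where required. In-bounds keys only get a bounds check.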
+HValue* HGraphBuilder::BuildCheckForCapacityGrow(
+    HValue* object,
+    HValue* elements,
+    ElementsKind kind,
+    HValue* length,
+    HValue* key,
+    bool is_js_array,
+    PropertyAccessType access_type) {
+  IfBuilder length_checker(this);
+
+  Token::Value token = IsHoleyElementsKind(kind) ? Token::GTE : Token::EQ;
+  length_checker.If<HCompareNumericAndBranch>(key, length, token);
+
+  length_checker.Then();
+
+  HValue* current_capacity = AddLoadFixedArrayLength(elements);
+
+  if (top_info()->IsStub()) {
+    IfBuilder capacity_checker(this);
+    capacity_checker.If<HCompareNumericAndBranch>(key, current_capacity,
+                                                  Token::GTE);
+    capacity_checker.Then();
+    HValue* new_elements = BuildCheckAndGrowElementsCapacity(
+        object, elements, kind, length, current_capacity, key);
+    environment()->Push(new_elements);
+    capacity_checker.Else();
+    environment()->Push(elements);
+    capacity_checker.End();
+  } else {
+    HValue* result = Add<HMaybeGrowElements>(
+        object, elements, key, current_capacity, is_js_array, kind);
+    environment()->Push(result);
+  }
+
+  if (is_js_array) {
+    HValue* new_length = AddUncasted<HAdd>(key, graph_->GetConstant1());
+    new_length->ClearFlag(HValue::kCanOverflow);
+
+    Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(kind),
+                          new_length);
+  }
+
+  if (access_type == STORE && kind == FAST_SMI_ELEMENTS) {
+    HValue* checked_elements = environment()->Top();
+
+    // Write zero to ensure that the new element is initialized with some smi.
+    Add<HStoreKeyed>(checked_elements, key, graph()->GetConstant0(), nullptr,
+                     kind);
+  }
+
+  length_checker.Else();
+  Add<HBoundsCheck>(key, length);
+
+  environment()->Push(elements);
+  length_checker.End();
+
+  return environment()->Pop();
+}
+
+
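+// If |elements| is a copy-on-write FixedArray (recognized by the COW array
+// map), replaces it with a writable copy of the same capacity before any
+// element store; otherwise the backing store is used as-is.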
+HValue* HGraphBuilder::BuildCopyElementsOnWrite(HValue* object,
+                                                HValue* elements,
+                                                ElementsKind kind,
+                                                HValue* length) {
+  Factory* factory = isolate()->factory();
+
+  IfBuilder cow_checker(this);
+
+  cow_checker.If<HCompareMap>(elements, factory->fixed_cow_array_map());
+  cow_checker.Then();
+
+  HValue* capacity = AddLoadFixedArrayLength(elements);
+
+  HValue* new_elements = BuildGrowElementsCapacity(object, elements, kind,
+                                                   kind, length, capacity);
+
+  environment()->Push(new_elements);
+
+  cow_checker.Else();
+
+  environment()->Push(elements);
+
+  cow_checker.End();
+
+  return environment()->Pop();
+}
+
+
+void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
+                                                HValue* map,
+                                                ElementsKind from_kind,
+                                                ElementsKind to_kind,
+                                                bool is_jsarray) {
+  DCHECK(!IsFastHoleyElementsKind(from_kind) ||
+         IsFastHoleyElementsKind(to_kind));
+
+  if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
+    Add<HTrapAllocationMemento>(object);
+  }
+
+  if (!IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    HInstruction* elements = AddLoadElements(object);
+
+    HInstruction* empty_fixed_array = Add<HConstant>(
+        isolate()->factory()->empty_fixed_array());
+
+    IfBuilder if_builder(this);
+
+    if_builder.IfNot<HCompareObjectEqAndBranch>(elements, empty_fixed_array);
+
+    if_builder.Then();
+
+    HInstruction* elements_length = AddLoadFixedArrayLength(elements);
+
+    HInstruction* array_length =
+        is_jsarray
+            ? Add<HLoadNamedField>(object, nullptr,
+                                   HObjectAccess::ForArrayLength(from_kind))
+            : elements_length;
+
+    BuildGrowElementsCapacity(object, elements, from_kind, to_kind,
+                              array_length, elements_length);
+
+    if_builder.End();
+  }
+
+  Add<HStoreNamedField>(object, HObjectAccess::ForMap(), map);
+}
+
+
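+// Checks that |receiver| is a JSObject whose map has none of the bits in
+// |bit_field_mask| set. The instance type byte and the bit field byte are
+// adjacent in the map, so a single 16-bit load plus one masked range check
+// covers both: any disallowed bit field bit pushes the subtraction result
+// out of the [JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE] window and deopts.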
+void HGraphBuilder::BuildJSObjectCheck(HValue* receiver,
+                                       int bit_field_mask) {
+  // Check that the object isn't a smi.
+  Add<HCheckHeapObject>(receiver);
+
+  // Get the map of the receiver.
+  HValue* map =
+      Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
+
+  // Check the instance type and whether an access check is needed. Both can
+  // be done with a single load, since the two bytes are adjacent in the map.
+  HObjectAccess access(HObjectAccess::ForMapInstanceTypeAndBitField());
+  HValue* instance_type_and_bit_field =
+      Add<HLoadNamedField>(map, nullptr, access);
+
+  HValue* mask = Add<HConstant>(0x00FF | (bit_field_mask << 8));
+  HValue* and_result = AddUncasted<HBitwise>(Token::BIT_AND,
+                                             instance_type_and_bit_field,
+                                             mask);
+  HValue* sub_result = AddUncasted<HSub>(and_result,
+                                         Add<HConstant>(JS_OBJECT_TYPE));
+  Add<HBoundsCheck>(sub_result,
+                    Add<HConstant>(LAST_JS_OBJECT_TYPE + 1 - JS_OBJECT_TYPE));
+}
+
+
+void HGraphBuilder::BuildKeyedIndexCheck(HValue* key,
+                                         HIfContinuation* join_continuation) {
+  // The ifs below are ordered in an unintuitive, backward-looking way, but
+  // this is necessary. All of the paths must guarantee that the if-true of
+  // the continuation returns a smi element index and the if-false of the
+  // continuation returns either a symbol or a unique string key. All other
+  // object types cause a deopt to fall back to the runtime.
+
+  IfBuilder key_smi_if(this);
+  key_smi_if.If<HIsSmiAndBranch>(key);
+  key_smi_if.Then();
+  {
+    Push(key);  // Nothing to do, just continue to true of continuation.
+  }
+  key_smi_if.Else();
+  {
+    HValue* map = Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForMap());
+    HValue* instance_type =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
+
+    // Non-unique string: check for a string with a hash code that is
+    // actually an index.
+    STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+    IfBuilder not_string_or_name_if(this);
+    not_string_or_name_if.If<HCompareNumericAndBranch>(
+        instance_type,
+        Add<HConstant>(LAST_UNIQUE_NAME_TYPE),
+        Token::GT);
+
+    not_string_or_name_if.Then();
+    {
+      // Non-smi, non-Name, non-String: Try to convert to smi in case of
+      // HeapNumber.
+      // TODO(danno): This could call some variant of ToString
+      Push(AddUncasted<HForceRepresentation>(key, Representation::Smi()));
+    }
+    not_string_or_name_if.Else();
+    {
+      // String or Name: check explicitly for Name, since Names can
+      // short-circuit directly to the unique non-index key path.
+      IfBuilder not_symbol_if(this);
+      not_symbol_if.If<HCompareNumericAndBranch>(
+          instance_type,
+          Add<HConstant>(SYMBOL_TYPE),
+          Token::NE);
+
+      not_symbol_if.Then();
+      {
+        // String: check whether the String is a String of an index. If it is,
+        // extract the index value from the hash.
+        HValue* hash = Add<HLoadNamedField>(key, nullptr,
+                                            HObjectAccess::ForNameHashField());
+        HValue* not_index_mask = Add<HConstant>(static_cast<int>(
+            String::kContainsCachedArrayIndexMask));
+
+        HValue* not_index_test = AddUncasted<HBitwise>(
+            Token::BIT_AND, hash, not_index_mask);
+
+        IfBuilder string_index_if(this);
+        string_index_if.If<HCompareNumericAndBranch>(not_index_test,
+                                                     graph()->GetConstant0(),
+                                                     Token::EQ);
+        string_index_if.Then();
+        {
+          // String with index in hash: extract the index and merge into the
+          // smi index path.
+          Push(BuildDecodeField<String::ArrayIndexValueBits>(hash));
+        }
+        string_index_if.Else();
+        {
+          // Key is a non-index String: check for uniqueness/internalization.
+          // If it's not internalized yet, internalize it now.
+          HValue* not_internalized_bit = AddUncasted<HBitwise>(
+              Token::BIT_AND,
+              instance_type,
+              Add<HConstant>(static_cast<int>(kIsNotInternalizedMask)));
+
+          IfBuilder internalized(this);
+          internalized.If<HCompareNumericAndBranch>(not_internalized_bit,
+                                                    graph()->GetConstant0(),
+                                                    Token::EQ);
+          internalized.Then();
+          Push(key);
+
+          internalized.Else();
+          Add<HPushArguments>(key);
+          HValue* intern_key = Add<HCallRuntime>(
+              Runtime::FunctionForId(Runtime::kInternalizeString), 1);
+          Push(intern_key);
+
+          internalized.End();
+          // Key guaranteed to be a unique string
+        }
+        string_index_if.JoinContinuation(join_continuation);
+      }
+      not_symbol_if.Else();
+      {
+        Push(key);  // Key is symbol
+      }
+      not_symbol_if.JoinContinuation(join_continuation);
+    }
+    not_string_or_name_if.JoinContinuation(join_continuation);
+  }
+  key_smi_if.JoinContinuation(join_continuation);
+}
+
+
+void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
+  // Get the instance type of the receiver, and make sure that it is
+  // not one of the global object types.
+  HValue* map =
+      Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
+  HValue* instance_type =
+      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
+  HValue* global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE);
+
+  IfBuilder if_global_object(this);
+  if_global_object.If<HCompareNumericAndBranch>(instance_type, global_type,
+                                                Token::EQ);
+  if_global_object.ThenDeopt(Deoptimizer::kReceiverWasAGlobalObject);
+  if_global_object.End();
+}
+
+
+void HGraphBuilder::BuildTestForDictionaryProperties(
+    HValue* object,
+    HIfContinuation* continuation) {
+  HValue* properties = Add<HLoadNamedField>(
+      object, nullptr, HObjectAccess::ForPropertiesPointer());
+  HValue* properties_map =
+      Add<HLoadNamedField>(properties, nullptr, HObjectAccess::ForMap());
+  HValue* hash_map = Add<HLoadRoot>(Heap::kHashTableMapRootIndex);
+  IfBuilder builder(this);
+  builder.If<HCompareObjectEqAndBranch>(properties_map, hash_map);
+  builder.CaptureContinuation(continuation);
+}
+
+
+HValue* HGraphBuilder::BuildKeyedLookupCacheHash(HValue* object,
+                                                 HValue* key) {
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the string hash.
+  HValue* object_map =
+      Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMapAsInteger32());
+  HValue* shifted_map = AddUncasted<HShr>(
+      object_map, Add<HConstant>(KeyedLookupCache::kMapHashShift));
+  HValue* string_hash =
+      Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForStringHashField());
+  HValue* shifted_hash = AddUncasted<HShr>(
+      string_hash, Add<HConstant>(String::kHashShift));
+  HValue* xor_result = AddUncasted<HBitwise>(Token::BIT_XOR, shifted_map,
+                                             shifted_hash);
+  int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
+  return AddUncasted<HBitwise>(Token::BIT_AND, xor_result,
+                               Add<HConstant>(mask));
+}
+
+
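+// Computes the seeded hash for an integer element index. The mix steps
+// below appear to mirror the runtime's ComputeIntegerHash() (a Thomas Wang
+// style 32-bit integer hash), so that generated lookups and the runtime
+// agree on dictionary bucket placement.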
+HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
+  int32_t seed_value = static_cast<uint32_t>(isolate()->heap()->HashSeed());
+  HValue* seed = Add<HConstant>(seed_value);
+  HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, index, seed);
+
+  // hash = ~hash + (hash << 15);
+  HValue* shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(15));
+  HValue* not_hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash,
+                                           graph()->GetConstantMinus1());
+  hash = AddUncasted<HAdd>(shifted_hash, not_hash);
+
+  // hash = hash ^ (hash >> 12);
+  shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(12));
+  hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+
+  // hash = hash + (hash << 2);
+  shifted_hash = AddUncasted<HShl>(hash, Add<HConstant>(2));
+  hash = AddUncasted<HAdd>(hash, shifted_hash);
+
+  // hash = hash ^ (hash >> 4);
+  shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(4));
+  hash = AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+
+  // hash = hash * 2057;
+  hash = AddUncasted<HMul>(hash, Add<HConstant>(2057));
+  hash->ClearFlag(HValue::kCanOverflow);
+
+  // hash = hash ^ (hash >> 16);
+  shifted_hash = AddUncasted<HShr>(hash, Add<HConstant>(16));
+  return AddUncasted<HBitwise>(Token::BIT_XOR, hash, shifted_hash);
+}
+
+
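+// Emits an inline probe loop over a SeededNumberDictionary backing store.
+// The hash is masked down to an entry index and collisions are resolved by
+// increasing probe offsets (entry += 1, 2, 3, ... modulo capacity), which
+// should match the runtime hash table's probing sequence. Undefined keys
+// (misses) and entries with non-trivial PropertyDetails fall back to the
+// runtime via Runtime::kKeyedGetProperty.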
+HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(
+    HValue* receiver, HValue* elements, HValue* key, HValue* hash,
+    LanguageMode language_mode) {
+  HValue* capacity =
+      Add<HLoadKeyed>(elements, Add<HConstant>(NameDictionary::kCapacityIndex),
+                      nullptr, nullptr, FAST_ELEMENTS);
+
+  HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
+  mask->ChangeRepresentation(Representation::Integer32());
+  mask->ClearFlag(HValue::kCanOverflow);
+
+  HValue* entry = hash;
+  HValue* count = graph()->GetConstant1();
+  Push(entry);
+  Push(count);
+
+  HIfContinuation return_or_loop_continuation(graph()->CreateBasicBlock(),
+                                              graph()->CreateBasicBlock());
+  HIfContinuation found_key_match_continuation(graph()->CreateBasicBlock(),
+                                               graph()->CreateBasicBlock());
+  LoopBuilder probe_loop(this);
+  probe_loop.BeginBody(2);  // Drop entry, count from last environment to
+                            // appease live range building without simulates.
+
+  count = Pop();
+  entry = Pop();
+  entry = AddUncasted<HBitwise>(Token::BIT_AND, entry, mask);
+  int entry_size = SeededNumberDictionary::kEntrySize;
+  HValue* base_index = AddUncasted<HMul>(entry, Add<HConstant>(entry_size));
+  base_index->ClearFlag(HValue::kCanOverflow);
+  int start_offset = SeededNumberDictionary::kElementsStartIndex;
+  HValue* key_index =
+      AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset));
+  key_index->ClearFlag(HValue::kCanOverflow);
+
+  HValue* candidate_key =
+      Add<HLoadKeyed>(elements, key_index, nullptr, nullptr, FAST_ELEMENTS);
+  IfBuilder if_undefined(this);
+  if_undefined.If<HCompareObjectEqAndBranch>(candidate_key,
+                                             graph()->GetConstantUndefined());
+  if_undefined.Then();
+  {
+    // element == undefined means "not found". Call the runtime.
+    // TODO(jkummerow): walk the prototype chain instead.
+    Add<HPushArguments>(receiver, key);
+    Push(Add<HCallRuntime>(
+        Runtime::FunctionForId(is_strong(language_mode)
+                                   ? Runtime::kKeyedGetPropertyStrong
+                                   : Runtime::kKeyedGetProperty),
+        2));
+  }
+  if_undefined.Else();
+  {
+    IfBuilder if_match(this);
+    if_match.If<HCompareObjectEqAndBranch>(candidate_key, key);
+    if_match.Then();
+    if_match.Else();
+
+    // Update non-internalized string in the dictionary with internalized key?
+    IfBuilder if_update_with_internalized(this);
+    HValue* smi_check =
+        if_update_with_internalized.IfNot<HIsSmiAndBranch>(candidate_key);
+    if_update_with_internalized.And();
+    HValue* map = AddLoadMap(candidate_key, smi_check);
+    HValue* instance_type =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
+    HValue* not_internalized_bit = AddUncasted<HBitwise>(
+        Token::BIT_AND, instance_type,
+        Add<HConstant>(static_cast<int>(kIsNotInternalizedMask)));
+    if_update_with_internalized.If<HCompareNumericAndBranch>(
+        not_internalized_bit, graph()->GetConstant0(), Token::NE);
+    if_update_with_internalized.And();
+    if_update_with_internalized.IfNot<HCompareObjectEqAndBranch>(
+        candidate_key, graph()->GetConstantHole());
+    if_update_with_internalized.AndIf<HStringCompareAndBranch>(candidate_key,
+                                                               key, Token::EQ);
+    if_update_with_internalized.Then();
+    // Replace a key that is a non-internalized string by the equivalent
+    // internalized string for faster further lookups.
+    Add<HStoreKeyed>(elements, key_index, key, nullptr, FAST_ELEMENTS);
+    if_update_with_internalized.Else();
+
+    if_update_with_internalized.JoinContinuation(&found_key_match_continuation);
+    if_match.JoinContinuation(&found_key_match_continuation);
+
+    IfBuilder found_key_match(this, &found_key_match_continuation);
+    found_key_match.Then();
+    // Key at current probe matches. Relevant bits in the |details| field must
+    // be zero, otherwise the dictionary element requires special handling.
+    HValue* details_index =
+        AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 2));
+    details_index->ClearFlag(HValue::kCanOverflow);
+    HValue* details = Add<HLoadKeyed>(elements, details_index, nullptr, nullptr,
+                                      FAST_ELEMENTS);
+    int details_mask = PropertyDetails::TypeField::kMask;
+    details = AddUncasted<HBitwise>(Token::BIT_AND, details,
+                                    Add<HConstant>(details_mask));
+    IfBuilder details_compare(this);
+    details_compare.If<HCompareNumericAndBranch>(
+        details, graph()->GetConstant0(), Token::EQ);
+    details_compare.Then();
+    HValue* result_index =
+        AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 1));
+    result_index->ClearFlag(HValue::kCanOverflow);
+    Push(Add<HLoadKeyed>(elements, result_index, nullptr, nullptr,
+                         FAST_ELEMENTS));
+    details_compare.Else();
+    Add<HPushArguments>(receiver, key);
+    Push(Add<HCallRuntime>(
+        Runtime::FunctionForId(is_strong(language_mode)
+                                   ? Runtime::kKeyedGetPropertyStrong
+                                   : Runtime::kKeyedGetProperty),
+        2));
+    details_compare.End();
+
+    found_key_match.Else();
+    found_key_match.JoinContinuation(&return_or_loop_continuation);
+  }
+  if_undefined.JoinContinuation(&return_or_loop_continuation);
+
+  IfBuilder return_or_loop(this, &return_or_loop_continuation);
+  return_or_loop.Then();
+  probe_loop.Break();
+
+  return_or_loop.Else();
+  entry = AddUncasted<HAdd>(entry, count);
+  entry->ClearFlag(HValue::kCanOverflow);
+  count = AddUncasted<HAdd>(count, graph()->GetConstant1());
+  count->ClearFlag(HValue::kCanOverflow);
+  Push(entry);
+  Push(count);
+
+  probe_loop.EndBody();
+
+  return_or_loop.End();
+
+  return Pop();
+}
+
+
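+// Allocates and initializes a JSIteratorResult ({value, done}) inline,
+// without calling into the runtime: map from the native context, empty
+// fixed arrays for properties and elements, then the two payload fields.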
+HValue* HGraphBuilder::BuildCreateIterResultObject(HValue* value,
+                                                   HValue* done) {
+  NoObservableSideEffectsScope scope(this);
+
+  // Allocate the JSIteratorResult object.
+  HValue* result =
+      Add<HAllocate>(Add<HConstant>(JSIteratorResult::kSize), HType::JSObject(),
+                     NOT_TENURED, JS_ITERATOR_RESULT_TYPE);
+
+  // Initialize the JSIteratorResult object.
+  HValue* native_context = BuildGetNativeContext();
+  HValue* map = Add<HLoadNamedField>(
+      native_context, nullptr,
+      HObjectAccess::ForContextSlot(Context::ITERATOR_RESULT_MAP_INDEX));
+  Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
+  HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
+  Add<HStoreNamedField>(result, HObjectAccess::ForPropertiesPointer(),
+                        empty_fixed_array);
+  Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(),
+                        empty_fixed_array);
+  Add<HStoreNamedField>(result, HObjectAccess::ForObservableJSObjectOffset(
+                                    JSIteratorResult::kValueOffset),
+                        value);
+  Add<HStoreNamedField>(result, HObjectAccess::ForObservableJSObjectOffset(
+                                    JSIteratorResult::kDoneOffset),
+                        done);
+  STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
+  return result;
+}
+
+
+HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
+                                                  HValue* index,
+                                                  HValue* input) {
+  NoObservableSideEffectsScope scope(this);
+  HConstant* max_length = Add<HConstant>(JSArray::kInitialMaxFastElementArray);
+  Add<HBoundsCheck>(length, max_length);
+
+  // Generate size calculation code here in order to make it dominate
+  // the JSRegExpResult allocation.
+  ElementsKind elements_kind = FAST_ELEMENTS;
+  HValue* size = BuildCalculateElementsSize(elements_kind, length);
+
+  // Allocate the JSRegExpResult and the FixedArray in one step.
+  HValue* result = Add<HAllocate>(
+      Add<HConstant>(JSRegExpResult::kSize), HType::JSArray(),
+      NOT_TENURED, JS_ARRAY_TYPE);
+
+  // Initialize the JSRegExpResult header.
+  HValue* native_context = Add<HLoadNamedField>(
+      context(), nullptr,
+      HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForMap(),
+      Add<HLoadNamedField>(
+          native_context, nullptr,
+          HObjectAccess::ForContextSlot(Context::REGEXP_RESULT_MAP_INDEX)));
+  HConstant* empty_fixed_array =
+      Add<HConstant>(isolate()->factory()->empty_fixed_array());
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForJSArrayOffset(JSArray::kPropertiesOffset),
+      empty_fixed_array);
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
+      empty_fixed_array);
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForJSArrayOffset(JSArray::kLengthOffset), length);
+
+  // Initialize the additional fields.
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kIndexOffset),
+      index);
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForJSArrayOffset(JSRegExpResult::kInputOffset),
+      input);
+
+  // Allocate and initialize the elements header.
+  HAllocate* elements = BuildAllocateElements(elements_kind, size);
+  BuildInitializeElementsHeader(elements, elements_kind, length);
+
+  if (!elements->has_size_upper_bound()) {
+    HConstant* size_in_bytes_upper_bound = EstablishElementsAllocationSize(
+        elements_kind, max_length->Integer32Value());
+    elements->set_size_upper_bound(size_in_bytes_upper_bound);
+  }
+
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
+      elements);
+
+  // Initialize the elements contents with undefined.
+  BuildFillElementsWithValue(
+      elements, elements_kind, graph()->GetConstant0(), length,
+      graph()->GetConstantUndefined());
+
+  return result;
+}
+
+
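+// Converts a number to its cached string representation. The number string
+// cache is a FixedArray of (number, string) pairs: the hash is masked to
+// the entry count (half the array length), the key lives at index 2 * hash
+// and the value at 2 * hash + 1, which is why the code below shifts the
+// hash left by one. Cache misses fall back to
+// Runtime::kNumberToStringSkipCache.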
+HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
+  NoObservableSideEffectsScope scope(this);
+
+  // Convert constant numbers at compile time.
+  if (object->IsConstant() && HConstant::cast(object)->HasNumberValue()) {
+    Handle<Object> number = HConstant::cast(object)->handle(isolate());
+    Handle<String> result = isolate()->factory()->NumberToString(number);
+    return Add<HConstant>(result);
+  }
+
+  // Create a joinable continuation.
+  HIfContinuation found(graph()->CreateBasicBlock(),
+                        graph()->CreateBasicBlock());
+
+  // Load the number string cache.
+  HValue* number_string_cache =
+      Add<HLoadRoot>(Heap::kNumberStringCacheRootIndex);
+
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  HValue* mask = AddLoadFixedArrayLength(number_string_cache);
+  mask->set_type(HType::Smi());
+  mask = AddUncasted<HSar>(mask, graph()->GetConstant1());
+  mask = AddUncasted<HSub>(mask, graph()->GetConstant1());
+
+  // Check whether object is a smi.
+  IfBuilder if_objectissmi(this);
+  if_objectissmi.If<HIsSmiAndBranch>(object);
+  if_objectissmi.Then();
+  {
+    // Compute hash for smi similar to smi_get_hash().
+    HValue* hash = AddUncasted<HBitwise>(Token::BIT_AND, object, mask);
+
+    // Load the key.
+    HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
+    HValue* key = Add<HLoadKeyed>(number_string_cache, key_index, nullptr,
+                                  nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+
+    // Check if object == key.
+    IfBuilder if_objectiskey(this);
+    if_objectiskey.If<HCompareObjectEqAndBranch>(object, key);
+    if_objectiskey.Then();
+    {
+      // Make the key_index available.
+      Push(key_index);
+    }
+    if_objectiskey.JoinContinuation(&found);
+  }
+  if_objectissmi.Else();
+  {
+    if (type->Is(Type::SignedSmall())) {
+      if_objectissmi.Deopt(Deoptimizer::kExpectedSmi);
+    } else {
+      // Check if the object is a heap number.
+      IfBuilder if_objectisnumber(this);
+      HValue* objectisnumber = if_objectisnumber.If<HCompareMap>(
+          object, isolate()->factory()->heap_number_map());
+      if_objectisnumber.Then();
+      {
+        // Compute hash for heap number similar to double_get_hash().
+        HValue* low = Add<HLoadNamedField>(
+            object, objectisnumber,
+            HObjectAccess::ForHeapNumberValueLowestBits());
+        HValue* high = Add<HLoadNamedField>(
+            object, objectisnumber,
+            HObjectAccess::ForHeapNumberValueHighestBits());
+        HValue* hash = AddUncasted<HBitwise>(Token::BIT_XOR, low, high);
+        hash = AddUncasted<HBitwise>(Token::BIT_AND, hash, mask);
+
+        // Load the key.
+        HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
+        HValue* key =
+            Add<HLoadKeyed>(number_string_cache, key_index, nullptr, nullptr,
+                            FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+
+        // Check if the key is a heap number and compare it with the object.
+        IfBuilder if_keyisnotsmi(this);
+        HValue* keyisnotsmi = if_keyisnotsmi.IfNot<HIsSmiAndBranch>(key);
+        if_keyisnotsmi.Then();
+        {
+          IfBuilder if_keyisheapnumber(this);
+          if_keyisheapnumber.If<HCompareMap>(
+              key, isolate()->factory()->heap_number_map());
+          if_keyisheapnumber.Then();
+          {
+            // Check if values of key and object match.
+            IfBuilder if_keyeqobject(this);
+            if_keyeqobject.If<HCompareNumericAndBranch>(
+                Add<HLoadNamedField>(key, keyisnotsmi,
+                                     HObjectAccess::ForHeapNumberValue()),
+                Add<HLoadNamedField>(object, objectisnumber,
+                                     HObjectAccess::ForHeapNumberValue()),
+                Token::EQ);
+            if_keyeqobject.Then();
+            {
+              // Make the key_index available.
+              Push(key_index);
+            }
+            if_keyeqobject.JoinContinuation(&found);
+          }
+          if_keyisheapnumber.JoinContinuation(&found);
+        }
+        if_keyisnotsmi.JoinContinuation(&found);
+      }
+      if_objectisnumber.Else();
+      {
+        if (type->Is(Type::Number())) {
+          if_objectisnumber.Deopt(Deoptimizer::kExpectedHeapNumber);
+        }
+      }
+      if_objectisnumber.JoinContinuation(&found);
+    }
+  }
+  if_objectissmi.JoinContinuation(&found);
+
+  // Check for cache hit.
+  IfBuilder if_found(this, &found);
+  if_found.Then();
+  {
+    // Count number to string operation in native code.
+    AddIncrementCounter(isolate()->counters()->number_to_string_native());
+
+    // Load the value in case of cache hit.
+    HValue* key_index = Pop();
+    HValue* value_index = AddUncasted<HAdd>(key_index, graph()->GetConstant1());
+    Push(Add<HLoadKeyed>(number_string_cache, value_index, nullptr, nullptr,
+                         FAST_ELEMENTS, ALLOW_RETURN_HOLE));
+  }
+  if_found.Else();
+  {
+    // Cache miss; fall back to the runtime.
+    Add<HPushArguments>(object);
+    Push(Add<HCallRuntime>(
+            Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
+            1));
+  }
+  if_found.End();
+
+  return Pop();
+}
+
+
+HValue* HGraphBuilder::BuildToObject(HValue* receiver) {
+  NoObservableSideEffectsScope scope(this);
+
+  // Create a joinable continuation.
+  HIfContinuation wrap(graph()->CreateBasicBlock(),
+                       graph()->CreateBasicBlock());
+
+  // Determine the proper global constructor function required to wrap
+  // {receiver} into a JSValue, unless {receiver} is already a {JSReceiver}, in
+  // which case we just return it.  Deopts to Runtime::kToObject if {receiver}
+  // is undefined or null.
+  IfBuilder receiver_is_smi(this);
+  receiver_is_smi.If<HIsSmiAndBranch>(receiver);
+  receiver_is_smi.Then();
+  {
+    // Use global Number function.
+    Push(Add<HConstant>(Context::NUMBER_FUNCTION_INDEX));
+  }
+  receiver_is_smi.Else();
+  {
+    // Determine {receiver} map and instance type.
+    HValue* receiver_map =
+        Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
+    HValue* receiver_instance_type = Add<HLoadNamedField>(
+        receiver_map, nullptr, HObjectAccess::ForMapInstanceType());
+
+    // First check whether {receiver} is already a spec object (fast case).
+    IfBuilder receiver_is_not_spec_object(this);
+    receiver_is_not_spec_object.If<HCompareNumericAndBranch>(
+        receiver_instance_type, Add<HConstant>(FIRST_JS_RECEIVER_TYPE),
+        Token::LT);
+    receiver_is_not_spec_object.Then();
+    {
+      // Load the constructor function index from the {receiver} map.
+      HValue* constructor_function_index = Add<HLoadNamedField>(
+          receiver_map, nullptr,
+          HObjectAccess::ForMapInObjectPropertiesOrConstructorFunctionIndex());
+
+      // Check if {receiver} has a constructor (null and undefined have no
+      // constructors, so we deoptimize to the runtime to throw an exception).
+      IfBuilder constructor_function_index_is_invalid(this);
+      constructor_function_index_is_invalid.If<HCompareNumericAndBranch>(
+          constructor_function_index,
+          Add<HConstant>(Map::kNoConstructorFunctionIndex), Token::EQ);
+      constructor_function_index_is_invalid.ThenDeopt(
+          Deoptimizer::kUndefinedOrNullInToObject);
+      constructor_function_index_is_invalid.End();
+
+      // Use the global constructor function.
+      Push(constructor_function_index);
+    }
+    receiver_is_not_spec_object.JoinContinuation(&wrap);
+  }
+  receiver_is_smi.JoinContinuation(&wrap);
+
+  // Wrap the receiver if necessary.
+  IfBuilder if_wrap(this, &wrap);
+  if_wrap.Then();
+  {
+    // Grab the constructor function index.
+    HValue* constructor_index = Pop();
+
+    // Load native context.
+    HValue* native_context = BuildGetNativeContext();
+
+    // Determine the initial map for the global constructor.
+    HValue* constructor = Add<HLoadKeyed>(native_context, constructor_index,
+                                          nullptr, nullptr, FAST_ELEMENTS);
+    HValue* constructor_initial_map = Add<HLoadNamedField>(
+        constructor, nullptr, HObjectAccess::ForPrototypeOrInitialMap());
+    // Allocate and initialize a JSValue wrapper.
+    HValue* value =
+        BuildAllocate(Add<HConstant>(JSValue::kSize), HType::JSObject(),
+                      JS_VALUE_TYPE, HAllocationMode());
+    Add<HStoreNamedField>(value, HObjectAccess::ForMap(),
+                          constructor_initial_map);
+    HValue* empty_fixed_array = Add<HLoadRoot>(Heap::kEmptyFixedArrayRootIndex);
+    Add<HStoreNamedField>(value, HObjectAccess::ForPropertiesPointer(),
+                          empty_fixed_array);
+    Add<HStoreNamedField>(value, HObjectAccess::ForElementsPointer(),
+                          empty_fixed_array);
+    Add<HStoreNamedField>(value, HObjectAccess::ForObservableJSObjectOffset(
+                                     JSValue::kValueOffset),
+                          receiver);
+    Push(value);
+  }
+  if_wrap.Else();
+  { Push(receiver); }
+  if_wrap.End();
+  return Pop();
+}
+
+
+HAllocate* HGraphBuilder::BuildAllocate(
+    HValue* object_size,
+    HType type,
+    InstanceType instance_type,
+    HAllocationMode allocation_mode) {
+  // Compute the effective allocation size.
+  HValue* size = object_size;
+  if (allocation_mode.CreateAllocationMementos()) {
+    size = AddUncasted<HAdd>(size, Add<HConstant>(AllocationMemento::kSize));
+    size->ClearFlag(HValue::kCanOverflow);
+  }
+
+  // Perform the actual allocation.
+  HAllocate* object = Add<HAllocate>(
+      size, type, allocation_mode.GetPretenureMode(),
+      instance_type, allocation_mode.feedback_site());
+
+  // Setup the allocation memento.
+  if (allocation_mode.CreateAllocationMementos()) {
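+    // The memento is placed directly after the object proper, i.e. at offset
+    // object_size within the single allocation performed above.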
+    BuildCreateAllocationMemento(
+        object, object_size, allocation_mode.current_site());
+  }
+
+  return object;
+}
+
+
+HValue* HGraphBuilder::BuildAddStringLengths(HValue* left_length,
+                                             HValue* right_length) {
+  // Compute the combined string length and check against max string length.
+  HValue* length = AddUncasted<HAdd>(left_length, right_length);
+  // Check that length <= kMaxLength <=> length < kMaxLength + 1.
+  HValue* max_length = Add<HConstant>(String::kMaxLength + 1);
+  Add<HBoundsCheck>(length, max_length);
+  return length;
+}
+
+
+HValue* HGraphBuilder::BuildCreateConsString(
+    HValue* length,
+    HValue* left,
+    HValue* right,
+    HAllocationMode allocation_mode) {
+  // Determine the string instance types.
+  HInstruction* left_instance_type = AddLoadStringInstanceType(left);
+  HInstruction* right_instance_type = AddLoadStringInstanceType(right);
+
+  // Allocate the cons string object. HAllocate does not care whether we
+  // pass CONS_STRING_TYPE or CONS_ONE_BYTE_STRING_TYPE here, so we just use
+  // CONS_STRING_TYPE here. Below we decide whether the cons string is
+  // one-byte or two-byte and set the appropriate map.
+  DCHECK(HAllocate::CompatibleInstanceTypes(CONS_STRING_TYPE,
+                                            CONS_ONE_BYTE_STRING_TYPE));
+  HAllocate* result = BuildAllocate(Add<HConstant>(ConsString::kSize),
+                                    HType::String(), CONS_STRING_TYPE,
+                                    allocation_mode);
+
+  // Compute intersection and difference of instance types.
+  HValue* anded_instance_types = AddUncasted<HBitwise>(
+      Token::BIT_AND, left_instance_type, right_instance_type);
+  HValue* xored_instance_types = AddUncasted<HBitwise>(
+      Token::BIT_XOR, left_instance_type, right_instance_type);
+
+  // We create a one-byte cons string if
+  // 1. both strings are one-byte, or
+  // 2. at least one of the strings is two-byte, but happens to contain only
+  //    one-byte characters.
+  // To do this, we check
+  // 1. if both strings are one-byte, or if the one-byte data hint is set in
+  //    both strings, or
+  // 2. if one of the strings has the one-byte data hint set and the other
+  //    string is one-byte.
+  IfBuilder if_onebyte(this);
+  STATIC_ASSERT(kOneByteStringTag != 0);
+  STATIC_ASSERT(kOneByteDataHintMask != 0);
+  if_onebyte.If<HCompareNumericAndBranch>(
+      AddUncasted<HBitwise>(
+          Token::BIT_AND, anded_instance_types,
+          Add<HConstant>(static_cast<int32_t>(
+                  kStringEncodingMask | kOneByteDataHintMask))),
+      graph()->GetConstant0(), Token::NE);
+  if_onebyte.Or();
+  STATIC_ASSERT(kOneByteStringTag != 0 &&
+                kOneByteDataHintTag != 0 &&
+                kOneByteDataHintTag != kOneByteStringTag);
+  if_onebyte.If<HCompareNumericAndBranch>(
+      AddUncasted<HBitwise>(
+          Token::BIT_AND, xored_instance_types,
+          Add<HConstant>(static_cast<int32_t>(
+                  kOneByteStringTag | kOneByteDataHintTag))),
+      Add<HConstant>(static_cast<int32_t>(
+              kOneByteStringTag | kOneByteDataHintTag)), Token::EQ);
+  if_onebyte.Then();
+  {
+    // We can safely skip the write barrier for storing the map here.
+    Add<HStoreNamedField>(
+        result, HObjectAccess::ForMap(),
+        Add<HConstant>(isolate()->factory()->cons_one_byte_string_map()));
+  }
+  if_onebyte.Else();
+  {
+    // We can safely skip the write barrier for storing the map here.
+    Add<HStoreNamedField>(
+        result, HObjectAccess::ForMap(),
+        Add<HConstant>(isolate()->factory()->cons_string_map()));
+  }
+  if_onebyte.End();
+
+  // Initialize the cons string fields.
+  Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
+                        Add<HConstant>(String::kEmptyHashField));
+  Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
+  Add<HStoreNamedField>(result, HObjectAccess::ForConsStringFirst(), left);
+  Add<HStoreNamedField>(result, HObjectAccess::ForConsStringSecond(), right);
+
+  // Count the native string addition.
+  AddIncrementCounter(isolate()->counters()->string_add_native());
+
+  return result;
+}
+
+
+void HGraphBuilder::BuildCopySeqStringChars(HValue* src,
+                                            HValue* src_offset,
+                                            String::Encoding src_encoding,
+                                            HValue* dst,
+                                            HValue* dst_offset,
+                                            String::Encoding dst_encoding,
+                                            HValue* length) {
+  DCHECK(dst_encoding != String::ONE_BYTE_ENCODING ||
+         src_encoding == String::ONE_BYTE_ENCODING);
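+  // A one-byte destination may only be copied into from a one-byte source;
+  // a two-byte destination accepts either encoding.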
+  LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
+  HValue* index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT);
+  {
+    HValue* src_index = AddUncasted<HAdd>(src_offset, index);
+    HValue* value =
+        AddUncasted<HSeqStringGetChar>(src_encoding, src, src_index);
+    HValue* dst_index = AddUncasted<HAdd>(dst_offset, index);
+    Add<HSeqStringSetChar>(dst_encoding, dst, dst_index, value);
+  }
+  loop.EndBody();
+}
+
+
+HValue* HGraphBuilder::BuildObjectSizeAlignment(
+    HValue* unaligned_size, int header_size) {
+  DCHECK((header_size & kObjectAlignmentMask) == 0);
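+  // Round the total size up to the next object-aligned boundary:
+  //   (unaligned_size + header_size + kObjectAlignmentMask) & ~kObjectAlignmentMask
+  // For example, assuming 8-byte object alignment, an unaligned size of 13
+  // with a 16-byte header yields (13 + 16 + 7) & ~7 = 32.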
+  HValue* size = AddUncasted<HAdd>(
+      unaligned_size, Add<HConstant>(static_cast<int32_t>(
+          header_size + kObjectAlignmentMask)));
+  size->ClearFlag(HValue::kCanOverflow);
+  return AddUncasted<HBitwise>(
+      Token::BIT_AND, size, Add<HConstant>(static_cast<int32_t>(
+          ~kObjectAlignmentMask)));
+}
+
+
+HValue* HGraphBuilder::BuildUncheckedStringAdd(
+    HValue* left,
+    HValue* right,
+    HAllocationMode allocation_mode) {
+  // Determine the string lengths.
+  HValue* left_length = AddLoadStringLength(left);
+  HValue* right_length = AddLoadStringLength(right);
+
+  // Compute the combined string length.
+  HValue* length = BuildAddStringLengths(left_length, right_length);
+
+  // Do some manual constant folding here.
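+  // If one operand's length is a non-zero constant c, the combined length is
+  // at least c + 1, since the other operand is known to be non-empty here;
+  // comparing c + 1 against ConsString::kMinLength therefore suffices.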
+  if (left_length->IsConstant()) {
+    HConstant* c_left_length = HConstant::cast(left_length);
+    DCHECK_NE(0, c_left_length->Integer32Value());
+    if (c_left_length->Integer32Value() + 1 >= ConsString::kMinLength) {
+      // The right string contains at least one character.
+      return BuildCreateConsString(length, left, right, allocation_mode);
+    }
+  } else if (right_length->IsConstant()) {
+    HConstant* c_right_length = HConstant::cast(right_length);
+    DCHECK_NE(0, c_right_length->Integer32Value());
+    if (c_right_length->Integer32Value() + 1 >= ConsString::kMinLength) {
+      // The left string contains at least one character.
+      return BuildCreateConsString(length, left, right, allocation_mode);
+    }
+  }
+
+  // Check if we should create a cons string.
+  IfBuilder if_createcons(this);
+  if_createcons.If<HCompareNumericAndBranch>(
+      length, Add<HConstant>(ConsString::kMinLength), Token::GTE);
+  if_createcons.Then();
+  {
+    // Create a cons string.
+    Push(BuildCreateConsString(length, left, right, allocation_mode));
+  }
+  if_createcons.Else();
+  {
+    // Determine the string instance types.
+    HValue* left_instance_type = AddLoadStringInstanceType(left);
+    HValue* right_instance_type = AddLoadStringInstanceType(right);
+
+    // Compute union and difference of instance types.
+    HValue* ored_instance_types = AddUncasted<HBitwise>(
+        Token::BIT_OR, left_instance_type, right_instance_type);
+    HValue* xored_instance_types = AddUncasted<HBitwise>(
+        Token::BIT_XOR, left_instance_type, right_instance_type);
+
+    // Check if both strings have the same encoding and both are
+    // sequential.
+    IfBuilder if_sameencodingandsequential(this);
+    if_sameencodingandsequential.If<HCompareNumericAndBranch>(
+        AddUncasted<HBitwise>(
+            Token::BIT_AND, xored_instance_types,
+            Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
+        graph()->GetConstant0(), Token::EQ);
+    if_sameencodingandsequential.And();
+    STATIC_ASSERT(kSeqStringTag == 0);
+    if_sameencodingandsequential.If<HCompareNumericAndBranch>(
+        AddUncasted<HBitwise>(
+            Token::BIT_AND, ored_instance_types,
+            Add<HConstant>(static_cast<int32_t>(kStringRepresentationMask))),
+        graph()->GetConstant0(), Token::EQ);
+    if_sameencodingandsequential.Then();
+    {
+      HConstant* string_map =
+          Add<HConstant>(isolate()->factory()->string_map());
+      HConstant* one_byte_string_map =
+          Add<HConstant>(isolate()->factory()->one_byte_string_map());
+
+      // Determine map and size depending on whether result is one-byte string.
+      IfBuilder if_onebyte(this);
+      STATIC_ASSERT(kOneByteStringTag != 0);
+      if_onebyte.If<HCompareNumericAndBranch>(
+          AddUncasted<HBitwise>(
+              Token::BIT_AND, ored_instance_types,
+              Add<HConstant>(static_cast<int32_t>(kStringEncodingMask))),
+          graph()->GetConstant0(), Token::NE);
+      if_onebyte.Then();
+      {
+        // Allocate sequential one-byte string object.
+        Push(length);
+        Push(one_byte_string_map);
+      }
+      if_onebyte.Else();
+      {
+        // Allocate sequential two-byte string object.
+        HValue* size = AddUncasted<HShl>(length, graph()->GetConstant1());
+        size->ClearFlag(HValue::kCanOverflow);
+        size->SetFlag(HValue::kUint32);
+        Push(size);
+        Push(string_map);
+      }
+      if_onebyte.End();
+      HValue* map = Pop();
+
+      // Calculate the number of bytes needed for the characters in the
+      // string while observing object alignment.
+      STATIC_ASSERT((SeqString::kHeaderSize & kObjectAlignmentMask) == 0);
+      HValue* size = BuildObjectSizeAlignment(Pop(), SeqString::kHeaderSize);
+
+      IfBuilder if_size(this);
+      if_size.If<HCompareNumericAndBranch>(
+          size, Add<HConstant>(Page::kMaxRegularHeapObjectSize), Token::LT);
+      if_size.Then();
+      {
+        // Allocate the string object. HAllocate does not care whether we pass
+        // STRING_TYPE or ONE_BYTE_STRING_TYPE here, so we just use STRING_TYPE.
+        HAllocate* result =
+            BuildAllocate(size, HType::String(), STRING_TYPE, allocation_mode);
+        Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
+
+        // Initialize the string fields.
+        Add<HStoreNamedField>(result, HObjectAccess::ForStringHashField(),
+                              Add<HConstant>(String::kEmptyHashField));
+        Add<HStoreNamedField>(result, HObjectAccess::ForStringLength(), length);
+
+        // Copy characters to the result string.
+        IfBuilder if_twobyte(this);
+        if_twobyte.If<HCompareObjectEqAndBranch>(map, string_map);
+        if_twobyte.Then();
+        {
+          // Copy characters from the left string.
+          BuildCopySeqStringChars(
+              left, graph()->GetConstant0(), String::TWO_BYTE_ENCODING, result,
+              graph()->GetConstant0(), String::TWO_BYTE_ENCODING, left_length);
+
+          // Copy characters from the right string.
+          BuildCopySeqStringChars(
+              right, graph()->GetConstant0(), String::TWO_BYTE_ENCODING, result,
+              left_length, String::TWO_BYTE_ENCODING, right_length);
+        }
+        if_twobyte.Else();
+        {
+          // Copy characters from the left string.
+          BuildCopySeqStringChars(
+              left, graph()->GetConstant0(), String::ONE_BYTE_ENCODING, result,
+              graph()->GetConstant0(), String::ONE_BYTE_ENCODING, left_length);
+
+          // Copy characters from the right string.
+          BuildCopySeqStringChars(
+              right, graph()->GetConstant0(), String::ONE_BYTE_ENCODING, result,
+              left_length, String::ONE_BYTE_ENCODING, right_length);
+        }
+        if_twobyte.End();
+
+        // Count the native string addition.
+        AddIncrementCounter(isolate()->counters()->string_add_native());
+
+        // Return the sequential string.
+        Push(result);
+      }
+      if_size.Else();
+      {
+        // Fall back to the runtime to add the two strings. The string has to
+        // be allocated in LO space.
+        Add<HPushArguments>(left, right);
+        Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kStringAdd), 2));
+      }
+      if_size.End();
+    }
+    if_sameencodingandsequential.Else();
+    {
+      // Fall back to the runtime to add the two strings.
+      Add<HPushArguments>(left, right);
+      Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kStringAdd), 2));
+    }
+    if_sameencodingandsequential.End();
+  }
+  if_createcons.End();
+
+  return Pop();
+}
+
+
+HValue* HGraphBuilder::BuildStringAdd(
+    HValue* left,
+    HValue* right,
+    HAllocationMode allocation_mode) {
+  NoObservableSideEffectsScope no_effects(this);
+
+  // Determine string lengths.
+  HValue* left_length = AddLoadStringLength(left);
+  HValue* right_length = AddLoadStringLength(right);
+
+  // Check if left string is empty.
+  IfBuilder if_leftempty(this);
+  if_leftempty.If<HCompareNumericAndBranch>(
+      left_length, graph()->GetConstant0(), Token::EQ);
+  if_leftempty.Then();
+  {
+    // Count the native string addition.
+    AddIncrementCounter(isolate()->counters()->string_add_native());
+
+    // Just return the right string.
+    Push(right);
+  }
+  if_leftempty.Else();
+  {
+    // Check if right string is empty.
+    IfBuilder if_rightempty(this);
+    if_rightempty.If<HCompareNumericAndBranch>(
+        right_length, graph()->GetConstant0(), Token::EQ);
+    if_rightempty.Then();
+    {
+      // Count the native string addition.
+      AddIncrementCounter(isolate()->counters()->string_add_native());
+
+      // Just return the left string.
+      Push(left);
+    }
+    if_rightempty.Else();
+    {
+      // Add the two non-empty strings.
+      Push(BuildUncheckedStringAdd(left, right, allocation_mode));
+    }
+    if_rightempty.End();
+  }
+  if_leftempty.End();
+
+  return Pop();
+}
+
+
+HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
+    HValue* checked_object,
+    HValue* key,
+    HValue* val,
+    bool is_js_array,
+    ElementsKind elements_kind,
+    PropertyAccessType access_type,
+    LoadKeyedHoleMode load_mode,
+    KeyedAccessStoreMode store_mode) {
+  DCHECK(top_info()->IsStub() || checked_object->IsCompareMap() ||
+         checked_object->IsCheckMaps());
+  DCHECK(!IsFixedTypedArrayElementsKind(elements_kind) || !is_js_array);
+  // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
+  // on a HElementsTransition instruction. The flag can also be removed if the
+  // map to check has FAST_HOLEY_ELEMENTS, since there can be no further
+  // ElementsKind transitions. Finally, the dependency can be removed for stores
+  // for FAST_ELEMENTS, since a transition to HOLEY elements won't change the
+  // generated store code.
+  if ((elements_kind == FAST_HOLEY_ELEMENTS) ||
+      (elements_kind == FAST_ELEMENTS && access_type == STORE)) {
+    checked_object->ClearDependsOnFlag(kElementsKind);
+  }
+
+  bool fast_smi_only_elements = IsFastSmiElementsKind(elements_kind);
+  bool fast_elements = IsFastObjectElementsKind(elements_kind);
+  HValue* elements = AddLoadElements(checked_object);
+  if (access_type == STORE && (fast_elements || fast_smi_only_elements) &&
+      store_mode != STORE_NO_TRANSITION_HANDLE_COW) {
+    HCheckMaps* check_cow_map = Add<HCheckMaps>(
+        elements, isolate()->factory()->fixed_array_map());
+    check_cow_map->ClearDependsOnFlag(kElementsKind);
+  }
+  HInstruction* length = NULL;
+  if (is_js_array) {
+    length = Add<HLoadNamedField>(
+        checked_object->ActualValue(), checked_object,
+        HObjectAccess::ForArrayLength(elements_kind));
+  } else {
+    length = AddLoadFixedArrayLength(elements);
+  }
+  length->set_type(HType::Smi());
+  HValue* checked_key = NULL;
+  if (IsFixedTypedArrayElementsKind(elements_kind)) {
+    checked_object = Add<HCheckArrayBufferNotNeutered>(checked_object);
+
+    HValue* external_pointer = Add<HLoadNamedField>(
+        elements, nullptr,
+        HObjectAccess::ForFixedTypedArrayBaseExternalPointer());
+    HValue* base_pointer = Add<HLoadNamedField>(
+        elements, nullptr, HObjectAccess::ForFixedTypedArrayBaseBasePointer());
+    HValue* backing_store = AddUncasted<HAdd>(
+        external_pointer, base_pointer, Strength::WEAK, AddOfExternalAndTagged);
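+    // The effective backing-store address is external_pointer + base_pointer;
+    // this addition works for both on-heap and off-heap typed arrays.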
+
+    if (store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+      NoObservableSideEffectsScope no_effects(this);
+      IfBuilder length_checker(this);
+      length_checker.If<HCompareNumericAndBranch>(key, length, Token::LT);
+      length_checker.Then();
+      IfBuilder negative_checker(this);
+      HValue* bounds_check = negative_checker.If<HCompareNumericAndBranch>(
+          key, graph()->GetConstant0(), Token::GTE);
+      negative_checker.Then();
+      HInstruction* result = AddElementAccess(
+          backing_store, key, val, bounds_check, checked_object->ActualValue(),
+          elements_kind, access_type);
+      negative_checker.ElseDeopt(Deoptimizer::kNegativeKeyEncountered);
+      negative_checker.End();
+      length_checker.End();
+      return result;
+    } else {
+      DCHECK(store_mode == STANDARD_STORE);
+      checked_key = Add<HBoundsCheck>(key, length);
+      return AddElementAccess(backing_store, checked_key, val, checked_object,
+                              checked_object->ActualValue(), elements_kind,
+                              access_type);
+    }
+  }
+  DCHECK(fast_smi_only_elements ||
+         fast_elements ||
+         IsFastDoubleElementsKind(elements_kind));
+
+  // In case val is stored into a fast smi array, ensure that the value is a
+  // smi before manipulating the backing store. Otherwise the actual store may
+  // deopt, leaving the backing store in an invalid state.
+  if (access_type == STORE && IsFastSmiElementsKind(elements_kind) &&
+      !val->type().IsSmi()) {
+    val = AddUncasted<HForceRepresentation>(val, Representation::Smi());
+  }
+
+  if (IsGrowStoreMode(store_mode)) {
+    NoObservableSideEffectsScope no_effects(this);
+    Representation representation = HStoreKeyed::RequiredValueRepresentation(
+        elements_kind, STORE_TO_INITIALIZED_ENTRY);
+    val = AddUncasted<HForceRepresentation>(val, representation);
+    elements = BuildCheckForCapacityGrow(checked_object, elements,
+                                         elements_kind, length, key,
+                                         is_js_array, access_type);
+    checked_key = key;
+  } else {
+    checked_key = Add<HBoundsCheck>(key, length);
+
+    if (access_type == STORE && (fast_elements || fast_smi_only_elements)) {
+      if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
+        NoObservableSideEffectsScope no_effects(this);
+        elements = BuildCopyElementsOnWrite(checked_object, elements,
+                                            elements_kind, length);
+      } else {
+        HCheckMaps* check_cow_map = Add<HCheckMaps>(
+            elements, isolate()->factory()->fixed_array_map());
+        check_cow_map->ClearDependsOnFlag(kElementsKind);
+      }
+    }
+  }
+  return AddElementAccess(elements, checked_key, val, checked_object, nullptr,
+                          elements_kind, access_type, load_mode);
+}
+
+
+HValue* HGraphBuilder::BuildAllocateArrayFromLength(
+    JSArrayBuilder* array_builder,
+    HValue* length_argument) {
+  if (length_argument->IsConstant() &&
+      HConstant::cast(length_argument)->HasSmiValue()) {
+    int array_length = HConstant::cast(length_argument)->Integer32Value();
+    if (array_length == 0) {
+      return array_builder->AllocateEmptyArray();
+    } else {
+      return array_builder->AllocateArray(length_argument,
+                                          array_length,
+                                          length_argument);
+    }
+  }
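+  // Non-constant length: bounds-check it against the maximum fast-element
+  // array length, then use the preallocated initial capacity when the length
+  // is zero and the checked length as the capacity otherwise.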
+
+  HValue* constant_zero = graph()->GetConstant0();
+  HConstant* max_alloc_length =
+      Add<HConstant>(JSArray::kInitialMaxFastElementArray);
+  HInstruction* checked_length = Add<HBoundsCheck>(length_argument,
+                                                   max_alloc_length);
+  IfBuilder if_builder(this);
+  if_builder.If<HCompareNumericAndBranch>(checked_length, constant_zero,
+                                          Token::EQ);
+  if_builder.Then();
+  const int initial_capacity = JSArray::kPreallocatedArrayElements;
+  HConstant* initial_capacity_node = Add<HConstant>(initial_capacity);
+  Push(initial_capacity_node);  // capacity
+  Push(constant_zero);          // length
+  if_builder.Else();
+  if (!(top_info()->IsStub()) &&
+      IsFastPackedElementsKind(array_builder->kind())) {
+    // We'll come back later with better (holey) feedback.
+    if_builder.Deopt(
+        Deoptimizer::kHoleyArrayDespitePackedElements_kindFeedback);
+  } else {
+    Push(checked_length);         // capacity
+    Push(checked_length);         // length
+  }
+  if_builder.End();
+
+  // Figure out total size
+  HValue* length = Pop();
+  HValue* capacity = Pop();
+  return array_builder->AllocateArray(capacity, max_alloc_length, length);
+}
+
+
+HValue* HGraphBuilder::BuildCalculateElementsSize(ElementsKind kind,
+                                                  HValue* capacity) {
+  int elements_size = IsFastDoubleElementsKind(kind)
+      ? kDoubleSize
+      : kPointerSize;
+
+  HConstant* elements_size_value = Add<HConstant>(elements_size);
+  HInstruction* mul =
+      HMul::NewImul(isolate(), zone(), context(), capacity->ActualValue(),
+                    elements_size_value);
+  AddInstruction(mul);
+  mul->ClearFlag(HValue::kCanOverflow);
+
+  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
+
+  HConstant* header_size = Add<HConstant>(FixedArray::kHeaderSize);
+  HValue* total_size = AddUncasted<HAdd>(mul, header_size);
+  total_size->ClearFlag(HValue::kCanOverflow);
+  return total_size;
+}
+
+
+HAllocate* HGraphBuilder::AllocateJSArrayObject(AllocationSiteMode mode) {
+  int base_size = JSArray::kSize;
+  if (mode == TRACK_ALLOCATION_SITE) {
+    base_size += AllocationMemento::kSize;
+  }
+  HConstant* size_in_bytes = Add<HConstant>(base_size);
+  return Add<HAllocate>(
+      size_in_bytes, HType::JSArray(), NOT_TENURED, JS_OBJECT_TYPE);
+}
+
+
+HConstant* HGraphBuilder::EstablishElementsAllocationSize(
+    ElementsKind kind,
+    int capacity) {
+  int base_size = IsFastDoubleElementsKind(kind)
+      ? FixedDoubleArray::SizeFor(capacity)
+      : FixedArray::SizeFor(capacity);
+
+  return Add<HConstant>(base_size);
+}
+
+
+HAllocate* HGraphBuilder::BuildAllocateElements(ElementsKind kind,
+                                                HValue* size_in_bytes) {
+  InstanceType instance_type = IsFastDoubleElementsKind(kind)
+      ? FIXED_DOUBLE_ARRAY_TYPE
+      : FIXED_ARRAY_TYPE;
+
+  return Add<HAllocate>(size_in_bytes, HType::HeapObject(), NOT_TENURED,
+                        instance_type);
+}
+
+
+void HGraphBuilder::BuildInitializeElementsHeader(HValue* elements,
+                                                  ElementsKind kind,
+                                                  HValue* capacity) {
+  Factory* factory = isolate()->factory();
+  Handle<Map> map = IsFastDoubleElementsKind(kind)
+      ? factory->fixed_double_array_map()
+      : factory->fixed_array_map();
+
+  Add<HStoreNamedField>(elements, HObjectAccess::ForMap(), Add<HConstant>(map));
+  Add<HStoreNamedField>(elements, HObjectAccess::ForFixedArrayLength(),
+                        capacity);
+}
+
+
+HValue* HGraphBuilder::BuildAllocateAndInitializeArray(ElementsKind kind,
+                                                       HValue* capacity) {
+  // The HForceRepresentation is to prevent possible deopt on int-smi
+  // conversion after allocation but before the new object fields are set.
+  capacity = AddUncasted<HForceRepresentation>(capacity, Representation::Smi());
+  HValue* size_in_bytes = BuildCalculateElementsSize(kind, capacity);
+  HValue* new_array = BuildAllocateElements(kind, size_in_bytes);
+  BuildInitializeElementsHeader(new_array, kind, capacity);
+  return new_array;
+}
+
+
+void HGraphBuilder::BuildJSArrayHeader(HValue* array,
+                                       HValue* array_map,
+                                       HValue* elements,
+                                       AllocationSiteMode mode,
+                                       ElementsKind elements_kind,
+                                       HValue* allocation_site_payload,
+                                       HValue* length_field) {
+  Add<HStoreNamedField>(array, HObjectAccess::ForMap(), array_map);
+
+  HConstant* empty_fixed_array =
+    Add<HConstant>(isolate()->factory()->empty_fixed_array());
+
+  Add<HStoreNamedField>(
+      array, HObjectAccess::ForPropertiesPointer(), empty_fixed_array);
+
+  Add<HStoreNamedField>(
+      array, HObjectAccess::ForElementsPointer(),
+      elements != NULL ? elements : empty_fixed_array);
+
+  Add<HStoreNamedField>(
+      array, HObjectAccess::ForArrayLength(elements_kind), length_field);
+
+  if (mode == TRACK_ALLOCATION_SITE) {
+    BuildCreateAllocationMemento(
+        array, Add<HConstant>(JSArray::kSize), allocation_site_payload);
+  }
+}
+
+
+HInstruction* HGraphBuilder::AddElementAccess(
+    HValue* elements, HValue* checked_key, HValue* val, HValue* dependency,
+    HValue* backing_store_owner, ElementsKind elements_kind,
+    PropertyAccessType access_type, LoadKeyedHoleMode load_mode) {
+  if (access_type == STORE) {
+    DCHECK(val != NULL);
+    if (elements_kind == UINT8_CLAMPED_ELEMENTS) {
+      val = Add<HClampToUint8>(val);
+    }
+    return Add<HStoreKeyed>(elements, checked_key, val, backing_store_owner,
+                            elements_kind, STORE_TO_INITIALIZED_ENTRY);
+  }
+
+  DCHECK(access_type == LOAD);
+  DCHECK(val == NULL);
+  HLoadKeyed* load =
+      Add<HLoadKeyed>(elements, checked_key, dependency, backing_store_owner,
+                      elements_kind, load_mode);
+  if (elements_kind == UINT32_ELEMENTS) {
+    graph()->RecordUint32Instruction(load);
+  }
+  return load;
+}
+
+
+HLoadNamedField* HGraphBuilder::AddLoadMap(HValue* object,
+                                           HValue* dependency) {
+  return Add<HLoadNamedField>(object, dependency, HObjectAccess::ForMap());
+}
+
+
+HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
+                                                HValue* dependency) {
+  return Add<HLoadNamedField>(
+      object, dependency, HObjectAccess::ForElementsPointer());
+}
+
+
+HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(
+    HValue* array,
+    HValue* dependency) {
+  return Add<HLoadNamedField>(
+      array, dependency, HObjectAccess::ForFixedArrayLength());
+}
+
+
+HLoadNamedField* HGraphBuilder::AddLoadArrayLength(HValue* array,
+                                                   ElementsKind kind,
+                                                   HValue* dependency) {
+  return Add<HLoadNamedField>(
+      array, dependency, HObjectAccess::ForArrayLength(kind));
+}
+
+
+HValue* HGraphBuilder::BuildNewElementsCapacity(HValue* old_capacity) {
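+  // Grow by half the old capacity plus a fixed minimum amount:
+  //   new_capacity = old_capacity + old_capacity / 2 + 16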
+  HValue* half_old_capacity = AddUncasted<HShr>(old_capacity,
+                                                graph_->GetConstant1());
+
+  HValue* new_capacity = AddUncasted<HAdd>(half_old_capacity, old_capacity);
+  new_capacity->ClearFlag(HValue::kCanOverflow);
+
+  HValue* min_growth = Add<HConstant>(16);
+
+  new_capacity = AddUncasted<HAdd>(new_capacity, min_growth);
+  new_capacity->ClearFlag(HValue::kCanOverflow);
+
+  return new_capacity;
+}
+
+
+HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
+                                                 HValue* elements,
+                                                 ElementsKind kind,
+                                                 ElementsKind new_kind,
+                                                 HValue* length,
+                                                 HValue* new_capacity) {
+  Add<HBoundsCheck>(new_capacity, Add<HConstant>(
+          (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >>
+          ElementsKindToShiftSize(new_kind)));
+
+  HValue* new_elements =
+      BuildAllocateAndInitializeArray(new_kind, new_capacity);
+
+  BuildCopyElements(elements, kind, new_elements,
+                    new_kind, length, new_capacity);
+
+  Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+                        new_elements);
+
+  return new_elements;
+}
+
+
+void HGraphBuilder::BuildFillElementsWithValue(HValue* elements,
+                                               ElementsKind elements_kind,
+                                               HValue* from,
+                                               HValue* to,
+                                               HValue* value) {
+  if (to == NULL) {
+    to = AddLoadFixedArrayLength(elements);
+  }
+
+  // Special case: unroll the loop for small constant bounds.
+  STATIC_ASSERT(JSArray::kPreallocatedArrayElements <=
+                kElementLoopUnrollThreshold);
+  int initial_capacity = -1;
+  if (from->IsInteger32Constant() && to->IsInteger32Constant()) {
+    int constant_from = from->GetInteger32Constant();
+    int constant_to = to->GetInteger32Constant();
+
+    if (constant_from == 0 && constant_to <= kElementLoopUnrollThreshold) {
+      initial_capacity = constant_to;
+    }
+  }
+
+  if (initial_capacity >= 0) {
+    for (int i = 0; i < initial_capacity; i++) {
+      HInstruction* key = Add<HConstant>(i);
+      Add<HStoreKeyed>(elements, key, value, nullptr, elements_kind);
+    }
+  } else {
+    // Carefully loop backwards so that the "from" remains live through the
+    // loop rather than the "to". This often corresponds to keeping length
+    // live rather than capacity, which helps register allocation, since
+    // length is used more often than capacity after filling with holes.
+    LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
+
+    HValue* key = builder.BeginBody(to, from, Token::GT);
+
+    HValue* adjusted_key = AddUncasted<HSub>(key, graph()->GetConstant1());
+    adjusted_key->ClearFlag(HValue::kCanOverflow);
+
+    Add<HStoreKeyed>(elements, adjusted_key, value, nullptr, elements_kind);
+
+    builder.EndBody();
+  }
+}
+
+
+void HGraphBuilder::BuildFillElementsWithHole(HValue* elements,
+                                              ElementsKind elements_kind,
+                                              HValue* from,
+                                              HValue* to) {
+  // Fast elements kinds need to be initialized in case the statements below
+  // cause a garbage collection.
+
+  HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
+                     ? graph()->GetConstantHole()
+                     : Add<HConstant>(HConstant::kHoleNaN);
+
+  // Since we're about to store a hole value, the store instruction below must
+  // assume an elements kind that supports heap object values.
+  if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+    elements_kind = FAST_HOLEY_ELEMENTS;
+  }
+
+  BuildFillElementsWithValue(elements, elements_kind, from, to, hole);
+}
+
+
+void HGraphBuilder::BuildCopyProperties(HValue* from_properties,
+                                        HValue* to_properties, HValue* length,
+                                        HValue* capacity) {
+  ElementsKind kind = FAST_ELEMENTS;
+
+  BuildFillElementsWithValue(to_properties, kind, length, capacity,
+                             graph()->GetConstantUndefined());
+
+  LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
+
+  HValue* key = builder.BeginBody(length, graph()->GetConstant0(), Token::GT);
+
+  key = AddUncasted<HSub>(key, graph()->GetConstant1());
+  key->ClearFlag(HValue::kCanOverflow);
+
+  HValue* element =
+      Add<HLoadKeyed>(from_properties, key, nullptr, nullptr, kind);
+
+  Add<HStoreKeyed>(to_properties, key, element, nullptr, kind);
+
+  builder.EndBody();
+}
+
+
+void HGraphBuilder::BuildCopyElements(HValue* from_elements,
+                                      ElementsKind from_elements_kind,
+                                      HValue* to_elements,
+                                      ElementsKind to_elements_kind,
+                                      HValue* length,
+                                      HValue* capacity) {
+  int constant_capacity = -1;
+  if (capacity != NULL &&
+      capacity->IsConstant() &&
+      HConstant::cast(capacity)->HasInteger32Value()) {
+    int constant_candidate = HConstant::cast(capacity)->Integer32Value();
+    if (constant_candidate <= kElementLoopUnrollThreshold) {
+      constant_capacity = constant_candidate;
+    }
+  }
+
+  bool pre_fill_with_holes =
+    IsFastDoubleElementsKind(from_elements_kind) &&
+    IsFastObjectElementsKind(to_elements_kind);
+  if (pre_fill_with_holes) {
+    // If the copy might trigger a GC, make sure that the FixedArray is
+    // pre-initialized with holes to make sure that it's always in a
+    // consistent state.
+    BuildFillElementsWithHole(to_elements, to_elements_kind,
+                              graph()->GetConstant0(), NULL);
+  }
+
+  if (constant_capacity != -1) {
+    // Unroll the loop for small elements kinds.
+    for (int i = 0; i < constant_capacity; i++) {
+      HValue* key_constant = Add<HConstant>(i);
+      HInstruction* value = Add<HLoadKeyed>(
+          from_elements, key_constant, nullptr, nullptr, from_elements_kind);
+      Add<HStoreKeyed>(to_elements, key_constant, value, nullptr,
+                       to_elements_kind);
+    }
+  } else {
+    if (!pre_fill_with_holes &&
+        (capacity == NULL || !length->Equals(capacity))) {
+      BuildFillElementsWithHole(to_elements, to_elements_kind,
+                                length, NULL);
+    }
+
+    LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
+
+    HValue* key = builder.BeginBody(length, graph()->GetConstant0(),
+                                    Token::GT);
+
+    key = AddUncasted<HSub>(key, graph()->GetConstant1());
+    key->ClearFlag(HValue::kCanOverflow);
+
+    HValue* element = Add<HLoadKeyed>(from_elements, key, nullptr, nullptr,
+                                      from_elements_kind, ALLOW_RETURN_HOLE);
+
+    ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
+                         IsFastSmiElementsKind(to_elements_kind))
+      ? FAST_HOLEY_ELEMENTS : to_elements_kind;
+
+    if (IsHoleyElementsKind(from_elements_kind) &&
+        from_elements_kind != to_elements_kind) {
+      IfBuilder if_hole(this);
+      if_hole.If<HCompareHoleAndBranch>(element);
+      if_hole.Then();
+      HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
+                                     ? Add<HConstant>(HConstant::kHoleNaN)
+                                     : graph()->GetConstantHole();
+      Add<HStoreKeyed>(to_elements, key, hole_constant, nullptr, kind);
+      if_hole.Else();
+      HStoreKeyed* store =
+          Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
+      store->SetFlag(HValue::kAllowUndefinedAsNaN);
+      if_hole.End();
+    } else {
+      HStoreKeyed* store =
+          Add<HStoreKeyed>(to_elements, key, element, nullptr, kind);
+      store->SetFlag(HValue::kAllowUndefinedAsNaN);
+    }
+
+    builder.EndBody();
+  }
+
+  Counters* counters = isolate()->counters();
+  AddIncrementCounter(counters->inlined_copied_elements());
+}
+
+
+HValue* HGraphBuilder::BuildCloneShallowArrayCow(HValue* boilerplate,
+                                                 HValue* allocation_site,
+                                                 AllocationSiteMode mode,
+                                                 ElementsKind kind) {
+  HAllocate* array = AllocateJSArrayObject(mode);
+
+  HValue* map = AddLoadMap(boilerplate);
+  HValue* elements = AddLoadElements(boilerplate);
+  HValue* length = AddLoadArrayLength(boilerplate, kind);
+
+  BuildJSArrayHeader(array,
+                     map,
+                     elements,
+                     mode,
+                     FAST_ELEMENTS,
+                     allocation_site,
+                     length);
+  return array;
+}
+
+
+HValue* HGraphBuilder::BuildCloneShallowArrayEmpty(HValue* boilerplate,
+                                                   HValue* allocation_site,
+                                                   AllocationSiteMode mode) {
+  HAllocate* array = AllocateJSArrayObject(mode);
+
+  HValue* map = AddLoadMap(boilerplate);
+
+  BuildJSArrayHeader(array,
+                     map,
+                     NULL,  // set elements to empty fixed array
+                     mode,
+                     FAST_ELEMENTS,
+                     allocation_site,
+                     graph()->GetConstant0());
+  return array;
+}
+
+
+HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
+                                                      HValue* allocation_site,
+                                                      AllocationSiteMode mode,
+                                                      ElementsKind kind) {
+  HValue* boilerplate_elements = AddLoadElements(boilerplate);
+  HValue* capacity = AddLoadFixedArrayLength(boilerplate_elements);
+
+  // Generate size calculation code here in order to make it dominate
+  // the JSArray allocation.
+  HValue* elements_size = BuildCalculateElementsSize(kind, capacity);
+
+  // Create an empty JSArray object for now; store elimination should remove
+  // redundant initialization of the elements and length fields, and at the
+  // same time the object will be fully prepared for GC if it happens during
+  // elements allocation.
+  HValue* result = BuildCloneShallowArrayEmpty(
+      boilerplate, allocation_site, mode);
+
+  HAllocate* elements = BuildAllocateElements(kind, elements_size);
+
+  // This function implicitly relies on the fact that the
+  // FastCloneShallowArrayStub is called only for literals shorter than
+  // JSArray::kInitialMaxFastElementArray.
+  // Can't add an HBoundsCheck here because otherwise the stub would eagerly
+  // create a frame.
+  HConstant* size_upper_bound = EstablishElementsAllocationSize(
+      kind, JSArray::kInitialMaxFastElementArray);
+  elements->set_size_upper_bound(size_upper_bound);
+
+  Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(), elements);
+
+  // The allocation for the cloned array above causes register pressure on
+  // machines with low register counts. Force a reload of the boilerplate
+  // elements here to free up a register for the allocation to avoid unnecessary
+  // spillage.
+  boilerplate_elements = AddLoadElements(boilerplate);
+  boilerplate_elements->SetFlag(HValue::kCantBeReplaced);
+
+  // Copy the elements array header.
+  for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
+    HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
+    Add<HStoreNamedField>(
+        elements, access,
+        Add<HLoadNamedField>(boilerplate_elements, nullptr, access));
+  }
+
+  // Set the length field of the result array.
+  HValue* length = AddLoadArrayLength(boilerplate, kind);
+  Add<HStoreNamedField>(result, HObjectAccess::ForArrayLength(kind), length);
+
+  BuildCopyElements(boilerplate_elements, kind, elements,
+                    kind, length, NULL);
+  return result;
+}
+
+
+void HGraphBuilder::BuildCompareNil(HValue* value, Type* type,
+                                    HIfContinuation* continuation,
+                                    MapEmbedding map_embedding) {
+  IfBuilder if_nil(this);
+  bool some_case_handled = false;
+  bool some_case_missing = false;
+
+  if (type->Maybe(Type::Null())) {
+    if (some_case_handled) if_nil.Or();
+    if_nil.If<HCompareObjectEqAndBranch>(value, graph()->GetConstantNull());
+    some_case_handled = true;
+  } else {
+    some_case_missing = true;
+  }
+
+  if (type->Maybe(Type::Undefined())) {
+    if (some_case_handled) if_nil.Or();
+    if_nil.If<HCompareObjectEqAndBranch>(value,
+                                         graph()->GetConstantUndefined());
+    some_case_handled = true;
+  } else {
+    some_case_missing = true;
+  }
+
+  if (type->Maybe(Type::Undetectable())) {
+    if (some_case_handled) if_nil.Or();
+    if_nil.If<HIsUndetectableAndBranch>(value);
+    some_case_handled = true;
+  } else {
+    some_case_missing = true;
+  }
+
+  if (some_case_missing) {
+    if_nil.Then();
+    if_nil.Else();
+    if (type->NumClasses() == 1) {
+      BuildCheckHeapObject(value);
+      // For ICs, the map checked below is a sentinel map that gets replaced by
+      // the monomorphic map when the code is used as a template to generate a
+      // new IC. For optimized functions, there is no sentinel map, the map
+      // emitted below is the actual monomorphic map.
+      if (map_embedding == kEmbedMapsViaWeakCells) {
+        HValue* cell =
+            Add<HConstant>(Map::WeakCellForMap(type->Classes().Current()));
+        HValue* expected_map = Add<HLoadNamedField>(
+            cell, nullptr, HObjectAccess::ForWeakCellValue());
+        HValue* map =
+            Add<HLoadNamedField>(value, nullptr, HObjectAccess::ForMap());
+        IfBuilder map_check(this);
+        map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
+        map_check.ThenDeopt(Deoptimizer::kUnknownMap);
+        map_check.End();
+      } else {
+        DCHECK(map_embedding == kEmbedMapsDirectly);
+        Add<HCheckMaps>(value, type->Classes().Current());
+      }
+    } else {
+      if_nil.Deopt(Deoptimizer::kTooManyUndetectableTypes);
+    }
+  }
+
+  if_nil.CaptureContinuation(continuation);
+}
+
+
+void HGraphBuilder::BuildCreateAllocationMemento(
+    HValue* previous_object,
+    HValue* previous_object_size,
+    HValue* allocation_site) {
+  DCHECK(allocation_site != NULL);
+  HInnerAllocatedObject* allocation_memento = Add<HInnerAllocatedObject>(
+      previous_object, previous_object_size, HType::HeapObject());
+  AddStoreMapConstant(
+      allocation_memento, isolate()->factory()->allocation_memento_map());
+  Add<HStoreNamedField>(
+      allocation_memento,
+      HObjectAccess::ForAllocationMementoSite(),
+      allocation_site);
+  if (FLAG_allocation_site_pretenuring) {
+    HValue* memento_create_count =
+        Add<HLoadNamedField>(allocation_site, nullptr,
+                             HObjectAccess::ForAllocationSiteOffset(
+                                 AllocationSite::kPretenureCreateCountOffset));
+    memento_create_count = AddUncasted<HAdd>(
+        memento_create_count, graph()->GetConstant1());
+    // This smi value is reset to zero after every gc, overflow isn't a problem
+    // since the counter is bounded by the new space size.
+    memento_create_count->ClearFlag(HValue::kCanOverflow);
+    Add<HStoreNamedField>(
+        allocation_site, HObjectAccess::ForAllocationSiteOffset(
+            AllocationSite::kPretenureCreateCountOffset), memento_create_count);
+  }
+}
+
+
+HInstruction* HGraphBuilder::BuildGetNativeContext() {
+  return Add<HLoadNamedField>(
+      context(), nullptr,
+      HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
+}
+
+
+HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* closure) {
+  // Load the closure's context, then get the native context from it.
+  HInstruction* context = Add<HLoadNamedField>(
+      closure, nullptr, HObjectAccess::ForFunctionContextPointer());
+  return Add<HLoadNamedField>(
+      context, nullptr,
+      HObjectAccess::ForContextSlot(Context::NATIVE_CONTEXT_INDEX));
+}
+
+
+HInstruction* HGraphBuilder::BuildGetScriptContext(int context_index) {
+  HValue* native_context = BuildGetNativeContext();
+  HValue* script_context_table = Add<HLoadNamedField>(
+      native_context, nullptr,
+      HObjectAccess::ForContextSlot(Context::SCRIPT_CONTEXT_TABLE_INDEX));
+  return Add<HLoadNamedField>(script_context_table, nullptr,
+                              HObjectAccess::ForScriptContext(context_index));
+}
+
+
+HValue* HGraphBuilder::BuildGetParentContext(HValue* depth, int depth_value) {
+  HValue* script_context = context();
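+  // Walk up the context chain through Context::PREVIOUS_INDEX slots, either
+  // in a loop when the depth is only known dynamically, or fully unrolled
+  // when a constant depth_value is given.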
+  if (depth != NULL) {
+    HValue* zero = graph()->GetConstant0();
+
+    Push(script_context);
+    Push(depth);
+
+    LoopBuilder loop(this);
+    loop.BeginBody(2);  // Drop script_context and depth from last environment
+                        // to appease live range building without simulates.
+    depth = Pop();
+    script_context = Pop();
+
+    script_context = Add<HLoadNamedField>(
+        script_context, nullptr,
+        HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+    depth = AddUncasted<HSub>(depth, graph()->GetConstant1());
+    depth->ClearFlag(HValue::kCanOverflow);
+
+    IfBuilder if_break(this);
+    if_break.If<HCompareNumericAndBranch, HValue*>(depth, zero, Token::EQ);
+    if_break.Then();
+    {
+      Push(script_context);  // The result.
+      loop.Break();
+    }
+    if_break.Else();
+    {
+      Push(script_context);
+      Push(depth);
+    }
+    loop.EndBody();
+    if_break.End();
+
+    script_context = Pop();
+  } else if (depth_value > 0) {
+    // Unroll the above loop.
+    for (int i = 0; i < depth_value; i++) {
+      script_context = Add<HLoadNamedField>(
+          script_context, nullptr,
+          HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+    }
+  }
+  return script_context;
+}
+
+
+HInstruction* HGraphBuilder::BuildGetArrayFunction() {
+  HInstruction* native_context = BuildGetNativeContext();
+  HInstruction* index =
+      Add<HConstant>(static_cast<int32_t>(Context::ARRAY_FUNCTION_INDEX));
+  return Add<HLoadKeyed>(native_context, index, nullptr, nullptr,
+                         FAST_ELEMENTS);
+}
+
+
+HValue* HGraphBuilder::BuildArrayBufferViewFieldAccessor(HValue* object,
+                                                         HValue* checked_object,
+                                                         FieldIndex index) {
+  NoObservableSideEffectsScope scope(this);
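+  // If the view's underlying JSArrayBuffer has been neutered (detached), the
+  // accessor must return zero rather than the stale field value; the neutered
+  // bit is read from the buffer's bit field below.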
+  HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
+      index.offset(), Representation::Tagged());
+  HInstruction* buffer = Add<HLoadNamedField>(
+      object, checked_object, HObjectAccess::ForJSArrayBufferViewBuffer());
+  HInstruction* field = Add<HLoadNamedField>(object, checked_object, access);
+
+  HInstruction* flags = Add<HLoadNamedField>(
+      buffer, nullptr, HObjectAccess::ForJSArrayBufferBitField());
+  HValue* was_neutered_mask =
+      Add<HConstant>(1 << JSArrayBuffer::WasNeutered::kShift);
+  HValue* was_neutered_test =
+      AddUncasted<HBitwise>(Token::BIT_AND, flags, was_neutered_mask);
+
+  IfBuilder if_was_neutered(this);
+  if_was_neutered.If<HCompareNumericAndBranch>(
+      was_neutered_test, graph()->GetConstant0(), Token::NE);
+  if_was_neutered.Then();
+  Push(graph()->GetConstant0());
+  if_was_neutered.Else();
+  Push(field);
+  if_was_neutered.End();
+
+  return Pop();
+}
+
+
+HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
+    ElementsKind kind,
+    HValue* allocation_site_payload,
+    HValue* constructor_function,
+    AllocationSiteOverrideMode override_mode) :
+        builder_(builder),
+        kind_(kind),
+        allocation_site_payload_(allocation_site_payload),
+        constructor_function_(constructor_function) {
+  DCHECK(!allocation_site_payload->IsConstant() ||
+         HConstant::cast(allocation_site_payload)->handle(
+             builder_->isolate())->IsAllocationSite());
+  mode_ = override_mode == DISABLE_ALLOCATION_SITES
+      ? DONT_TRACK_ALLOCATION_SITE
+      : AllocationSite::GetMode(kind);
+}
+
+
+HGraphBuilder::JSArrayBuilder::JSArrayBuilder(HGraphBuilder* builder,
+                                              ElementsKind kind,
+                                              HValue* constructor_function) :
+    builder_(builder),
+    kind_(kind),
+    mode_(DONT_TRACK_ALLOCATION_SITE),
+    allocation_site_payload_(NULL),
+    constructor_function_(constructor_function) {
+}
+
+
+HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
+  if (!builder()->top_info()->IsStub()) {
+    // A constant map is fine.
+    Handle<Map> map(builder()->isolate()->get_initial_js_array_map(kind_),
+                    builder()->isolate());
+    return builder()->Add<HConstant>(map);
+  }
+
+  if (constructor_function_ != NULL && kind_ == GetInitialFastElementsKind()) {
+    // No need for a context lookup if the kind_ matches the initial
+    // map, because we can just load the map in that case.
+    HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
+    return builder()->Add<HLoadNamedField>(constructor_function_, nullptr,
+                                           access);
+  }
+
+  // TODO(mvstanton): we should always have a constructor function if we
+  // are creating a stub.
+  HInstruction* native_context = constructor_function_ != NULL
+      ? builder()->BuildGetNativeContext(constructor_function_)
+      : builder()->BuildGetNativeContext();
+
+  HObjectAccess access =
+      HObjectAccess::ForContextSlot(Context::ArrayMapIndex(kind_));
+  return builder()->Add<HLoadNamedField>(native_context, nullptr, access);
+}
+
+
+HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
+  // Load the initial map stored on the constructor function.
+  HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
+  return builder()->Add<HLoadNamedField>(constructor_function_, nullptr,
+                                         access);
+}
+
+
+HAllocate* HGraphBuilder::JSArrayBuilder::AllocateEmptyArray() {
+  HConstant* capacity = builder()->Add<HConstant>(initial_capacity());
+  return AllocateArray(capacity,
+                       capacity,
+                       builder()->graph()->GetConstant0());
+}
+
+
+HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
+    HValue* capacity,
+    HConstant* capacity_upper_bound,
+    HValue* length_field,
+    FillMode fill_mode) {
+  return AllocateArray(capacity,
+                       capacity_upper_bound->GetInteger32Constant(),
+                       length_field,
+                       fill_mode);
+}
+
+
+HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
+    HValue* capacity,
+    int capacity_upper_bound,
+    HValue* length_field,
+    FillMode fill_mode) {
+  HConstant* elements_size_upper_bound = capacity->IsInteger32Constant()
+      ? HConstant::cast(capacity)
+      : builder()->EstablishElementsAllocationSize(kind_, capacity_upper_bound);
+
+  HAllocate* array = AllocateArray(capacity, length_field, fill_mode);
+  if (!elements_location_->has_size_upper_bound()) {
+    elements_location_->set_size_upper_bound(elements_size_upper_bound);
+  }
+  return array;
+}
+
+
+HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
+    HValue* capacity,
+    HValue* length_field,
+    FillMode fill_mode) {
+  // These HForceRepresentations are because we store these as fields in the
+  // objects we construct, and an int32-to-smi HChange could deopt. Accept
+  // the deopt possibility now, before allocation occurs.
+  capacity =
+      builder()->AddUncasted<HForceRepresentation>(capacity,
+                                                   Representation::Smi());
+  length_field =
+      builder()->AddUncasted<HForceRepresentation>(length_field,
+                                                   Representation::Smi());
+
+  // Generate size calculation code here in order to make it dominate
+  // the JSArray allocation.
+  HValue* elements_size =
+      builder()->BuildCalculateElementsSize(kind_, capacity);
+
+  // Bail out for large objects.
+  HValue* max_regular_heap_object_size =
+      builder()->Add<HConstant>(Page::kMaxRegularHeapObjectSize);
+  builder()->Add<HBoundsCheck>(elements_size, max_regular_heap_object_size);
+
+  // Allocate (dealing with failure appropriately)
+  HAllocate* array_object = builder()->AllocateJSArrayObject(mode_);
+
+  // Fill in the fields: map, properties, length
+  HValue* map;
+  if (allocation_site_payload_ == NULL) {
+    map = EmitInternalMapCode();
+  } else {
+    map = EmitMapCode();
+  }
+
+  builder()->BuildJSArrayHeader(array_object,
+                                map,
+                                NULL,  // set elements to empty fixed array
+                                mode_,
+                                kind_,
+                                allocation_site_payload_,
+                                length_field);
+
+  // Allocate and initialize the elements
+  elements_location_ = builder()->BuildAllocateElements(kind_, elements_size);
+
+  builder()->BuildInitializeElementsHeader(elements_location_, kind_, capacity);
+
+  // Set the elements
+  builder()->Add<HStoreNamedField>(
+      array_object, HObjectAccess::ForElementsPointer(), elements_location_);
+
+  if (fill_mode == FILL_WITH_HOLE) {
+    builder()->BuildFillElementsWithHole(elements_location_, kind_,
+                                         graph()->GetConstant0(), capacity);
+  }
+
+  return array_object;
+}
+
+
+HValue* HGraphBuilder::AddLoadJSBuiltin(int context_index) {
+  HValue* native_context = BuildGetNativeContext();
+  HObjectAccess function_access = HObjectAccess::ForContextSlot(context_index);
+  return Add<HLoadNamedField>(native_context, nullptr, function_access);
+}
+
+
+HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
+    : HGraphBuilder(info),
+      function_state_(NULL),
+      initial_function_state_(this, info, NORMAL_RETURN, 0),
+      ast_context_(NULL),
+      break_scope_(NULL),
+      inlined_count_(0),
+      globals_(10, info->zone()),
+      osr_(new(info->zone()) HOsrBuilder(this)) {
+  // This is not initialized in the initializer list because the
+  // constructor for the initial state relies on function_state_ == NULL
+  // to know it's the initial state.
+  function_state_ = &initial_function_state_;
+  InitializeAstVisitor(info->isolate());
+  if (top_info()->is_tracking_positions()) {
+    SetSourcePosition(info->shared_info()->start_position());
+  }
+}
+
+
+HBasicBlock* HOptimizedGraphBuilder::CreateJoin(HBasicBlock* first,
+                                                HBasicBlock* second,
+                                                BailoutId join_id) {
+  if (first == NULL) {
+    return second;
+  } else if (second == NULL) {
+    return first;
+  } else {
+    HBasicBlock* join_block = graph()->CreateBasicBlock();
+    Goto(first, join_block);
+    Goto(second, join_block);
+    join_block->SetJoinId(join_id);
+    return join_block;
+  }
+}
+
+
+HBasicBlock* HOptimizedGraphBuilder::JoinContinue(IterationStatement* statement,
+                                                  HBasicBlock* exit_block,
+                                                  HBasicBlock* continue_block) {
+  if (continue_block != NULL) {
+    if (exit_block != NULL) Goto(exit_block, continue_block);
+    continue_block->SetJoinId(statement->ContinueId());
+    return continue_block;
+  }
+  return exit_block;
+}
+
+
+HBasicBlock* HOptimizedGraphBuilder::CreateLoop(IterationStatement* statement,
+                                                HBasicBlock* loop_entry,
+                                                HBasicBlock* body_exit,
+                                                HBasicBlock* loop_successor,
+                                                HBasicBlock* break_block) {
+  if (body_exit != NULL) Goto(body_exit, loop_entry);
+  loop_entry->PostProcessLoopHeader(statement);
+  if (break_block != NULL) {
+    if (loop_successor != NULL) Goto(loop_successor, break_block);
+    break_block->SetJoinId(statement->ExitId());
+    return break_block;
+  }
+  return loop_successor;
+}
+
+
+// Build a new loop header block and set it as the current block.
+HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry() {
+  HBasicBlock* loop_entry = CreateLoopHeaderBlock();
+  Goto(loop_entry);
+  set_current_block(loop_entry);
+  return loop_entry;
+}
+
+
+HBasicBlock* HOptimizedGraphBuilder::BuildLoopEntry(
+    IterationStatement* statement) {
+  HBasicBlock* loop_entry = osr()->HasOsrEntryAt(statement)
+      ? osr()->BuildOsrLoopEntry(statement)
+      : BuildLoopEntry();
+  return loop_entry;
+}
+
+
+void HBasicBlock::FinishExit(HControlInstruction* instruction,
+                             SourcePosition position) {
+  Finish(instruction, position);
+  ClearEnvironment();
+}
+
+
+std::ostream& operator<<(std::ostream& os, const HBasicBlock& b) {
+  return os << "B" << b.block_id();
+}
+
+
+HGraph::HGraph(CompilationInfo* info)
+    : isolate_(info->isolate()),
+      next_block_id_(0),
+      entry_block_(NULL),
+      blocks_(8, info->zone()),
+      values_(16, info->zone()),
+      phi_list_(NULL),
+      uint32_instructions_(NULL),
+      osr_(NULL),
+      info_(info),
+      zone_(info->zone()),
+      is_recursive_(false),
+      use_optimistic_licm_(false),
+      depends_on_empty_array_proto_elements_(false),
+      type_change_checksum_(0),
+      maximum_environment_size_(0),
+      no_side_effects_scope_count_(0),
+      disallow_adding_new_values_(false) {
+  if (info->IsStub()) {
+    CallInterfaceDescriptor descriptor =
+        info->code_stub()->GetCallInterfaceDescriptor();
+    start_environment_ =
+        new (zone_) HEnvironment(zone_, descriptor.GetRegisterParameterCount());
+  } else {
+    if (info->is_tracking_positions()) {
+      info->TraceInlinedFunction(info->shared_info(), SourcePosition::Unknown(),
+                                 InlinedFunctionInfo::kNoParentId);
+    }
+    start_environment_ =
+        new(zone_) HEnvironment(NULL, info->scope(), info->closure(), zone_);
+  }
+  start_environment_->set_ast_id(BailoutId::FunctionContext());
+  entry_block_ = CreateBasicBlock();
+  entry_block_->SetInitialEnvironment(start_environment_);
+}
+
+
+HBasicBlock* HGraph::CreateBasicBlock() {
+  HBasicBlock* result = new(zone()) HBasicBlock(this);
+  blocks_.Add(result, zone());
+  return result;
+}
+
+
+void HGraph::FinalizeUniqueness() {
+  DisallowHeapAllocation no_gc;
+  for (int i = 0; i < blocks()->length(); ++i) {
+    for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
+      it.Current()->FinalizeUniqueness();
+    }
+  }
+}
+
+
+int HGraph::SourcePositionToScriptPosition(SourcePosition pos) {
+  return (FLAG_hydrogen_track_positions && !pos.IsUnknown())
+             ? info()->start_position_for(pos.inlining_id()) + pos.position()
+             : pos.raw();
+}
+
+
+// Block ordering was implemented with two mutually recursive methods,
+// HGraph::Postorder and HGraph::PostorderLoopBlocks.
+// The recursion could lead to stack overflow, so the algorithm has been
+// implemented iteratively.
+// At a high level the algorithm looks like this:
+//
+// Postorder(block, loop_header) : {
+//   if (block has already been visited or belongs to another loop) return;
+//   mark block as visited;
+//   if (block is a loop header) {
+//     VisitLoopMembers(block, loop_header);
+//     VisitSuccessorsOfLoopHeader(block);
+//   } else {
+//     VisitSuccessors(block, loop_header)
+//   }
+//   put block in result list;
+// }
+//
+// VisitLoopMembers(block, outer_loop_header) {
+//   foreach (block b in block loop members) {
+//     VisitSuccessorsOfLoopMember(b, outer_loop_header);
+//     if (b is loop header) VisitLoopMembers(b, outer_loop_header);
+//   }
+// }
+//
+// VisitSuccessorsOfLoopMember(block, outer_loop_header) {
+//   foreach (block b in block successors) Postorder(b, outer_loop_header)
+// }
+//
+// VisitSuccessorsOfLoopHeader(block) {
+//   foreach (block b in block successors) Postorder(b, block)
+// }
+//
+// VisitSuccessors(block, loop_header) {
+//   foreach (block b in block successors) Postorder(b, loop_header)
+// }
+//
+// The ordering is started by calling Postorder(entry, NULL).
+//
+// Each instance of PostorderProcessor represents one "stack frame" of the
+// recursion; in particular, it keeps the iteration state of the "Visit..."
+// loop it stands for.
+// To recycle memory we keep all the frames in a doubly-linked list, but
+// this means that we cannot use constructors to initialize the frames.
+//
+class PostorderProcessor : public ZoneObject {
+ public:
+  // Back link (towards the stack bottom).
+  PostorderProcessor* parent() { return father_; }
+  // Forward link (towards the stack top).
+  PostorderProcessor* child() { return child_; }
+  HBasicBlock* block() { return block_; }
+  HLoopInformation* loop() { return loop_; }
+  HBasicBlock* loop_header() { return loop_header_; }
+
+  static PostorderProcessor* CreateEntryProcessor(Zone* zone,
+                                                  HBasicBlock* block) {
+    PostorderProcessor* result = new(zone) PostorderProcessor(NULL);
+    return result->SetupSuccessors(zone, block, NULL);
+  }
+
+  PostorderProcessor* PerformStep(Zone* zone,
+                                  ZoneList<HBasicBlock*>* order) {
+    PostorderProcessor* next =
+        PerformNonBacktrackingStep(zone, order);
+    if (next != NULL) {
+      return next;
+    } else {
+      return Backtrack(zone, order);
+    }
+  }
+
+ private:
+  explicit PostorderProcessor(PostorderProcessor* father)
+      : father_(father), child_(NULL), successor_iterator(NULL) { }
+
+  // Each enum value identifies the iteration ("cycle") whose state is kept
+  // by this instance.
+  enum LoopKind {
+    NONE,
+    SUCCESSORS,
+    SUCCESSORS_OF_LOOP_HEADER,
+    LOOP_MEMBERS,
+    SUCCESSORS_OF_LOOP_MEMBER
+  };
+
+  // Each "Setup..." method is like a constructor for a cycle state.
+  PostorderProcessor* SetupSuccessors(Zone* zone,
+                                      HBasicBlock* block,
+                                      HBasicBlock* loop_header) {
+    if (block == NULL || block->IsOrdered() ||
+        block->parent_loop_header() != loop_header) {
+      kind_ = NONE;
+      block_ = NULL;
+      loop_ = NULL;
+      loop_header_ = NULL;
+      return this;
+    } else {
+      block_ = block;
+      loop_ = NULL;
+      block->MarkAsOrdered();
+
+      if (block->IsLoopHeader()) {
+        kind_ = SUCCESSORS_OF_LOOP_HEADER;
+        loop_header_ = block;
+        InitializeSuccessors();
+        PostorderProcessor* result = Push(zone);
+        return result->SetupLoopMembers(zone, block, block->loop_information(),
+                                        loop_header);
+      } else {
+        DCHECK(block->IsFinished());
+        kind_ = SUCCESSORS;
+        loop_header_ = loop_header;
+        InitializeSuccessors();
+        return this;
+      }
+    }
+  }
+
+  PostorderProcessor* SetupLoopMembers(Zone* zone,
+                                       HBasicBlock* block,
+                                       HLoopInformation* loop,
+                                       HBasicBlock* loop_header) {
+    kind_ = LOOP_MEMBERS;
+    block_ = block;
+    loop_ = loop;
+    loop_header_ = loop_header;
+    InitializeLoopMembers();
+    return this;
+  }
+
+  PostorderProcessor* SetupSuccessorsOfLoopMember(
+      HBasicBlock* block,
+      HLoopInformation* loop,
+      HBasicBlock* loop_header) {
+    kind_ = SUCCESSORS_OF_LOOP_MEMBER;
+    block_ = block;
+    loop_ = loop;
+    loop_header_ = loop_header;
+    InitializeSuccessors();
+    return this;
+  }
+
+  // This method "allocates" a new stack frame.
+  PostorderProcessor* Push(Zone* zone) {
+    if (child_ == NULL) {
+      child_ = new(zone) PostorderProcessor(this);
+    }
+    return child_;
+  }
+
+  void ClosePostorder(ZoneList<HBasicBlock*>* order, Zone* zone) {
+    DCHECK(block_->end()->FirstSuccessor() == NULL ||
+           order->Contains(block_->end()->FirstSuccessor()) ||
+           block_->end()->FirstSuccessor()->IsLoopHeader());
+    DCHECK(block_->end()->SecondSuccessor() == NULL ||
+           order->Contains(block_->end()->SecondSuccessor()) ||
+           block_->end()->SecondSuccessor()->IsLoopHeader());
+    order->Add(block_, zone);
+  }
+
+  // This method is the basic building block used to walk up the stack.
+  PostorderProcessor* Pop(Zone* zone,
+                          ZoneList<HBasicBlock*>* order) {
+    switch (kind_) {
+      case SUCCESSORS:
+      case SUCCESSORS_OF_LOOP_HEADER:
+        ClosePostorder(order, zone);
+        return father_;
+      case LOOP_MEMBERS:
+        return father_;
+      case SUCCESSORS_OF_LOOP_MEMBER:
+        if (block()->IsLoopHeader() && block() != loop_->loop_header()) {
+          // In this case we need to perform a LOOP_MEMBERS cycle so we
+          // initialize it and return this instead of father.
+          return SetupLoopMembers(zone, block(),
+                                  block()->loop_information(), loop_header_);
+        } else {
+          return father_;
+        }
+      case NONE:
+        return father_;
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+  // Walks up the stack.
+  PostorderProcessor* Backtrack(Zone* zone,
+                                ZoneList<HBasicBlock*>* order) {
+    PostorderProcessor* parent = Pop(zone, order);
+    while (parent != NULL) {
+      PostorderProcessor* next =
+          parent->PerformNonBacktrackingStep(zone, order);
+      if (next != NULL) {
+        return next;
+      } else {
+        parent = parent->Pop(zone, order);
+      }
+    }
+    return NULL;
+  }
+
+  PostorderProcessor* PerformNonBacktrackingStep(
+      Zone* zone,
+      ZoneList<HBasicBlock*>* order) {
+    HBasicBlock* next_block;
+    switch (kind_) {
+      case SUCCESSORS:
+        next_block = AdvanceSuccessors();
+        if (next_block != NULL) {
+          PostorderProcessor* result = Push(zone);
+          return result->SetupSuccessors(zone, next_block, loop_header_);
+        }
+        break;
+      case SUCCESSORS_OF_LOOP_HEADER:
+        next_block = AdvanceSuccessors();
+        if (next_block != NULL) {
+          PostorderProcessor* result = Push(zone);
+          return result->SetupSuccessors(zone, next_block, block());
+        }
+        break;
+      case LOOP_MEMBERS:
+        next_block = AdvanceLoopMembers();
+        if (next_block != NULL) {
+          PostorderProcessor* result = Push(zone);
+          return result->SetupSuccessorsOfLoopMember(next_block,
+                                                     loop_, loop_header_);
+        }
+        break;
+      case SUCCESSORS_OF_LOOP_MEMBER:
+        next_block = AdvanceSuccessors();
+        if (next_block != NULL) {
+          PostorderProcessor* result = Push(zone);
+          return result->SetupSuccessors(zone, next_block, loop_header_);
+        }
+        break;
+      case NONE:
+        return NULL;
+    }
+    return NULL;
+  }
+
+  // The following two methods implement a "foreach b in successors" cycle.
+  void InitializeSuccessors() {
+    loop_index = 0;
+    loop_length = 0;
+    successor_iterator = HSuccessorIterator(block_->end());
+  }
+
+  HBasicBlock* AdvanceSuccessors() {
+    if (!successor_iterator.Done()) {
+      HBasicBlock* result = successor_iterator.Current();
+      successor_iterator.Advance();
+      return result;
+    }
+    return NULL;
+  }
+
+  // The following two methods implement a "foreach b in loop members" cycle.
+  void InitializeLoopMembers() {
+    loop_index = 0;
+    loop_length = loop_->blocks()->length();
+  }
+
+  HBasicBlock* AdvanceLoopMembers() {
+    if (loop_index < loop_length) {
+      HBasicBlock* result = loop_->blocks()->at(loop_index);
+      loop_index++;
+      return result;
+    } else {
+      return NULL;
+    }
+  }
+
+  LoopKind kind_;
+  PostorderProcessor* father_;
+  PostorderProcessor* child_;
+  HLoopInformation* loop_;
+  HBasicBlock* block_;
+  HBasicBlock* loop_header_;
+  int loop_index;
+  int loop_length;
+  HSuccessorIterator successor_iterator;
+};
+
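+// Illustrative sketch (not part of the V8 sources): stripped of the loop
+// bookkeeping, the frame-recycling traversal above reduces to an ordinary
+// explicit-stack postorder.  SimpleGraph and VisitPostorder are hypothetical
+// names that exist only for this example.
+//
+//   #include <cstddef>
+//   #include <utility>
+//   #include <vector>
+//
+//   struct SimpleGraph { std::vector<std::vector<int>> successors; };
+//
+//   void VisitPostorder(const SimpleGraph& g, int entry,
+//                       std::vector<int>* order) {
+//     std::vector<bool> visited(g.successors.size(), false);
+//     // Each frame plays the role of one PostorderProcessor: a block plus
+//     // the index of the next successor to visit.
+//     std::vector<std::pair<int, std::size_t>> stack = {{entry, 0}};
+//     visited[entry] = true;
+//     while (!stack.empty()) {
+//       auto& frame = stack.back();
+//       const std::vector<int>& succ = g.successors[frame.first];
+//       if (frame.second < succ.size()) {
+//         int next = succ[frame.second++];      // AdvanceSuccessors().
+//         if (!visited[next]) {
+//           visited[next] = true;
+//           stack.push_back({next, 0});         // Push().
+//         }
+//       } else {
+//         order->push_back(frame.first);        // ClosePostorder().
+//         stack.pop_back();                     // Backtrack()/Pop().
+//       }
+//     }
+//   }
+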
+
+void HGraph::OrderBlocks() {
+  CompilationPhase phase("H_Block ordering", info());
+
+#ifdef DEBUG
+  // Initially the blocks must not be ordered.
+  for (int i = 0; i < blocks_.length(); ++i) {
+    DCHECK(!blocks_[i]->IsOrdered());
+  }
+#endif
+
+  PostorderProcessor* postorder =
+      PostorderProcessor::CreateEntryProcessor(zone(), blocks_[0]);
+  blocks_.Rewind(0);
+  while (postorder) {
+    postorder = postorder->PerformStep(zone(), &blocks_);
+  }
+
+#ifdef DEBUG
+  // Now all blocks must be marked as ordered.
+  for (int i = 0; i < blocks_.length(); ++i) {
+    DCHECK(blocks_[i]->IsOrdered());
+  }
+#endif
+
+  // Reverse block list and assign block IDs.
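+  // Postorder lists the entry block last, so swapping pairs here both
+  // reverses the list into reverse postorder (every block follows all of
+  // its non-back-edge predecessors) and assigns the final block IDs in a
+  // single pass.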
+  for (int i = 0, j = blocks_.length(); --j >= i; ++i) {
+    HBasicBlock* bi = blocks_[i];
+    HBasicBlock* bj = blocks_[j];
+    bi->set_block_id(j);
+    bj->set_block_id(i);
+    blocks_[i] = bj;
+    blocks_[j] = bi;
+  }
+}
+
+
+void HGraph::AssignDominators() {
+  HPhase phase("H_Assign dominators", this);
+  for (int i = 0; i < blocks_.length(); ++i) {
+    HBasicBlock* block = blocks_[i];
+    if (block->IsLoopHeader()) {
+      // Only the first predecessor of a loop header is from outside the loop.
+      // All others are back edges, and thus cannot dominate the loop header.
+      block->AssignCommonDominator(block->predecessors()->first());
+      block->AssignLoopSuccessorDominators();
+    } else {
+      for (int j = blocks_[i]->predecessors()->length() - 1; j >= 0; --j) {
+        blocks_[i]->AssignCommonDominator(blocks_[i]->predecessors()->at(j));
+      }
+    }
+  }
+}
+
+
+bool HGraph::CheckArgumentsPhiUses() {
+  int block_count = blocks_.length();
+  for (int i = 0; i < block_count; ++i) {
+    for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+      HPhi* phi = blocks_[i]->phis()->at(j);
+      // We don't support phi uses of arguments for now.
+      if (phi->CheckFlag(HValue::kIsArguments)) return false;
+    }
+  }
+  return true;
+}
+
+
+bool HGraph::CheckConstPhiUses() {
+  int block_count = blocks_.length();
+  for (int i = 0; i < block_count; ++i) {
+    for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+      HPhi* phi = blocks_[i]->phis()->at(j);
+      // Check for the hole value (from an uninitialized const).
+      for (int k = 0; k < phi->OperandCount(); k++) {
+        if (phi->OperandAt(k) == GetConstantHole()) return false;
+      }
+    }
+  }
+  return true;
+}
+
+
+void HGraph::CollectPhis() {
+  int block_count = blocks_.length();
+  phi_list_ = new(zone()) ZoneList<HPhi*>(block_count, zone());
+  for (int i = 0; i < block_count; ++i) {
+    for (int j = 0; j < blocks_[i]->phis()->length(); ++j) {
+      HPhi* phi = blocks_[i]->phis()->at(j);
+      phi_list_->Add(phi, zone());
+    }
+  }
+}
+
+
+// Implementation of the utility class that encapsulates the translation
+// state for a (possibly inlined) function.
+FunctionState::FunctionState(HOptimizedGraphBuilder* owner,
+                             CompilationInfo* info, InliningKind inlining_kind,
+                             int inlining_id)
+    : owner_(owner),
+      compilation_info_(info),
+      call_context_(NULL),
+      inlining_kind_(inlining_kind),
+      function_return_(NULL),
+      test_context_(NULL),
+      entry_(NULL),
+      arguments_object_(NULL),
+      arguments_elements_(NULL),
+      inlining_id_(inlining_id),
+      outer_source_position_(SourcePosition::Unknown()),
+      outer_(owner->function_state()) {
+  if (outer_ != NULL) {
+    // State for an inline function.
+    if (owner->ast_context()->IsTest()) {
+      HBasicBlock* if_true = owner->graph()->CreateBasicBlock();
+      HBasicBlock* if_false = owner->graph()->CreateBasicBlock();
+      if_true->MarkAsInlineReturnTarget(owner->current_block());
+      if_false->MarkAsInlineReturnTarget(owner->current_block());
+      TestContext* outer_test_context = TestContext::cast(owner->ast_context());
+      Expression* cond = outer_test_context->condition();
+      // The AstContext constructor pushed the context onto the owner's
+      // context stack.  This heap-allocated instance is the reason that
+      // AstContext can't be BASE_EMBEDDED.
+      test_context_ = new TestContext(owner, cond, if_true, if_false);
+    } else {
+      function_return_ = owner->graph()->CreateBasicBlock();
+      function_return()->MarkAsInlineReturnTarget(owner->current_block());
+    }
+    // Set this after possibly allocating a new TestContext above.
+    call_context_ = owner->ast_context();
+  }
+
+  // Push on the state stack.
+  owner->set_function_state(this);
+
+  if (compilation_info_->is_tracking_positions()) {
+    outer_source_position_ = owner->source_position();
+    owner->EnterInlinedSource(
+      info->shared_info()->start_position(),
+      inlining_id);
+    owner->SetSourcePosition(info->shared_info()->start_position());
+  }
+}
+
+
+FunctionState::~FunctionState() {
+  delete test_context_;
+  owner_->set_function_state(outer_);
+
+  if (compilation_info_->is_tracking_positions()) {
+    owner_->set_source_position(outer_source_position_);
+    owner_->EnterInlinedSource(
+      outer_->compilation_info()->shared_info()->start_position(),
+      outer_->inlining_id());
+  }
+}
+
+
+// Implementation of the utility classes that represent an expression's
+// context in the AST.
+AstContext::AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind)
+    : owner_(owner),
+      kind_(kind),
+      outer_(owner->ast_context()),
+      typeof_mode_(NOT_INSIDE_TYPEOF) {
+  owner->set_ast_context(this);  // Push.
+#ifdef DEBUG
+  DCHECK(owner->environment()->frame_type() == JS_FUNCTION);
+  original_length_ = owner->environment()->length();
+#endif
+}
+
+
+AstContext::~AstContext() {
+  owner_->set_ast_context(outer_);  // Pop.
+}
+
+
+EffectContext::~EffectContext() {
+  DCHECK(owner()->HasStackOverflow() ||
+         owner()->current_block() == NULL ||
+         (owner()->environment()->length() == original_length_ &&
+          owner()->environment()->frame_type() == JS_FUNCTION));
+}
+
+
+ValueContext::~ValueContext() {
+  DCHECK(owner()->HasStackOverflow() ||
+         owner()->current_block() == NULL ||
+         (owner()->environment()->length() == original_length_ + 1 &&
+          owner()->environment()->frame_type() == JS_FUNCTION));
+}
+
+
+void EffectContext::ReturnValue(HValue* value) {
+  // The value is simply ignored.
+}
+
+
+void ValueContext::ReturnValue(HValue* value) {
+  // The value is tracked in the bailout environment, and communicated
+  // through the environment as the result of the expression.
+  if (value->CheckFlag(HValue::kIsArguments)) {
+    if (flag_ == ARGUMENTS_FAKED) {
+      value = owner()->graph()->GetConstantUndefined();
+    } else if (!arguments_allowed()) {
+      owner()->Bailout(kBadValueContextForArgumentsValue);
+    }
+  }
+  owner()->Push(value);
+}
+
+
+void TestContext::ReturnValue(HValue* value) {
+  BuildBranch(value);
+}
+
+
+void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
+  DCHECK(!instr->IsControlInstruction());
+  owner()->AddInstruction(instr);
+  if (instr->HasObservableSideEffects()) {
+    owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+  }
+}
+
+
+void EffectContext::ReturnControl(HControlInstruction* instr,
+                                  BailoutId ast_id) {
+  DCHECK(!instr->HasObservableSideEffects());
+  HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
+  HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
+  instr->SetSuccessorAt(0, empty_true);
+  instr->SetSuccessorAt(1, empty_false);
+  owner()->FinishCurrentBlock(instr);
+  HBasicBlock* join = owner()->CreateJoin(empty_true, empty_false, ast_id);
+  owner()->set_current_block(join);
+}
+
+
+void EffectContext::ReturnContinuation(HIfContinuation* continuation,
+                                       BailoutId ast_id) {
+  HBasicBlock* true_branch = NULL;
+  HBasicBlock* false_branch = NULL;
+  continuation->Continue(&true_branch, &false_branch);
+  if (!continuation->IsTrueReachable()) {
+    owner()->set_current_block(false_branch);
+  } else if (!continuation->IsFalseReachable()) {
+    owner()->set_current_block(true_branch);
+  } else {
+    HBasicBlock* join = owner()->CreateJoin(true_branch, false_branch, ast_id);
+    owner()->set_current_block(join);
+  }
+}
+
+
+void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
+  DCHECK(!instr->IsControlInstruction());
+  if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
+    return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
+  }
+  owner()->AddInstruction(instr);
+  owner()->Push(instr);
+  if (instr->HasObservableSideEffects()) {
+    owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+  }
+}
+
+
+void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
+  DCHECK(!instr->HasObservableSideEffects());
+  if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
+    return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
+  }
+  HBasicBlock* materialize_false = owner()->graph()->CreateBasicBlock();
+  HBasicBlock* materialize_true = owner()->graph()->CreateBasicBlock();
+  instr->SetSuccessorAt(0, materialize_true);
+  instr->SetSuccessorAt(1, materialize_false);
+  owner()->FinishCurrentBlock(instr);
+  owner()->set_current_block(materialize_true);
+  owner()->Push(owner()->graph()->GetConstantTrue());
+  owner()->set_current_block(materialize_false);
+  owner()->Push(owner()->graph()->GetConstantFalse());
+  HBasicBlock* join =
+    owner()->CreateJoin(materialize_true, materialize_false, ast_id);
+  owner()->set_current_block(join);
+}
+
+
+void ValueContext::ReturnContinuation(HIfContinuation* continuation,
+                                      BailoutId ast_id) {
+  HBasicBlock* materialize_true = NULL;
+  HBasicBlock* materialize_false = NULL;
+  continuation->Continue(&materialize_true, &materialize_false);
+  if (continuation->IsTrueReachable()) {
+    owner()->set_current_block(materialize_true);
+    owner()->Push(owner()->graph()->GetConstantTrue());
+    owner()->set_current_block(materialize_true);
+  }
+  if (continuation->IsFalseReachable()) {
+    owner()->set_current_block(materialize_false);
+    owner()->Push(owner()->graph()->GetConstantFalse());
+    owner()->set_current_block(materialize_false);
+  }
+  if (continuation->TrueAndFalseReachable()) {
+    HBasicBlock* join =
+        owner()->CreateJoin(materialize_true, materialize_false, ast_id);
+    owner()->set_current_block(join);
+  }
+}
+
+
+void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
+  DCHECK(!instr->IsControlInstruction());
+  HOptimizedGraphBuilder* builder = owner();
+  builder->AddInstruction(instr);
+  // We expect a simulate after every expression with side effects, though
+  // this one isn't actually needed (and wouldn't work if it were targeted).
+  if (instr->HasObservableSideEffects()) {
+    builder->Push(instr);
+    builder->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+    builder->Pop();
+  }
+  BuildBranch(instr);
+}
+
+
+void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
+  DCHECK(!instr->HasObservableSideEffects());
+  HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
+  HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
+  instr->SetSuccessorAt(0, empty_true);
+  instr->SetSuccessorAt(1, empty_false);
+  owner()->FinishCurrentBlock(instr);
+  owner()->Goto(empty_true, if_true(), owner()->function_state());
+  owner()->Goto(empty_false, if_false(), owner()->function_state());
+  owner()->set_current_block(NULL);
+}
+
+
+void TestContext::ReturnContinuation(HIfContinuation* continuation,
+                                     BailoutId ast_id) {
+  HBasicBlock* true_branch = NULL;
+  HBasicBlock* false_branch = NULL;
+  continuation->Continue(&true_branch, &false_branch);
+  if (continuation->IsTrueReachable()) {
+    owner()->Goto(true_branch, if_true(), owner()->function_state());
+  }
+  if (continuation->IsFalseReachable()) {
+    owner()->Goto(false_branch, if_false(), owner()->function_state());
+  }
+  owner()->set_current_block(NULL);
+}
+
+
+void TestContext::BuildBranch(HValue* value) {
+  // We expect the graph to be in edge-split form: there is no edge that
+  // connects a branch node to a join node.  We conservatively ensure that
+  // property by always adding an empty block on the outgoing edges of this
+  // branch.
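+  //
+  // Schematically (sketch only; the empty blocks are created by the
+  // ReturnControl call below):
+  //
+  //            [HBranch]
+  //            /        \
+  //      [empty_true] [empty_false]
+  //           |            |
+  //        if_true()    if_false()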
+  HOptimizedGraphBuilder* builder = owner();
+  if (value != NULL && value->CheckFlag(HValue::kIsArguments)) {
+    builder->Bailout(kArgumentsObjectValueInATestContext);
+  }
+  ToBooleanStub::Types expected(condition()->to_boolean_types());
+  ReturnControl(owner()->New<HBranch>(value, expected), BailoutId::None());
+}
+
+
+// HOptimizedGraphBuilder infrastructure for bailing out and checking bailouts.
+#define CHECK_BAILOUT(call)                     \
+  do {                                          \
+    call;                                       \
+    if (HasStackOverflow()) return;             \
+  } while (false)
+
+
+#define CHECK_ALIVE(call)                                       \
+  do {                                                          \
+    call;                                                       \
+    if (HasStackOverflow() || current_block() == NULL) return;  \
+  } while (false)
+
+
+#define CHECK_ALIVE_OR_RETURN(call, value)                            \
+  do {                                                                \
+    call;                                                             \
+    if (HasStackOverflow() || current_block() == NULL) return value;  \
+  } while (false)
+
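+// Typical use, as in the visitors below: each macro expands to the wrapped
+// call followed by an early return once a bailout has set the stack-overflow
+// flag (or, for the CHECK_ALIVE variants, once the current block has died).
+// VisitFoo is a hypothetical visitor used only for illustration:
+//
+//   void HOptimizedGraphBuilder::VisitFoo(Foo* node) {
+//     CHECK_ALIVE(VisitForValue(node->operand()));
+//     HValue* value = Pop();  // Only reached while the graph is alive.
+//     ast_context()->ReturnValue(value);
+//   }
+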
+
+void HOptimizedGraphBuilder::Bailout(BailoutReason reason) {
+  current_info()->AbortOptimization(reason);
+  SetStackOverflow();
+}
+
+
+void HOptimizedGraphBuilder::VisitForEffect(Expression* expr) {
+  EffectContext for_effect(this);
+  Visit(expr);
+}
+
+
+void HOptimizedGraphBuilder::VisitForValue(Expression* expr,
+                                           ArgumentsAllowedFlag flag) {
+  ValueContext for_value(this, flag);
+  Visit(expr);
+}
+
+
+void HOptimizedGraphBuilder::VisitForTypeOf(Expression* expr) {
+  ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
+  for_value.set_typeof_mode(INSIDE_TYPEOF);
+  Visit(expr);
+}
+
+
+void HOptimizedGraphBuilder::VisitForControl(Expression* expr,
+                                             HBasicBlock* true_block,
+                                             HBasicBlock* false_block) {
+  TestContext for_control(this, expr, true_block, false_block);
+  Visit(expr);
+}
+
+
+void HOptimizedGraphBuilder::VisitExpressions(
+    ZoneList<Expression*>* exprs) {
+  for (int i = 0; i < exprs->length(); ++i) {
+    CHECK_ALIVE(VisitForValue(exprs->at(i)));
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs,
+                                              ArgumentsAllowedFlag flag) {
+  for (int i = 0; i < exprs->length(); ++i) {
+    CHECK_ALIVE(VisitForValue(exprs->at(i), flag));
+  }
+}
+
+
+bool HOptimizedGraphBuilder::BuildGraph() {
+  if (IsSubclassConstructor(current_info()->literal()->kind())) {
+    Bailout(kSuperReference);
+    return false;
+  }
+
+  Scope* scope = current_info()->scope();
+  SetUpScope(scope);
+
+  // Add an edge to the body entry.  This is warty: the graph's start
+  // environment will be used by the Lithium translation as the initial
+  // environment on graph entry, but it has now been mutated by the
+  // Hydrogen translation of the instructions in the start block.  This
+  // environment uses values which have not been defined yet.  These
+  // Hydrogen instructions will then be replayed by the Lithium
+  // translation, so they cannot have an environment effect.  The edge to
+  // the body's entry block (along with some special logic for the start
+  // block in HInstruction::InsertAfter) seals the start block from
+  // getting unwanted instructions inserted.
+  //
+  // TODO(kmillikin): Fix this.  Stop mutating the initial environment.
+  // Make the Hydrogen instructions in the initial block into Hydrogen
+  // values (but not instructions), present in the initial environment and
+  // not replayed by the Lithium translation.
+  HEnvironment* initial_env = environment()->CopyWithoutHistory();
+  HBasicBlock* body_entry = CreateBasicBlock(initial_env);
+  Goto(body_entry);
+  body_entry->SetJoinId(BailoutId::FunctionEntry());
+  set_current_block(body_entry);
+
+  VisitDeclarations(scope->declarations());
+  Add<HSimulate>(BailoutId::Declarations());
+
+  Add<HStackCheck>(HStackCheck::kFunctionEntry);
+
+  VisitStatements(current_info()->literal()->body());
+  if (HasStackOverflow()) return false;
+
+  if (current_block() != NULL) {
+    Add<HReturn>(graph()->GetConstantUndefined());
+    set_current_block(NULL);
+  }
+
+  // If the checksum of the number of type info changes is the same as the
+  // last time this function was compiled, then this recompile is likely not
+  // due to missing/inadequate type feedback, but rather too aggressive
+  // optimization. Disable optimistic LICM in that case.
+  Handle<Code> unoptimized_code(current_info()->shared_info()->code());
+  DCHECK(unoptimized_code->kind() == Code::FUNCTION);
+  Handle<TypeFeedbackInfo> type_info(
+      TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
+  int checksum = type_info->own_type_change_checksum();
+  int composite_checksum = graph()->update_type_change_checksum(checksum);
+  graph()->set_use_optimistic_licm(
+      !type_info->matches_inlined_type_change_checksum(composite_checksum));
+  type_info->set_inlined_type_change_checksum(composite_checksum);
+
+  // Perform any necessary OSR-specific cleanups or changes to the graph.
+  osr()->FinishGraph();
+
+  return true;
+}
+
+
+bool HGraph::Optimize(BailoutReason* bailout_reason) {
+  OrderBlocks();
+  AssignDominators();
+
+  // We need to create an HConstant "zero" now so that GVN will fold every
+  // zero-valued constant in the graph together.
+  // The constant is needed to make idef-based bounds checking work: the pass
+  // evaluates relations with "zero", and that zero cannot be created after
+  // GVN.
+  GetConstant0();
+
+#ifdef DEBUG
+  // Do a full verify after building the graph and computing dominators.
+  Verify(true);
+#endif
+
+  if (FLAG_analyze_environment_liveness && maximum_environment_size() != 0) {
+    Run<HEnvironmentLivenessAnalysisPhase>();
+  }
+
+  if (!CheckConstPhiUses()) {
+    *bailout_reason = kUnsupportedPhiUseOfConstVariable;
+    return false;
+  }
+  Run<HRedundantPhiEliminationPhase>();
+  if (!CheckArgumentsPhiUses()) {
+    *bailout_reason = kUnsupportedPhiUseOfArguments;
+    return false;
+  }
+
+  // Find and mark unreachable code to simplify optimizations, especially gvn,
+  // where unreachable code could unnecessarily defeat LICM.
+  Run<HMarkUnreachableBlocksPhase>();
+
+  if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
+  if (FLAG_use_escape_analysis) Run<HEscapeAnalysisPhase>();
+
+  if (FLAG_load_elimination) Run<HLoadEliminationPhase>();
+
+  CollectPhis();
+
+  if (has_osr()) osr()->FinishOsrValues();
+
+  Run<HInferRepresentationPhase>();
+
+  // Remove HSimulate instructions that have turned out not to be needed
+  // after all by folding them into the following HSimulate.
+  // This must happen after inferring representations.
+  Run<HMergeRemovableSimulatesPhase>();
+
+  Run<HMarkDeoptimizeOnUndefinedPhase>();
+  Run<HRepresentationChangesPhase>();
+
+  Run<HInferTypesPhase>();
+
+  // Must be performed before canonicalization to ensure that Canonicalize
+  // will not remove semantically meaningful ToInt32 operations, e.g. BIT_OR
+  // with zero.
+  Run<HUint32AnalysisPhase>();
+
+  if (FLAG_use_canonicalizing) Run<HCanonicalizePhase>();
+
+  if (FLAG_use_gvn) Run<HGlobalValueNumberingPhase>();
+
+  if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
+
+  if (FLAG_store_elimination) Run<HStoreEliminationPhase>();
+
+  Run<HRangeAnalysisPhase>();
+
+  Run<HComputeChangeUndefinedToNaN>();
+
+  // Eliminate redundant stack checks on backwards branches.
+  Run<HStackCheckEliminationPhase>();
+
+  if (FLAG_array_bounds_checks_elimination) Run<HBoundsCheckEliminationPhase>();
+  if (FLAG_array_bounds_checks_hoisting) Run<HBoundsCheckHoistingPhase>();
+  if (FLAG_array_index_dehoisting) Run<HDehoistIndexComputationsPhase>();
+  if (FLAG_dead_code_elimination) Run<HDeadCodeEliminationPhase>();
+
+  RestoreActualValues();
+
+  // Find unreachable code a second time; GVN and other optimizations may have
+  // made blocks unreachable that were previously reachable.
+  Run<HMarkUnreachableBlocksPhase>();
+
+  return true;
+}
+
+
+void HGraph::RestoreActualValues() {
+  HPhase phase("H_Restore actual values", this);
+
+  for (int block_index = 0; block_index < blocks()->length(); block_index++) {
+    HBasicBlock* block = blocks()->at(block_index);
+
+#ifdef DEBUG
+    for (int i = 0; i < block->phis()->length(); i++) {
+      HPhi* phi = block->phis()->at(i);
+      DCHECK(phi->ActualValue() == phi);
+    }
+#endif
+
+    for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
+      HInstruction* instruction = it.Current();
+      if (instruction->ActualValue() == instruction) continue;
+      if (instruction->CheckFlag(HValue::kIsDead)) {
+        // The instruction was marked as deleted but left in the graph
+        // as a control flow dependency point for subsequent
+        // instructions.
+        instruction->DeleteAndReplaceWith(instruction->ActualValue());
+      } else {
+        DCHECK(instruction->IsInformativeDefinition());
+        if (instruction->IsPurelyInformativeDefinition()) {
+          instruction->DeleteAndReplaceWith(instruction->RedefinedOperand());
+        } else {
+          instruction->ReplaceAllUsesWith(instruction->ActualValue());
+        }
+      }
+    }
+  }
+}
+
+
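+// Pops |count| arguments off the environment (last argument on top) and
+// re-emits them as a single HPushArguments in source order: the Pop() loop
+// reverses the arguments once and RemoveLast() reverses them back.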
+void HOptimizedGraphBuilder::PushArgumentsFromEnvironment(int count) {
+  ZoneList<HValue*> arguments(count, zone());
+  for (int i = 0; i < count; ++i) {
+    arguments.Add(Pop(), zone());
+  }
+
+  HPushArguments* push_args = New<HPushArguments>();
+  while (!arguments.is_empty()) {
+    push_args->AddInput(arguments.RemoveLast());
+  }
+  AddInstruction(push_args);
+}
+
+
+template <class Instruction>
+HInstruction* HOptimizedGraphBuilder::PreProcessCall(Instruction* call) {
+  PushArgumentsFromEnvironment(call->argument_count());
+  return call;
+}
+
+
+void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
+  HEnvironment* prolog_env = environment();
+  int parameter_count = environment()->parameter_count();
+  ZoneList<HValue*> parameters(parameter_count, zone());
+  for (int i = 0; i < parameter_count; ++i) {
+    HInstruction* parameter = Add<HParameter>(static_cast<unsigned>(i));
+    parameters.Add(parameter, zone());
+    environment()->Bind(i, parameter);
+  }
+
+  HConstant* undefined_constant = graph()->GetConstantUndefined();
+  // Initialize specials and locals to undefined.
+  for (int i = parameter_count + 1; i < environment()->length(); ++i) {
+    environment()->Bind(i, undefined_constant);
+  }
+  Add<HPrologue>();
+
+  HEnvironment* initial_env = environment()->CopyWithoutHistory();
+  HBasicBlock* body_entry = CreateBasicBlock(initial_env);
+  GotoNoSimulate(body_entry);
+  set_current_block(body_entry);
+
+  // Initialize context of prolog environment to undefined.
+  prolog_env->BindContext(undefined_constant);
+
+  // First special is HContext.
+  HInstruction* context = Add<HContext>();
+  environment()->BindContext(context);
+
+  // Create an arguments object containing the initial parameters.  Set the
+  // initial values of parameters including "this", which has parameter
+  // index 0.
+  DCHECK_EQ(scope->num_parameters() + 1, parameter_count);
+  HArgumentsObject* arguments_object = New<HArgumentsObject>(parameter_count);
+  for (int i = 0; i < parameter_count; ++i) {
+    HValue* parameter = parameters.at(i);
+    arguments_object->AddArgument(parameter, zone());
+  }
+
+  AddInstruction(arguments_object);
+  graph()->SetArgumentsObject(arguments_object);
+
+  // Handle the arguments and arguments shadow variables specially (they do
+  // not have declarations).
+  if (scope->arguments() != NULL) {
+    environment()->Bind(scope->arguments(), graph()->GetArgumentsObject());
+  }
+
+  int rest_index;
+  Variable* rest = scope->rest_parameter(&rest_index);
+  if (rest) {
+    return Bailout(kRestParameter);
+  }
+
+  if (scope->this_function_var() != nullptr ||
+      scope->new_target_var() != nullptr) {
+    return Bailout(kSuperReference);
+  }
+
+  // Trace the call.
+  if (FLAG_trace && top_info()->IsOptimizing()) {
+    Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kTraceEnter), 0);
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
+  for (int i = 0; i < statements->length(); i++) {
+    Statement* stmt = statements->at(i);
+    CHECK_ALIVE(Visit(stmt));
+    if (stmt->IsJump()) break;
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+
+  Scope* outer_scope = scope();
+  Scope* scope = stmt->scope();
+  BreakAndContinueInfo break_info(stmt, outer_scope);
+
+  { BreakAndContinueScope push(&break_info, this);
+    if (scope != NULL) {
+      if (scope->NeedsContext()) {
+        // Load the function object.
+        Scope* declaration_scope = scope->DeclarationScope();
+        HInstruction* function;
+        HValue* outer_context = environment()->context();
+        if (declaration_scope->is_script_scope() ||
+            declaration_scope->is_eval_scope()) {
+          function = new (zone())
+              HLoadContextSlot(outer_context, Context::CLOSURE_INDEX,
+                               HLoadContextSlot::kNoCheck);
+        } else {
+          function = New<HThisFunction>();
+        }
+        AddInstruction(function);
+        // Allocate a block context and store it to the stack frame.
+        HInstruction* inner_context = Add<HAllocateBlockContext>(
+            outer_context, function, scope->GetScopeInfo(isolate()));
+        HInstruction* instr = Add<HStoreFrameContext>(inner_context);
+        set_scope(scope);
+        environment()->BindContext(inner_context);
+        if (instr->HasObservableSideEffects()) {
+          AddSimulate(stmt->EntryId(), REMOVABLE_SIMULATE);
+        }
+      }
+      VisitDeclarations(scope->declarations());
+      AddSimulate(stmt->DeclsId(), REMOVABLE_SIMULATE);
+    }
+    CHECK_BAILOUT(VisitStatements(stmt->statements()));
+  }
+  set_scope(outer_scope);
+  if (scope != NULL && current_block() != NULL &&
+      scope->ContextLocalCount() > 0) {
+    HValue* inner_context = environment()->context();
+    HValue* outer_context = Add<HLoadNamedField>(
+        inner_context, nullptr,
+        HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+
+    HInstruction* instr = Add<HStoreFrameContext>(outer_context);
+    environment()->BindContext(outer_context);
+    if (instr->HasObservableSideEffects()) {
+      AddSimulate(stmt->ExitId(), REMOVABLE_SIMULATE);
+    }
+  }
+  HBasicBlock* break_block = break_info.break_block();
+  if (break_block != NULL) {
+    if (current_block() != NULL) Goto(break_block);
+    break_block->SetJoinId(stmt->ExitId());
+    set_current_block(break_block);
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitExpressionStatement(
+    ExpressionStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  VisitForEffect(stmt->expression());
+}
+
+
+void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+}
+
+
+void HOptimizedGraphBuilder::VisitSloppyBlockFunctionStatement(
+    SloppyBlockFunctionStatement* stmt) {
+  Visit(stmt->statement());
+}
+
+
+void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  if (stmt->condition()->ToBooleanIsTrue()) {
+    Add<HSimulate>(stmt->ThenId());
+    Visit(stmt->then_statement());
+  } else if (stmt->condition()->ToBooleanIsFalse()) {
+    Add<HSimulate>(stmt->ElseId());
+    Visit(stmt->else_statement());
+  } else {
+    HBasicBlock* cond_true = graph()->CreateBasicBlock();
+    HBasicBlock* cond_false = graph()->CreateBasicBlock();
+    CHECK_BAILOUT(VisitForControl(stmt->condition(), cond_true, cond_false));
+
+    if (cond_true->HasPredecessor()) {
+      cond_true->SetJoinId(stmt->ThenId());
+      set_current_block(cond_true);
+      CHECK_BAILOUT(Visit(stmt->then_statement()));
+      cond_true = current_block();
+    } else {
+      cond_true = NULL;
+    }
+
+    if (cond_false->HasPredecessor()) {
+      cond_false->SetJoinId(stmt->ElseId());
+      set_current_block(cond_false);
+      CHECK_BAILOUT(Visit(stmt->else_statement()));
+      cond_false = current_block();
+    } else {
+      cond_false = NULL;
+    }
+
+    HBasicBlock* join = CreateJoin(cond_true, cond_false, stmt->IfId());
+    set_current_block(join);
+  }
+}
+
+
+HBasicBlock* HOptimizedGraphBuilder::BreakAndContinueScope::Get(
+    BreakableStatement* stmt,
+    BreakType type,
+    Scope** scope,
+    int* drop_extra) {
+  *drop_extra = 0;
+  BreakAndContinueScope* current = this;
+  while (current != NULL && current->info()->target() != stmt) {
+    *drop_extra += current->info()->drop_extra();
+    current = current->next();
+  }
+  DCHECK(current != NULL);  // Always found (unless stack is malformed).
+  *scope = current->info()->scope();
+
+  if (type == BREAK) {
+    *drop_extra += current->info()->drop_extra();
+  }
+
+  HBasicBlock* block = NULL;
+  switch (type) {
+    case BREAK:
+      block = current->info()->break_block();
+      if (block == NULL) {
+        block = current->owner()->graph()->CreateBasicBlock();
+        current->info()->set_break_block(block);
+      }
+      break;
+
+    case CONTINUE:
+      block = current->info()->continue_block();
+      if (block == NULL) {
+        block = current->owner()->graph()->CreateBasicBlock();
+        current->info()->set_continue_block(block);
+      }
+      break;
+  }
+
+  return block;
+}
+
+
+void HOptimizedGraphBuilder::VisitContinueStatement(
+    ContinueStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  Scope* outer_scope = NULL;
+  Scope* inner_scope = scope();
+  int drop_extra = 0;
+  HBasicBlock* continue_block = break_scope()->Get(
+      stmt->target(), BreakAndContinueScope::CONTINUE,
+      &outer_scope, &drop_extra);
+  HValue* context = environment()->context();
+  Drop(drop_extra);
+  int context_pop_count = inner_scope->ContextChainLength(outer_scope);
+  if (context_pop_count > 0) {
+    while (context_pop_count-- > 0) {
+      HInstruction* context_instruction = Add<HLoadNamedField>(
+          context, nullptr,
+          HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+      context = context_instruction;
+    }
+    HInstruction* instr = Add<HStoreFrameContext>(context);
+    if (instr->HasObservableSideEffects()) {
+      AddSimulate(stmt->target()->EntryId(), REMOVABLE_SIMULATE);
+    }
+    environment()->BindContext(context);
+  }
+
+  Goto(continue_block);
+  set_current_block(NULL);
+}
+
+
+void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  Scope* outer_scope = NULL;
+  Scope* inner_scope = scope();
+  int drop_extra = 0;
+  HBasicBlock* break_block = break_scope()->Get(
+      stmt->target(), BreakAndContinueScope::BREAK,
+      &outer_scope, &drop_extra);
+  HValue* context = environment()->context();
+  Drop(drop_extra);
+  int context_pop_count = inner_scope->ContextChainLength(outer_scope);
+  if (context_pop_count > 0) {
+    while (context_pop_count-- > 0) {
+      HInstruction* context_instruction = Add<HLoadNamedField>(
+          context, nullptr,
+          HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+      context = context_instruction;
+    }
+    HInstruction* instr = Add<HStoreFrameContext>(context);
+    if (instr->HasObservableSideEffects()) {
+      AddSimulate(stmt->target()->ExitId(), REMOVABLE_SIMULATE);
+    }
+    environment()->BindContext(context);
+  }
+  Goto(break_block);
+  set_current_block(NULL);
+}
+
+
+void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  FunctionState* state = function_state();
+  AstContext* context = call_context();
+  if (context == NULL) {
+    // Not an inlined return, so an actual one.
+    CHECK_ALIVE(VisitForValue(stmt->expression()));
+    HValue* result = environment()->Pop();
+    Add<HReturn>(result);
+  } else if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
+    // Return from an inlined construct call. In a test context the return
+    // value will always evaluate to true; in a value context the return
+    // value needs to be a JSObject.
+    if (context->IsTest()) {
+      TestContext* test = TestContext::cast(context);
+      CHECK_ALIVE(VisitForEffect(stmt->expression()));
+      Goto(test->if_true(), state);
+    } else if (context->IsEffect()) {
+      CHECK_ALIVE(VisitForEffect(stmt->expression()));
+      Goto(function_return(), state);
+    } else {
+      DCHECK(context->IsValue());
+      CHECK_ALIVE(VisitForValue(stmt->expression()));
+      HValue* return_value = Pop();
+      HValue* receiver = environment()->arguments_environment()->Lookup(0);
+      HHasInstanceTypeAndBranch* typecheck =
+          New<HHasInstanceTypeAndBranch>(return_value,
+                                         FIRST_JS_RECEIVER_TYPE,
+                                         LAST_JS_RECEIVER_TYPE);
+      HBasicBlock* if_spec_object = graph()->CreateBasicBlock();
+      HBasicBlock* not_spec_object = graph()->CreateBasicBlock();
+      typecheck->SetSuccessorAt(0, if_spec_object);
+      typecheck->SetSuccessorAt(1, not_spec_object);
+      FinishCurrentBlock(typecheck);
+      AddLeaveInlined(if_spec_object, return_value, state);
+      AddLeaveInlined(not_spec_object, receiver, state);
+    }
+  } else if (state->inlining_kind() == SETTER_CALL_RETURN) {
+    // Return from an inlined setter call. The returned value is never used;
+    // the value of an assignment is always the value of the RHS of the
+    // assignment.
+    CHECK_ALIVE(VisitForEffect(stmt->expression()));
+    if (context->IsTest()) {
+      HValue* rhs = environment()->arguments_environment()->Lookup(1);
+      context->ReturnValue(rhs);
+    } else if (context->IsEffect()) {
+      Goto(function_return(), state);
+    } else {
+      DCHECK(context->IsValue());
+      HValue* rhs = environment()->arguments_environment()->Lookup(1);
+      AddLeaveInlined(rhs, state);
+    }
+  } else {
+    // Return from a normal inlined function. Visit the subexpression in the
+    // expression context of the call.
+    if (context->IsTest()) {
+      TestContext* test = TestContext::cast(context);
+      VisitForControl(stmt->expression(), test->if_true(), test->if_false());
+    } else if (context->IsEffect()) {
+      // Visit in value context and ignore the result. This is needed to keep
+      // the environment in sync with full-codegen since some visitors (e.g.
+      // VisitCountOperation) use the operand stack differently depending on
+      // context.
+      CHECK_ALIVE(VisitForValue(stmt->expression()));
+      Pop();
+      Goto(function_return(), state);
+    } else {
+      DCHECK(context->IsValue());
+      CHECK_ALIVE(VisitForValue(stmt->expression()));
+      AddLeaveInlined(Pop(), state);
+    }
+  }
+  set_current_block(NULL);
+}
+
+
+void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kWithStatement);
+}
+
+
+void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  int clause_count = clauses->length();
+  ZoneList<HBasicBlock*> body_blocks(clause_count, zone());
+
+  CHECK_ALIVE(VisitForValue(stmt->tag()));
+  Add<HSimulate>(stmt->EntryId());
+  HValue* tag_value = Top();
+  Type* tag_type = stmt->tag()->bounds().lower;
+
+  // 1. Build all the tests, with dangling true branches
+  BailoutId default_id = BailoutId::None();
+  for (int i = 0; i < clause_count; ++i) {
+    CaseClause* clause = clauses->at(i);
+    if (clause->is_default()) {
+      body_blocks.Add(NULL, zone());
+      if (default_id.IsNone()) default_id = clause->EntryId();
+      continue;
+    }
+
+    // Generate a compare and branch.
+    CHECK_BAILOUT(VisitForValue(clause->label()));
+    if (current_block() == NULL) return Bailout(kUnsupportedSwitchStatement);
+    HValue* label_value = Pop();
+
+    Type* label_type = clause->label()->bounds().lower;
+    Type* combined_type = clause->compare_type();
+    HControlInstruction* compare = BuildCompareInstruction(
+        Token::EQ_STRICT, tag_value, label_value, tag_type, label_type,
+        combined_type,
+        ScriptPositionToSourcePosition(stmt->tag()->position()),
+        ScriptPositionToSourcePosition(clause->label()->position()),
+        PUSH_BEFORE_SIMULATE, clause->id());
+
+    HBasicBlock* next_test_block = graph()->CreateBasicBlock();
+    HBasicBlock* body_block = graph()->CreateBasicBlock();
+    body_blocks.Add(body_block, zone());
+    compare->SetSuccessorAt(0, body_block);
+    compare->SetSuccessorAt(1, next_test_block);
+    FinishCurrentBlock(compare);
+
+    set_current_block(body_block);
+    Drop(1);  // tag_value
+
+    set_current_block(next_test_block);
+  }
+
+  // Save the current block to use for the default or to join with the
+  // exit.
+  HBasicBlock* last_block = current_block();
+  Drop(1);  // tag_value
+
+  // 2. Loop over the clauses and the linked list of tests in lockstep,
+  // translating the clause bodies.
+  HBasicBlock* fall_through_block = NULL;
+
+  BreakAndContinueInfo break_info(stmt, scope());
+  { BreakAndContinueScope push(&break_info, this);
+    for (int i = 0; i < clause_count; ++i) {
+      CaseClause* clause = clauses->at(i);
+
+      // Identify the block that normal (non-fall-through) control flow
+      // goes to.
+      HBasicBlock* normal_block = NULL;
+      if (clause->is_default()) {
+        if (last_block == NULL) continue;
+        normal_block = last_block;
+        last_block = NULL;  // Cleared to indicate we've handled it.
+      } else {
+        normal_block = body_blocks[i];
+      }
+
+      if (fall_through_block == NULL) {
+        set_current_block(normal_block);
+      } else {
+        HBasicBlock* join = CreateJoin(fall_through_block,
+                                       normal_block,
+                                       clause->EntryId());
+        set_current_block(join);
+      }
+
+      CHECK_BAILOUT(VisitStatements(clause->statements()));
+      fall_through_block = current_block();
+    }
+  }
+
+  // Create an up-to-3-way join.  Use the break block if it exists since
+  // it's already a join block.
+  HBasicBlock* break_block = break_info.break_block();
+  if (break_block == NULL) {
+    set_current_block(CreateJoin(fall_through_block,
+                                 last_block,
+                                 stmt->ExitId()));
+  } else {
+    if (fall_through_block != NULL) Goto(fall_through_block, break_block);
+    if (last_block != NULL) Goto(last_block, break_block);
+    break_block->SetJoinId(stmt->ExitId());
+    set_current_block(break_block);
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitLoopBody(IterationStatement* stmt,
+                                           HBasicBlock* loop_entry) {
+  Add<HSimulate>(stmt->StackCheckId());
+  HStackCheck* stack_check =
+      HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch));
+  DCHECK(loop_entry->IsLoopHeader());
+  loop_entry->loop_information()->set_stack_check(stack_check);
+  CHECK_BAILOUT(Visit(stmt->body()));
+}
+
+
+void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  HBasicBlock* loop_entry = BuildLoopEntry(stmt);
+
+  BreakAndContinueInfo break_info(stmt, scope());
+  {
+    BreakAndContinueScope push(&break_info, this);
+    CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
+  }
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());
+  HBasicBlock* loop_successor = NULL;
+  if (body_exit != NULL && !stmt->cond()->ToBooleanIsTrue()) {
+    set_current_block(body_exit);
+    loop_successor = graph()->CreateBasicBlock();
+    if (stmt->cond()->ToBooleanIsFalse()) {
+      loop_entry->loop_information()->stack_check()->Eliminate();
+      Goto(loop_successor);
+      body_exit = NULL;
+    } else {
+      // The block for a true condition, the actual predecessor block of the
+      // back edge.
+      body_exit = graph()->CreateBasicBlock();
+      CHECK_BAILOUT(VisitForControl(stmt->cond(), body_exit, loop_successor));
+    }
+    if (body_exit != NULL && body_exit->HasPredecessor()) {
+      body_exit->SetJoinId(stmt->BackEdgeId());
+    } else {
+      body_exit = NULL;
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
+  }
+  HBasicBlock* loop_exit = CreateLoop(stmt,
+                                      loop_entry,
+                                      body_exit,
+                                      loop_successor,
+                                      break_info.break_block());
+  set_current_block(loop_exit);
+}
+
+
+void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  HBasicBlock* loop_entry = BuildLoopEntry(stmt);
+
+  // If the condition is constant true, do not generate a branch.
+  HBasicBlock* loop_successor = NULL;
+  if (!stmt->cond()->ToBooleanIsTrue()) {
+    HBasicBlock* body_entry = graph()->CreateBasicBlock();
+    loop_successor = graph()->CreateBasicBlock();
+    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+    if (body_entry->HasPredecessor()) {
+      body_entry->SetJoinId(stmt->BodyId());
+      set_current_block(body_entry);
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
+  }
+
+  BreakAndContinueInfo break_info(stmt, scope());
+  if (current_block() != NULL) {
+    BreakAndContinueScope push(&break_info, this);
+    CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
+  }
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());
+  HBasicBlock* loop_exit = CreateLoop(stmt,
+                                      loop_entry,
+                                      body_exit,
+                                      loop_successor,
+                                      break_info.break_block());
+  set_current_block(loop_exit);
+}
+
+
+void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  if (stmt->init() != NULL) {
+    CHECK_ALIVE(Visit(stmt->init()));
+  }
+  DCHECK(current_block() != NULL);
+  HBasicBlock* loop_entry = BuildLoopEntry(stmt);
+
+  HBasicBlock* loop_successor = NULL;
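+  // A for loop without a condition is entered unconditionally; otherwise
+  // branch on the condition just like in a while loop.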
+  if (stmt->cond() != NULL) {
+    HBasicBlock* body_entry = graph()->CreateBasicBlock();
+    loop_successor = graph()->CreateBasicBlock();
+    CHECK_BAILOUT(VisitForControl(stmt->cond(), body_entry, loop_successor));
+    if (body_entry->HasPredecessor()) {
+      body_entry->SetJoinId(stmt->BodyId());
+      set_current_block(body_entry);
+    }
+    if (loop_successor->HasPredecessor()) {
+      loop_successor->SetJoinId(stmt->ExitId());
+    } else {
+      loop_successor = NULL;
+    }
+  }
+
+  BreakAndContinueInfo break_info(stmt, scope());
+  if (current_block() != NULL) {
+    BreakAndContinueScope push(&break_info, this);
+    CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
+  }
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());
+
+  if (stmt->next() != NULL && body_exit != NULL) {
+    set_current_block(body_exit);
+    CHECK_BAILOUT(Visit(stmt->next()));
+    body_exit = current_block();
+  }
+
+  HBasicBlock* loop_exit = CreateLoop(stmt,
+                                      loop_entry,
+                                      body_exit,
+                                      loop_successor,
+                                      break_info.break_block());
+  set_current_block(loop_exit);
+}
+
+
+void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+
+  if (!FLAG_optimize_for_in) {
+    return Bailout(kForInStatementOptimizationIsDisabled);
+  }
+
+  if (!stmt->each()->IsVariableProxy() ||
+      !stmt->each()->AsVariableProxy()->var()->IsStackLocal()) {
+    return Bailout(kForInStatementWithNonLocalEachVariable);
+  }
+
+  Variable* each_var = stmt->each()->AsVariableProxy()->var();
+
+  CHECK_ALIVE(VisitForValue(stmt->enumerable()));
+  HValue* enumerable = Top();  // Leave enumerable at the top.
+
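+  // for-in over undefined or null iterates nothing; instead of modeling the
+  // empty case here, deoptimize and let the unoptimized code handle it.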
+  IfBuilder if_undefined_or_null(this);
+  if_undefined_or_null.If<HCompareObjectEqAndBranch>(
+      enumerable, graph()->GetConstantUndefined());
+  if_undefined_or_null.Or();
+  if_undefined_or_null.If<HCompareObjectEqAndBranch>(
+      enumerable, graph()->GetConstantNull());
+  if_undefined_or_null.ThenDeopt(Deoptimizer::kUndefinedOrNullInForIn);
+  if_undefined_or_null.End();
+  BuildForInBody(stmt, each_var, enumerable);
+}
+
+
+void HOptimizedGraphBuilder::BuildForInBody(ForInStatement* stmt,
+                                            Variable* each_var,
+                                            HValue* enumerable) {
+  HInstruction* map;
+  HInstruction* array;
+  HInstruction* enum_length;
+  bool fast = stmt->for_in_type() == ForInStatement::FAST_FOR_IN;
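+  // The fast path reads the keys out of the map's enum cache; the slow path
+  // materializes the property names via the runtime and filters each key.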
+  if (fast) {
+    map = Add<HForInPrepareMap>(enumerable);
+    Add<HSimulate>(stmt->PrepareId());
+
+    array = Add<HForInCacheArray>(enumerable, map,
+                                  DescriptorArray::kEnumCacheBridgeCacheIndex);
+    enum_length = Add<HMapEnumLength>(map);
+
+    HInstruction* index_cache = Add<HForInCacheArray>(
+        enumerable, map, DescriptorArray::kEnumCacheBridgeIndicesCacheIndex);
+    HForInCacheArray::cast(array)
+        ->set_index_cache(HForInCacheArray::cast(index_cache));
+  } else {
+    Add<HSimulate>(stmt->PrepareId());
+    {
+      NoObservableSideEffectsScope no_effects(this);
+      BuildJSObjectCheck(enumerable, 0);
+    }
+    Add<HSimulate>(stmt->ToObjectId());
+
+    map = graph()->GetConstant1();
+    Runtime::FunctionId function_id = Runtime::kGetPropertyNamesFast;
+    Add<HPushArguments>(enumerable);
+    array = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
+    Push(array);
+    Add<HSimulate>(stmt->EnumId());
+    Drop(1);
+    Handle<Map> array_map = isolate()->factory()->fixed_array_map();
+    HValue* check = Add<HCheckMaps>(array, array_map);
+    enum_length = AddLoadFixedArrayLength(array, check);
+  }
+
+  HInstruction* start_index = Add<HConstant>(0);
+
+  Push(map);
+  Push(array);
+  Push(enum_length);
+  Push(start_index);
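+  // Expression stack layout from here on (0 is the top):
+  //   0: index, 1: enum_length (limit), 2: array (enum cache),
+  //   3: map, 4: enumerable.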
+
+  HBasicBlock* loop_entry = BuildLoopEntry(stmt);
+
+  // Reload the values to ensure we have up-to-date values inside of the loop.
+  // This is relevant especially for OSR where the values don't come from the
+  // computation above, but from the OSR entry block.
+  enumerable = environment()->ExpressionStackAt(4);
+  HValue* index = environment()->ExpressionStackAt(0);
+  HValue* limit = environment()->ExpressionStackAt(1);
+
+  // Check that we still have more keys.
+  HCompareNumericAndBranch* compare_index =
+      New<HCompareNumericAndBranch>(index, limit, Token::LT);
+  compare_index->set_observed_input_representation(
+      Representation::Smi(), Representation::Smi());
+
+  HBasicBlock* loop_body = graph()->CreateBasicBlock();
+  HBasicBlock* loop_successor = graph()->CreateBasicBlock();
+
+  compare_index->SetSuccessorAt(0, loop_body);
+  compare_index->SetSuccessorAt(1, loop_successor);
+  FinishCurrentBlock(compare_index);
+
+  set_current_block(loop_successor);
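+  // Leaving the loop: pop index, limit, array, map and enumerable.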
+  Drop(5);
+
+  set_current_block(loop_body);
+
+  HValue* key =
+      Add<HLoadKeyed>(environment()->ExpressionStackAt(2),  // Enum cache.
+                      index, index, nullptr, FAST_ELEMENTS);
+
+  if (fast) {
+    // Check if the expected map still matches that of the enumerable.
+    // If not, just deoptimize.
+    Add<HCheckMapValue>(enumerable, environment()->ExpressionStackAt(3));
+    Bind(each_var, key);
+  } else {
+    Add<HPushArguments>(enumerable, key);
+    Runtime::FunctionId function_id = Runtime::kForInFilter;
+    key = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 2);
+    Push(key);
+    Add<HSimulate>(stmt->FilterId());
+    key = Pop();
+    Bind(each_var, key);
+    IfBuilder if_undefined(this);
+    if_undefined.If<HCompareObjectEqAndBranch>(key,
+                                               graph()->GetConstantUndefined());
+    if_undefined.ThenDeopt(Deoptimizer::kUndefined);
+    if_undefined.End();
+    Add<HSimulate>(stmt->AssignmentId());
+  }
+
+  BreakAndContinueInfo break_info(stmt, scope(), 5);
+  {
+    BreakAndContinueScope push(&break_info, this);
+    CHECK_BAILOUT(VisitLoopBody(stmt, loop_entry));
+  }
+
+  HBasicBlock* body_exit =
+      JoinContinue(stmt, current_block(), break_info.continue_block());
+
+  if (body_exit != NULL) {
+    set_current_block(body_exit);
+
+    HValue* current_index = Pop();
+    Push(AddUncasted<HAdd>(current_index, graph()->GetConstant1()));
+    body_exit = current_block();
+  }
+
+  HBasicBlock* loop_exit = CreateLoop(stmt,
+                                      loop_entry,
+                                      body_exit,
+                                      loop_successor,
+                                      break_info.break_block());
+
+  set_current_block(loop_exit);
+}
+
+
+void HOptimizedGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kForOfStatement);
+}
+
+
+void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kTryCatchStatement);
+}
+
+
+void HOptimizedGraphBuilder::VisitTryFinallyStatement(
+    TryFinallyStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kTryFinallyStatement);
+}
+
+
+void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kDebuggerStatement);
+}
+
+
+void HOptimizedGraphBuilder::VisitCaseClause(CaseClause* clause) {
+  UNREACHABLE();
+}
+
+
+void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  Handle<SharedFunctionInfo> shared_info = Compiler::GetSharedFunctionInfo(
+      expr, current_info()->script(), top_info());
+  // If the recursive compilation above overflowed the stack, the overflow
+  // flag is set here as well.
+  if (HasStackOverflow()) return;
+  // Use the fast case closure allocation code that allocates in new space
+  // for nested functions that don't need literal cloning.
+  HConstant* shared_info_value = Add<HConstant>(shared_info);
+  HInstruction* instr;
+  if (!expr->pretenure() && shared_info->num_literals() == 0) {
+    FastNewClosureStub stub(isolate(), shared_info->language_mode(),
+                            shared_info->kind());
+    FastNewClosureDescriptor descriptor(isolate());
+    HValue* values[] = {context(), shared_info_value};
+    HConstant* stub_value = Add<HConstant>(stub.GetCode());
+    instr = New<HCallWithDescriptor>(stub_value, 0, descriptor,
+                                     Vector<HValue*>(values, arraysize(values)),
+                                     NORMAL_CALL);
+  } else {
+    Add<HPushArguments>(shared_info_value);
+    Runtime::FunctionId function_id =
+        expr->pretenure() ? Runtime::kNewClosure_Tenured : Runtime::kNewClosure;
+    instr = New<HCallRuntime>(Runtime::FunctionForId(function_id), 1);
+  }
+  return ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::VisitClassLiteral(ClassLiteral* lit) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kClassLiteral);
+}
+
+
+void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
+    NativeFunctionLiteral* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kNativeFunctionLiteral);
+}
+
+
+void HOptimizedGraphBuilder::VisitDoExpression(DoExpression* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kDoExpression);
+}
+
+
+void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  HBasicBlock* cond_true = graph()->CreateBasicBlock();
+  HBasicBlock* cond_false = graph()->CreateBasicBlock();
+  CHECK_BAILOUT(VisitForControl(expr->condition(), cond_true, cond_false));
+
+  // Visit the true and false subexpressions in the same AST context as the
+  // whole expression.
+  if (cond_true->HasPredecessor()) {
+    cond_true->SetJoinId(expr->ThenId());
+    set_current_block(cond_true);
+    CHECK_BAILOUT(Visit(expr->then_expression()));
+    cond_true = current_block();
+  } else {
+    cond_true = NULL;
+  }
+
+  if (cond_false->HasPredecessor()) {
+    cond_false->SetJoinId(expr->ElseId());
+    set_current_block(cond_false);
+    CHECK_BAILOUT(Visit(expr->else_expression()));
+    cond_false = current_block();
+  } else {
+    cond_false = NULL;
+  }
+
+  if (!ast_context()->IsTest()) {
+    HBasicBlock* join = CreateJoin(cond_true, cond_false, expr->id());
+    set_current_block(join);
+    if (join != NULL && !ast_context()->IsEffect()) {
+      return ast_context()->ReturnValue(Pop());
+    }
+  }
+}
+
+
+HOptimizedGraphBuilder::GlobalPropertyAccess
+HOptimizedGraphBuilder::LookupGlobalProperty(Variable* var, LookupIterator* it,
+                                             PropertyAccessType access_type) {
+  if (var->is_this() || !current_info()->has_global_object()) {
+    return kUseGeneric;
+  }
+
+  switch (it->state()) {
+    case LookupIterator::ACCESSOR:
+    case LookupIterator::ACCESS_CHECK:
+    case LookupIterator::INTERCEPTOR:
+    case LookupIterator::INTEGER_INDEXED_EXOTIC:
+    case LookupIterator::NOT_FOUND:
+      return kUseGeneric;
+    case LookupIterator::DATA:
+      if (access_type == STORE && it->IsReadOnly()) return kUseGeneric;
+      return kUseCell;
+    case LookupIterator::JSPROXY:
+    case LookupIterator::TRANSITION:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return kUseGeneric;
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
+  DCHECK(var->IsContextSlot());
+  HValue* context = environment()->context();
+  int length = scope()->ContextChainLength(var->scope());
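+  // Hop from context to context through the PREVIOUS_INDEX slot until the
+  // context that actually holds the variable is reached.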
+  while (length-- > 0) {
+    context = Add<HLoadNamedField>(
+        context, nullptr,
+        HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
+  }
+  return context;
+}
+
+
+void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  Variable* variable = expr->var();
+  switch (variable->location()) {
+    case VariableLocation::GLOBAL:
+    case VariableLocation::UNALLOCATED: {
+      if (IsLexicalVariableMode(variable->mode())) {
+        // TODO(rossberg): should this be a DCHECK?
+        return Bailout(kReferenceToGlobalLexicalVariable);
+      }
+      // Handle known global constants like 'undefined' specially to avoid a
+      // load from a global cell for them.
+      Handle<Object> constant_value =
+          isolate()->factory()->GlobalConstantFor(variable->name());
+      if (!constant_value.is_null()) {
+        HConstant* instr = New<HConstant>(constant_value);
+        return ast_context()->ReturnInstruction(instr, expr->id());
+      }
+
+      Handle<JSGlobalObject> global(current_info()->global_object());
+
+      // Lookup in script contexts.
+      {
+        Handle<ScriptContextTable> script_contexts(
+            global->native_context()->script_context_table());
+        ScriptContextTable::LookupResult lookup;
+        if (ScriptContextTable::Lookup(script_contexts, variable->name(),
+                                       &lookup)) {
+          Handle<Context> script_context = ScriptContextTable::GetContext(
+              script_contexts, lookup.context_index);
+          Handle<Object> current_value =
+              FixedArray::get(script_context, lookup.slot_index);
+
+          // If the value is not the hole, it will stay initialized,
+          // so there is no need to generate a check.
+          if (*current_value == *isolate()->factory()->the_hole_value()) {
+            return Bailout(kReferenceToUninitializedVariable);
+          }
+          HInstruction* result = New<HLoadNamedField>(
+              Add<HConstant>(script_context), nullptr,
+              HObjectAccess::ForContextSlot(lookup.slot_index));
+          return ast_context()->ReturnInstruction(result, expr->id());
+        }
+      }
+
+      LookupIterator it(global, variable->name(), LookupIterator::OWN);
+      GlobalPropertyAccess type = LookupGlobalProperty(variable, &it, LOAD);
+
+      if (type == kUseCell) {
+        Handle<PropertyCell> cell = it.GetPropertyCell();
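+        // Register a code dependency so the optimized code is deoptimized
+        // if the property cell changes.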
+        top_info()->dependencies()->AssumePropertyCell(cell);
+        auto cell_type = it.property_details().cell_type();
+        if (cell_type == PropertyCellType::kConstant ||
+            cell_type == PropertyCellType::kUndefined) {
+          Handle<Object> constant_object(cell->value(), isolate());
+          if (constant_object->IsConsString()) {
+            constant_object =
+                String::Flatten(Handle<String>::cast(constant_object));
+          }
+          HConstant* constant = New<HConstant>(constant_object);
+          return ast_context()->ReturnInstruction(constant, expr->id());
+        } else {
+          auto access = HObjectAccess::ForPropertyCellValue();
+          UniqueSet<Map>* field_maps = nullptr;
+          if (cell_type == PropertyCellType::kConstantType) {
+            switch (cell->GetConstantType()) {
+              case PropertyCellConstantType::kSmi:
+                access = access.WithRepresentation(Representation::Smi());
+                break;
+              case PropertyCellConstantType::kStableMap: {
+                // Check that the map really is stable. The heap object could
+                // have mutated without the cell updating state. In that case,
+                // make no promises about the loaded value except that it's a
+                // heap object.
+                access =
+                    access.WithRepresentation(Representation::HeapObject());
+                Handle<Map> map(HeapObject::cast(cell->value())->map());
+                if (map->is_stable()) {
+                  field_maps = new (zone())
+                      UniqueSet<Map>(Unique<Map>::CreateImmovable(map), zone());
+                }
+                break;
+              }
+            }
+          }
+          HConstant* cell_constant = Add<HConstant>(cell);
+          HLoadNamedField* instr;
+          if (field_maps == nullptr) {
+            instr = New<HLoadNamedField>(cell_constant, nullptr, access);
+          } else {
+            instr = New<HLoadNamedField>(cell_constant, nullptr, access,
+                                         field_maps, HType::HeapObject());
+          }
+          instr->ClearDependsOnFlag(kInobjectFields);
+          instr->SetDependsOnFlag(kGlobalVars);
+          return ast_context()->ReturnInstruction(instr, expr->id());
+        }
+      } else {
+        HValue* global_object = Add<HLoadNamedField>(
+            BuildGetNativeContext(), nullptr,
+            HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
+        HLoadGlobalGeneric* instr = New<HLoadGlobalGeneric>(
+            global_object, variable->name(), ast_context()->typeof_mode());
+        instr->SetVectorAndSlot(handle(current_feedback_vector(), isolate()),
+                                expr->VariableFeedbackSlot());
+        return ast_context()->ReturnInstruction(instr, expr->id());
+      }
+    }
+
+    case VariableLocation::PARAMETER:
+    case VariableLocation::LOCAL: {
+      HValue* value = LookupAndMakeLive(variable);
+      if (value == graph()->GetConstantHole()) {
+        DCHECK(IsDeclaredVariableMode(variable->mode()) &&
+               variable->mode() != VAR);
+        return Bailout(kReferenceToUninitializedVariable);
+      }
+      return ast_context()->ReturnValue(value);
+    }
+
+    case VariableLocation::CONTEXT: {
+      HValue* context = BuildContextChainWalk(variable);
+      HLoadContextSlot::Mode mode;
+      switch (variable->mode()) {
+        case LET:
+        case CONST:
+          mode = HLoadContextSlot::kCheckDeoptimize;
+          break;
+        case CONST_LEGACY:
+          mode = HLoadContextSlot::kCheckReturnUndefined;
+          break;
+        default:
+          mode = HLoadContextSlot::kNoCheck;
+          break;
+      }
+      HLoadContextSlot* instr =
+          new(zone()) HLoadContextSlot(context, variable->index(), mode);
+      return ast_context()->ReturnInstruction(instr, expr->id());
+    }
+
+    case VariableLocation::LOOKUP:
+      return Bailout(kReferenceToAVariableWhichRequiresDynamicLookup);
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  HConstant* instr = New<HConstant>(expr->value());
+  return ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  Callable callable = CodeFactory::FastCloneRegExp(isolate());
+  HValue* values[] = {
+      context(), AddThisFunction(), Add<HConstant>(expr->literal_index()),
+      Add<HConstant>(expr->pattern()), Add<HConstant>(expr->flags())};
+  HConstant* stub_value = Add<HConstant>(callable.code());
+  HInstruction* instr = New<HCallWithDescriptor>(
+      stub_value, 0, callable.descriptor(),
+      Vector<HValue*>(values, arraysize(values)), NORMAL_CALL);
+  return ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+static bool CanInlinePropertyAccess(Handle<Map> map) {
+  if (map->instance_type() == HEAP_NUMBER_TYPE) return true;
+  if (map->instance_type() < FIRST_NONSTRING_TYPE) return true;
+  return map->IsJSObjectMap() && !map->is_dictionary_map() &&
+         !map->has_named_interceptor() &&
+         // TODO(verwaest): Whitelist contexts to which we have access.
+         !map->is_access_check_needed();
+}
+
+
+// Determines whether the given array or object literal boilerplate satisfies
+// all limits to be considered for fast deep-copying, counting every copied
+// property and element against *max_properties.
+static bool IsFastLiteral(Handle<JSObject> boilerplate,
+                          int max_depth,
+                          int* max_properties) {
+  if (boilerplate->map()->is_deprecated() &&
+      !JSObject::TryMigrateInstance(boilerplate)) {
+    return false;
+  }
+
+  DCHECK(max_depth >= 0 && *max_properties >= 0);
+  if (max_depth == 0) return false;
+
+  Isolate* isolate = boilerplate->GetIsolate();
+  Handle<FixedArrayBase> elements(boilerplate->elements());
+  if (elements->length() > 0 &&
+      elements->map() != isolate->heap()->fixed_cow_array_map()) {
+    if (boilerplate->HasFastSmiOrObjectElements()) {
+      Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+      int length = elements->length();
+      for (int i = 0; i < length; i++) {
+        if ((*max_properties)-- == 0) return false;
+        Handle<Object> value(fast_elements->get(i), isolate);
+        if (value->IsJSObject()) {
+          Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+          if (!IsFastLiteral(value_object,
+                             max_depth - 1,
+                             max_properties)) {
+            return false;
+          }
+        }
+      }
+    } else if (!boilerplate->HasFastDoubleElements()) {
+      return false;
+    }
+  }
+
+  Handle<FixedArray> properties(boilerplate->properties());
+  if (properties->length() > 0) {
+    return false;
+  } else {
+    Handle<DescriptorArray> descriptors(
+        boilerplate->map()->instance_descriptors());
+    int limit = boilerplate->map()->NumberOfOwnDescriptors();
+    for (int i = 0; i < limit; i++) {
+      PropertyDetails details = descriptors->GetDetails(i);
+      if (details.type() != DATA) continue;
+      if ((*max_properties)-- == 0) return false;
+      FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
+      if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
+      Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
+                           isolate);
+      if (value->IsJSObject()) {
+        Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+        if (!IsFastLiteral(value_object,
+                           max_depth - 1,
+                           max_properties)) {
+          return false;
+        }
+      }
+    }
+  }
+  return true;
+}
+
+
+void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+
+  Handle<JSFunction> closure = function_state()->compilation_info()->closure();
+  HInstruction* literal;
+
+  // Check whether to use fast or slow deep-copying for boilerplate.
+  int max_properties = kMaxFastLiteralProperties;
+  Handle<Object> literals_cell(
+      closure->literals()->literal(expr->literal_index()), isolate());
+  Handle<AllocationSite> site;
+  Handle<JSObject> boilerplate;
+  if (!literals_cell->IsUndefined()) {
+    // Retrieve the boilerplate from the allocation site.
+    site = Handle<AllocationSite>::cast(literals_cell);
+    boilerplate = Handle<JSObject>(JSObject::cast(site->transition_info()),
+                                   isolate());
+  }
+
+  if (!boilerplate.is_null() &&
+      IsFastLiteral(boilerplate, kMaxFastLiteralDepth, &max_properties)) {
+    AllocationSiteUsageContext site_context(isolate(), site, false);
+    site_context.EnterNewScope();
+    literal = BuildFastLiteral(boilerplate, &site_context);
+    site_context.ExitScope(site, boilerplate);
+  } else {
+    NoObservableSideEffectsScope no_effects(this);
+    Handle<FixedArray> constant_properties = expr->constant_properties();
+    int literal_index = expr->literal_index();
+    int flags = expr->ComputeFlags(true);
+
+    Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
+                        Add<HConstant>(constant_properties),
+                        Add<HConstant>(flags));
+
+    Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral;
+    literal = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 4);
+  }
+
+  // The object is expected in the bailout environment during computation
+  // of the property values and is the value of the entire expression.
+  Push(literal);
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->is_computed_name()) return Bailout(kComputedPropertyName);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key()->AsLiteral();
+    Expression* value = property->value();
+
+    switch (property->kind()) {
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(value));
+        // Fall through.
+      case ObjectLiteral::Property::COMPUTED:
+        // It is safe to use [[Put]] here because the boilerplate already
+        // contains computed properties with an uninitialized value.
+        if (key->value()->IsInternalizedString()) {
+          if (property->emit_store()) {
+            CHECK_ALIVE(VisitForValue(value));
+            HValue* value = Pop();
+
+            Handle<Map> map = property->GetReceiverType();
+            Handle<String> name = key->AsPropertyName();
+            HValue* store;
+            FeedbackVectorSlot slot = property->GetSlot();
+            if (map.is_null()) {
+              // If we don't know the monomorphic type, do a generic store.
+              CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot, literal,
+                                                    name, value));
+            } else {
+              PropertyAccessInfo info(this, STORE, map, name);
+              if (info.CanAccessMonomorphic()) {
+                HValue* checked_literal = Add<HCheckMaps>(literal, map);
+                DCHECK(!info.IsAccessorConstant());
+                store = BuildMonomorphicAccess(
+                    &info, literal, checked_literal, value,
+                    BailoutId::None(), BailoutId::None());
+              } else {
+                CHECK_ALIVE(store = BuildNamedGeneric(STORE, NULL, slot,
+                                                      literal, name, value));
+              }
+            }
+            if (store->IsInstruction()) {
+              AddInstruction(HInstruction::cast(store));
+            }
+            DCHECK(store->HasObservableSideEffects());
+            Add<HSimulate>(key->id(), REMOVABLE_SIMULATE);
+
+            // Add [[HomeObject]] to function literals.
+            if (FunctionLiteral::NeedsHomeObject(property->value())) {
+              Handle<Symbol> sym = isolate()->factory()->home_object_symbol();
+              HInstruction* store_home = BuildNamedGeneric(
+                  STORE, NULL, property->GetSlot(1), value, sym, literal);
+              AddInstruction(store_home);
+              DCHECK(store_home->HasObservableSideEffects());
+              Add<HSimulate>(property->value()->id(), REMOVABLE_SIMULATE);
+            }
+          } else {
+            CHECK_ALIVE(VisitForEffect(value));
+          }
+          break;
+        }
+        // Fall through.
+      case ObjectLiteral::Property::PROTOTYPE:
+      case ObjectLiteral::Property::SETTER:
+      case ObjectLiteral::Property::GETTER:
+        return Bailout(kObjectLiteralWithComplexProperty);
+      default: UNREACHABLE();
+    }
+  }
+
+  if (expr->has_function()) {
+    // Return the result of the transformation to fast properties
+    // instead of the original since this operation changes the map
+    // of the object. This makes sure that the original object won't
+    // be used by other optimized code before it is transformed
+    // (e.g. because of code motion).
+    HToFastProperties* result = Add<HToFastProperties>(Pop());
+    return ast_context()->ReturnValue(result);
+  } else {
+    return ast_context()->ReturnValue(Pop());
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+  HInstruction* literal;
+
+  Handle<AllocationSite> site;
+  Handle<LiteralsArray> literals(environment()->closure()->literals(),
+                                 isolate());
+  bool uninitialized = false;
+  Handle<Object> literals_cell(literals->literal(expr->literal_index()),
+                               isolate());
+  Handle<JSObject> boilerplate_object;
+  if (literals_cell->IsUndefined()) {
+    uninitialized = true;
+    Handle<Object> raw_boilerplate;
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        isolate(), raw_boilerplate,
+        Runtime::CreateArrayLiteralBoilerplate(
+            isolate(), literals, expr->constant_elements(),
+            is_strong(function_language_mode())),
+        Bailout(kArrayBoilerplateCreationFailed));
+
+    boilerplate_object = Handle<JSObject>::cast(raw_boilerplate);
+    AllocationSiteCreationContext creation_context(isolate());
+    site = creation_context.EnterNewScope();
+    if (JSObject::DeepWalk(boilerplate_object, &creation_context).is_null()) {
+      return Bailout(kArrayBoilerplateCreationFailed);
+    }
+    creation_context.ExitScope(site, boilerplate_object);
+    literals->set_literal(expr->literal_index(), *site);
+
+    if (boilerplate_object->elements()->map() ==
+        isolate()->heap()->fixed_cow_array_map()) {
+      isolate()->counters()->cow_arrays_created_runtime()->Increment();
+    }
+  } else {
+    DCHECK(literals_cell->IsAllocationSite());
+    site = Handle<AllocationSite>::cast(literals_cell);
+    boilerplate_object = Handle<JSObject>(
+        JSObject::cast(site->transition_info()), isolate());
+  }
+
+  DCHECK(!boilerplate_object.is_null());
+  DCHECK(site->SitePointsToLiteral());
+
+  ElementsKind boilerplate_elements_kind =
+      boilerplate_object->GetElementsKind();
+
+  // Check whether to use fast or slow deep-copying for boilerplate.
+  int max_properties = kMaxFastLiteralProperties;
+  if (IsFastLiteral(boilerplate_object,
+                    kMaxFastLiteralDepth,
+                    &max_properties)) {
+    AllocationSiteUsageContext site_context(isolate(), site, false);
+    site_context.EnterNewScope();
+    literal = BuildFastLiteral(boilerplate_object, &site_context);
+    site_context.ExitScope(site, boilerplate_object);
+  } else {
+    NoObservableSideEffectsScope no_effects(this);
+    // The boilerplate already exists and its constant elements are never
+    // accessed, so pass an empty fixed array to the runtime function instead.
+    Handle<FixedArray> constants = isolate()->factory()->empty_fixed_array();
+    int literal_index = expr->literal_index();
+    int flags = expr->ComputeFlags(true);
+
+    Add<HPushArguments>(AddThisFunction(), Add<HConstant>(literal_index),
+                        Add<HConstant>(constants), Add<HConstant>(flags));
+
+    Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
+    literal = Add<HCallRuntime>(Runtime::FunctionForId(function_id), 4);
+
+    // Register to deopt if the boilerplate ElementsKind changes.
+    top_info()->dependencies()->AssumeTransitionStable(site);
+  }
+
+  // The array is expected in the bailout environment during computation
+  // of the element values and is the value of the entire expression.
+  Push(literal);
+
+  HInstruction* elements = NULL;
+
+  for (int i = 0; i < length; i++) {
+    Expression* subexpr = subexprs->at(i);
+    if (subexpr->IsSpread()) {
+      return Bailout(kSpread);
+    }
+
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+    CHECK_ALIVE(VisitForValue(subexpr));
+    HValue* value = Pop();
+    if (!Smi::IsValid(i)) return Bailout(kNonSmiKeyInArrayLiteral);
+
+    elements = AddLoadElements(literal);
+
+    HValue* key = Add<HConstant>(i);
+
+    switch (boilerplate_elements_kind) {
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS: {
+        HStoreKeyed* instr = Add<HStoreKeyed>(elements, key, value, nullptr,
+                                              boilerplate_elements_kind);
+        instr->SetUninitialized(uninitialized);
+        break;
+      }
+      default:
+        UNREACHABLE();
+        break;
+    }
+
+    Add<HSimulate>(expr->GetIdForElement(i));
+  }
+
+  return ast_context()->ReturnValue(Pop());
+}
+
+
+HCheckMaps* HOptimizedGraphBuilder::AddCheckMap(HValue* object,
+                                                Handle<Map> map) {
+  BuildCheckHeapObject(object);
+  return Add<HCheckMaps>(object, map);
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
+    PropertyAccessInfo* info,
+    HValue* checked_object) {
+  // See if this is a load of an immutable property. A found, read-only,
+  // non-configurable data property can never change, so fold it into a
+  // constant.
+  if (checked_object->ActualValue()->IsConstant()) {
+    Handle<Object> object(
+        HConstant::cast(checked_object->ActualValue())->handle(isolate()));
+
+    if (object->IsJSObject()) {
+      LookupIterator it(object, info->name(),
+                        LookupIterator::OWN_SKIP_INTERCEPTOR);
+      Handle<Object> value = JSReceiver::GetDataProperty(&it);
+      if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
+        return New<HConstant>(value);
+      }
+    }
+  }
+
+  HObjectAccess access = info->access();
+  if (access.representation().IsDouble() &&
+      (!FLAG_unbox_double_fields || !access.IsInobject())) {
+    // Load the heap number.
+    checked_object = Add<HLoadNamedField>(
+        checked_object, nullptr,
+        access.WithRepresentation(Representation::Tagged()));
+    // Load the double value from it.
+    access = HObjectAccess::ForHeapNumberValue();
+  }
+
+  SmallMapList* map_list = info->field_maps();
+  if (map_list->length() == 0) {
+    return New<HLoadNamedField>(checked_object, checked_object, access);
+  }
+
+  UniqueSet<Map>* maps = new(zone()) UniqueSet<Map>(map_list->length(), zone());
+  for (int i = 0; i < map_list->length(); ++i) {
+    maps->Add(Unique<Map>::CreateImmovable(map_list->at(i)), zone());
+  }
+  return New<HLoadNamedField>(
+      checked_object, checked_object, access, maps, info->field_type());
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
+    PropertyAccessInfo* info,
+    HValue* checked_object,
+    HValue* value) {
+  bool transition_to_field = info->IsTransition();
+  // TODO(verwaest): Move this logic into PropertyAccessInfo.
+  HObjectAccess field_access = info->access();
+
+  HStoreNamedField* instr;
+  if (field_access.representation().IsDouble() &&
+      (!FLAG_unbox_double_fields || !field_access.IsInobject())) {
+    HObjectAccess heap_number_access =
+        field_access.WithRepresentation(Representation::Tagged());
+    if (transition_to_field) {
+      // The store requires a mutable HeapNumber to be allocated.
+      NoObservableSideEffectsScope no_side_effects(this);
+      HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
+
+      // TODO(hpayer): Allocation site pretenuring support.
+      HInstruction* heap_number = Add<HAllocate>(heap_number_size,
+          HType::HeapObject(),
+          NOT_TENURED,
+          MUTABLE_HEAP_NUMBER_TYPE);
+      AddStoreMapConstant(
+          heap_number, isolate()->factory()->mutable_heap_number_map());
+      Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
+                            value);
+      instr = New<HStoreNamedField>(checked_object->ActualValue(),
+                                    heap_number_access,
+                                    heap_number);
+    } else {
+      // Already holds a HeapNumber; load the box and write its value field.
+      HInstruction* heap_number =
+          Add<HLoadNamedField>(checked_object, nullptr, heap_number_access);
+      instr = New<HStoreNamedField>(heap_number,
+                                    HObjectAccess::ForHeapNumberValue(),
+                                    value, STORE_TO_INITIALIZED_ENTRY);
+    }
+  } else {
+    if (field_access.representation().IsHeapObject()) {
+      BuildCheckHeapObject(value);
+    }
+
+    if (!info->field_maps()->is_empty()) {
+      DCHECK(field_access.representation().IsHeapObject());
+      value = Add<HCheckMaps>(value, info->field_maps());
+    }
+
+    // This is a normal store.
+    instr = New<HStoreNamedField>(
+        checked_object->ActualValue(), field_access, value,
+        transition_to_field ? INITIALIZING_STORE : STORE_TO_INITIALIZED_ENTRY);
+  }
+
+  if (transition_to_field) {
+    Handle<Map> transition(info->transition());
+    DCHECK(!transition->is_deprecated());
+    instr->SetTransition(Add<HConstant>(transition));
+  }
+  return instr;
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::IsCompatible(
+    PropertyAccessInfo* info) {
+  if (!CanInlinePropertyAccess(map_)) return false;
+
+  // Currently only handle Type::Number as a polymorphic case.
+  // TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
+  // instruction.
+  if (IsNumberType()) return false;
+
+  // Values are only compatible for monomorphic load if they all behave the same
+  // regarding value wrappers.
+  if (IsValueWrapped() != info->IsValueWrapped()) return false;
+
+  if (!LookupDescriptor()) return false;
+
+  if (!IsFound()) {
+    return (!info->IsFound() || info->has_holder()) &&
+           map()->prototype() == info->map()->prototype();
+  }
+
+  // Mismatch if the other access info found the property in the prototype
+  // chain.
+  if (info->has_holder()) return false;
+
+  if (IsAccessorConstant()) {
+    return accessor_.is_identical_to(info->accessor_) &&
+        api_holder_.is_identical_to(info->api_holder_);
+  }
+
+  if (IsDataConstant()) {
+    return constant_.is_identical_to(info->constant_);
+  }
+
+  DCHECK(IsData());
+  if (!info->IsData()) return false;
+
+  Representation r = access_.representation();
+  if (IsLoad()) {
+    if (!info->access_.representation().IsCompatibleForLoad(r)) return false;
+  } else {
+    if (!info->access_.representation().IsCompatibleForStore(r)) return false;
+  }
+  if (info->access_.offset() != access_.offset()) return false;
+  if (info->access_.IsInobject() != access_.IsInobject()) return false;
+  if (IsLoad()) {
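+    // For loads the field maps of both accesses are merged by union; an
+    // empty set means no stable map is known and forces the merged access
+    // to be generic.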
+    if (field_maps_.is_empty()) {
+      info->field_maps_.Clear();
+    } else if (!info->field_maps_.is_empty()) {
+      for (int i = 0; i < field_maps_.length(); ++i) {
+        info->field_maps_.AddMapIfMissing(field_maps_.at(i), info->zone());
+      }
+      info->field_maps_.Sort();
+    }
+  } else {
+    // We can only merge stores that agree on their field maps. The comparison
+    // below is safe, since we keep the field maps sorted.
+    if (field_maps_.length() != info->field_maps_.length()) return false;
+    for (int i = 0; i < field_maps_.length(); ++i) {
+      if (!field_maps_.at(i).is_identical_to(info->field_maps_.at(i))) {
+        return false;
+      }
+    }
+  }
+  info->GeneralizeRepresentation(r);
+  info->field_type_ = info->field_type_.Combine(field_type_);
+  return true;
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupDescriptor() {
+  if (!map_->IsJSObjectMap()) return true;
+  LookupDescriptor(*map_, *name_);
+  return LoadResult(map_);
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
+  if (!IsLoad() && IsProperty() && IsReadOnly()) {
+    return false;
+  }
+
+  if (IsData()) {
+    // Construct the object field access.
+    int index = GetLocalFieldIndexFromMap(map);
+    access_ = HObjectAccess::ForField(map, index, representation(), name_);
+
+    // Load field map for heap objects.
+    return LoadFieldMaps(map);
+  } else if (IsAccessorConstant()) {
+    Handle<Object> accessors = GetAccessorsFromMap(map);
+    if (!accessors->IsAccessorPair()) return false;
+    Object* raw_accessor =
+        IsLoad() ? Handle<AccessorPair>::cast(accessors)->getter()
+                 : Handle<AccessorPair>::cast(accessors)->setter();
+    if (!raw_accessor->IsJSFunction()) return false;
+    Handle<JSFunction> accessor = handle(JSFunction::cast(raw_accessor));
+    if (accessor->shared()->IsApiFunction()) {
+      CallOptimization call_optimization(accessor);
+      if (call_optimization.is_simple_api_call()) {
+        CallOptimization::HolderLookup holder_lookup;
+        api_holder_ =
+            call_optimization.LookupHolderOfExpectedType(map_, &holder_lookup);
+      }
+    }
+    accessor_ = accessor;
+  } else if (IsDataConstant()) {
+    constant_ = GetConstantFromMap(map);
+  }
+
+  return true;
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadFieldMaps(
+    Handle<Map> map) {
+  // Clear any previously collected field maps/type.
+  field_maps_.Clear();
+  field_type_ = HType::Tagged();
+
+  // Figure out the field type from the accessor map.
+  Handle<HeapType> field_type = GetFieldTypeFromMap(map);
+
+  // Collect the (stable) maps from the field type.
+  int num_field_maps = field_type->NumClasses();
+  if (num_field_maps > 0) {
+    DCHECK(access_.representation().IsHeapObject());
+    field_maps_.Reserve(num_field_maps, zone());
+    HeapType::Iterator<Map> it = field_type->Classes();
+    while (!it.Done()) {
+      Handle<Map> field_map = it.Current();
+      if (!field_map->is_stable()) {
+        field_maps_.Clear();
+        break;
+      }
+      field_maps_.Add(field_map, zone());
+      it.Advance();
+    }
+  }
+
+  if (field_maps_.is_empty()) {
+    // Store is not safe if the field map was cleared.
+    return IsLoad() || !field_type->Is(HeapType::None());
+  }
+
+  field_maps_.Sort();
+  DCHECK_EQ(num_field_maps, field_maps_.length());
+
+  // Determine field HType from field HeapType.
+  field_type_ = HType::FromType<HeapType>(field_type);
+  DCHECK(field_type_.IsHeapObject());
+
+  // Add dependency on the map that introduced the field.
+  top_info()->dependencies()->AssumeFieldType(GetFieldOwnerFromMap(map));
+  return true;
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::LookupInPrototypes() {
+  Handle<Map> map = this->map();
+
+  while (map->prototype()->IsJSObject()) {
+    holder_ = handle(JSObject::cast(map->prototype()));
+    if (holder_->map()->is_deprecated()) {
+      JSObject::TryMigrateInstance(holder_);
+    }
+    map = Handle<Map>(holder_->map());
+    if (!CanInlinePropertyAccess(map)) {
+      NotFound();
+      return false;
+    }
+    LookupDescriptor(*map, *name_);
+    if (IsFound()) return LoadResult(map);
+  }
+
+  NotFound();
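+  // The missing property is only conclusive if the walk ended at null; a
+  // JSProxy in the prototype chain could still intercept the access.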
+  return !map->prototype()->IsJSReceiver();
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::IsIntegerIndexedExotic() {
+  InstanceType instance_type = map_->instance_type();
+  return instance_type == JS_TYPED_ARRAY_TYPE && name_->IsString() &&
+         IsSpecialIndex(isolate()->unicode_cache(), String::cast(*name_));
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
+  if (!CanInlinePropertyAccess(map_)) return false;
+  if (IsJSObjectFieldAccessor()) return IsLoad();
+  if (IsJSArrayBufferViewFieldAccessor()) return IsLoad();
+  if (map_->IsJSFunctionMap() && map_->is_constructor() &&
+      !map_->has_non_instance_prototype() &&
+      name_.is_identical_to(isolate()->factory()->prototype_string())) {
+    return IsLoad();
+  }
+  if (!LookupDescriptor()) return false;
+  if (IsFound()) return IsLoad() || !IsReadOnly();
+  if (IsIntegerIndexedExotic()) return false;
+  if (!LookupInPrototypes()) return false;
+  if (IsLoad()) return true;
+
+  if (IsAccessorConstant()) return true;
+  LookupTransition(*map_, *name_, NONE);
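+  // A store may add a new data field through a map transition, but only if
+  // the map still has unused property fields to hold it.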
+  if (IsTransitionToData() && map_->unused_property_fields() > 0) {
+    // Construct the object field access.
+    int descriptor = transition()->LastAdded();
+    int index =
+        transition()->instance_descriptors()->GetFieldIndex(descriptor) -
+        map_->GetInObjectProperties();
+    PropertyDetails details =
+        transition()->instance_descriptors()->GetDetails(descriptor);
+    Representation representation = details.representation();
+    access_ = HObjectAccess::ForField(map_, index, representation, name_);
+
+    // Load field map for heap objects.
+    return LoadFieldMaps(transition());
+  }
+  return false;
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
+    SmallMapList* maps) {
+  DCHECK(map_.is_identical_to(maps->first()));
+  if (!CanAccessMonomorphic()) return false;
+  STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
+  if (maps->length() > kMaxLoadPolymorphism) return false;
+  HObjectAccess access = HObjectAccess::ForMap();  // bogus default
+  if (GetJSObjectFieldAccess(&access)) {
+    for (int i = 1; i < maps->length(); ++i) {
+      PropertyAccessInfo test_info(builder_, access_type_, maps->at(i), name_);
+      HObjectAccess test_access = HObjectAccess::ForMap();  // bogus default
+      if (!test_info.GetJSObjectFieldAccess(&test_access)) return false;
+      if (!access.Equals(test_access)) return false;
+    }
+    return true;
+  }
+  if (GetJSArrayBufferViewFieldAccess(&access)) {
+    for (int i = 1; i < maps->length(); ++i) {
+      PropertyAccessInfo test_info(builder_, access_type_, maps->at(i), name_);
+      HObjectAccess test_access = HObjectAccess::ForMap();  // bogus default
+      if (!test_info.GetJSArrayBufferViewFieldAccess(&test_access)) {
+        return false;
+      }
+      if (!access.Equals(test_access)) return false;
+    }
+    return true;
+  }
+
+  // Currently only handle numbers as a polymorphic case.
+  // TODO(verwaest): Support monomorphic handling of numbers with a HCheckNumber
+  // instruction.
+  if (IsNumberType()) return false;
+
+  // Multiple maps cannot transition to the same target map.
+  DCHECK(!IsLoad() || !IsTransition());
+  if (IsTransition() && maps->length() > 1) return false;
+
+  for (int i = 1; i < maps->length(); ++i) {
+    PropertyAccessInfo test_info(builder_, access_type_, maps->at(i), name_);
+    if (!test_info.IsCompatible(this)) return false;
+  }
+
+  return true;
+}
+
+
+Handle<Map> HOptimizedGraphBuilder::PropertyAccessInfo::map() {
+  Handle<JSFunction> ctor;
+  if (Map::GetConstructorFunction(
+          map_, handle(current_info()->closure()->context()->native_context()))
+          .ToHandle(&ctor)) {
+    return handle(ctor->initial_map());
+  }
+  return map_;
+}
+
+
+static bool NeedsWrapping(Handle<Map> map, Handle<JSFunction> target) {
+  return !map->IsJSObjectMap() &&
+         is_sloppy(target->shared()->language_mode()) &&
+         !target->shared()->native();
+}
+
+
+bool HOptimizedGraphBuilder::PropertyAccessInfo::NeedsWrappingFor(
+    Handle<JSFunction> target) const {
+  return NeedsWrapping(map_, target);
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildMonomorphicAccess(
+    PropertyAccessInfo* info, HValue* object, HValue* checked_object,
+    HValue* value, BailoutId ast_id, BailoutId return_id,
+    bool can_inline_accessor) {
+  HObjectAccess access = HObjectAccess::ForMap();  // bogus default
+  if (info->GetJSObjectFieldAccess(&access)) {
+    DCHECK(info->IsLoad());
+    return New<HLoadNamedField>(object, checked_object, access);
+  }
+
+  if (info->GetJSArrayBufferViewFieldAccess(&access)) {
+    DCHECK(info->IsLoad());
+    checked_object = Add<HCheckArrayBufferNotNeutered>(checked_object);
+    return New<HLoadNamedField>(object, checked_object, access);
+  }
+
+  if (info->name().is_identical_to(isolate()->factory()->prototype_string()) &&
+      info->map()->IsJSFunctionMap() && info->map()->is_constructor()) {
+    DCHECK(!info->map()->has_non_instance_prototype());
+    return New<HLoadFunctionPrototype>(checked_object);
+  }
+
+  HValue* checked_holder = checked_object;
+  if (info->has_holder()) {
+    Handle<JSObject> prototype(JSObject::cast(info->map()->prototype()));
+    checked_holder = BuildCheckPrototypeMaps(prototype, info->holder());
+  }
+
+  if (!info->IsFound()) {
+    DCHECK(info->IsLoad());
+    if (is_strong(function_language_mode())) {
+      return New<HCallRuntime>(
+          Runtime::FunctionForId(Runtime::kThrowStrongModeImplicitConversion),
+          0);
+    } else {
+      return graph()->GetConstantUndefined();
+    }
+  }
+
+  if (info->IsData()) {
+    if (info->IsLoad()) {
+      return BuildLoadNamedField(info, checked_holder);
+    } else {
+      return BuildStoreNamedField(info, checked_object, value);
+    }
+  }
+
+  if (info->IsTransition()) {
+    DCHECK(!info->IsLoad());
+    return BuildStoreNamedField(info, checked_object, value);
+  }
+
+  if (info->IsAccessorConstant()) {
+    Push(checked_object);
+    int argument_count = 1;
+    if (!info->IsLoad()) {
+      argument_count = 2;
+      Push(value);
+    }
+
+    if (info->NeedsWrappingFor(info->accessor())) {
+      HValue* function = Add<HConstant>(info->accessor());
+      PushArgumentsFromEnvironment(argument_count);
+      return New<HCallFunction>(function, argument_count,
+                                ConvertReceiverMode::kNotNullOrUndefined);
+    } else if (FLAG_inline_accessors && can_inline_accessor) {
+      bool success = info->IsLoad()
+          ? TryInlineGetter(info->accessor(), info->map(), ast_id, return_id)
+          : TryInlineSetter(
+              info->accessor(), info->map(), ast_id, return_id, value);
+      if (success || HasStackOverflow()) return NULL;
+    }
+
+    PushArgumentsFromEnvironment(argument_count);
+    return BuildCallConstantFunction(info->accessor(), argument_count);
+  }
+
+  DCHECK(info->IsDataConstant());
+  if (info->IsLoad()) {
+    return New<HConstant>(info->constant());
+  } else {
+    return New<HCheckValue>(value, Handle<JSFunction>::cast(info->constant()));
+  }
+}
+
+
+void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
+    PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+    BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
+    SmallMapList* maps, Handle<Name> name) {
+  // Something did not match; we must use a polymorphic access.
+  int count = 0;
+  HBasicBlock* join = NULL;
+  HBasicBlock* number_block = NULL;
+  bool handled_string = false;
+
+  bool handle_smi = false;
+  STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
+  int i;
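+  // First pass: count the maps that can be handled monomorphically and
+  // detect whether a heap-number map (and thus a Smi check) is involved.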
+  for (i = 0; i < maps->length() && count < kMaxLoadPolymorphism; ++i) {
+    PropertyAccessInfo info(this, access_type, maps->at(i), name);
+    if (info.IsStringType()) {
+      if (handled_string) continue;
+      handled_string = true;
+    }
+    if (info.CanAccessMonomorphic()) {
+      count++;
+      if (info.IsNumberType()) {
+        handle_smi = true;
+        break;
+      }
+    }
+  }
+
+  if (i < maps->length()) {
+    count = -1;
+    maps->Clear();
+  } else {
+    count = 0;
+  }
+  HControlInstruction* smi_check = NULL;
+  handled_string = false;
+
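+  // Second pass: emit a chain of map compares with one branch per map that
+  // can be handled monomorphically.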
+  for (i = 0; i < maps->length() && count < kMaxLoadPolymorphism; ++i) {
+    PropertyAccessInfo info(this, access_type, maps->at(i), name);
+    if (info.IsStringType()) {
+      if (handled_string) continue;
+      handled_string = true;
+    }
+    if (!info.CanAccessMonomorphic()) continue;
+
+    if (count == 0) {
+      join = graph()->CreateBasicBlock();
+      if (handle_smi) {
+        HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
+        HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
+        number_block = graph()->CreateBasicBlock();
+        smi_check = New<HIsSmiAndBranch>(
+            object, empty_smi_block, not_smi_block);
+        FinishCurrentBlock(smi_check);
+        GotoNoSimulate(empty_smi_block, number_block);
+        set_current_block(not_smi_block);
+      } else {
+        BuildCheckHeapObject(object);
+      }
+    }
+    ++count;
+    HBasicBlock* if_true = graph()->CreateBasicBlock();
+    HBasicBlock* if_false = graph()->CreateBasicBlock();
+    HUnaryControlInstruction* compare;
+
+    HValue* dependency;
+    if (info.IsNumberType()) {
+      Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
+      compare = New<HCompareMap>(object, heap_number_map, if_true, if_false);
+      dependency = smi_check;
+    } else if (info.IsStringType()) {
+      compare = New<HIsStringAndBranch>(object, if_true, if_false);
+      dependency = compare;
+    } else {
+      compare = New<HCompareMap>(object, info.map(), if_true, if_false);
+      dependency = compare;
+    }
+    FinishCurrentBlock(compare);
+
+    if (info.IsNumberType()) {
+      GotoNoSimulate(if_true, number_block);
+      if_true = number_block;
+    }
+
+    set_current_block(if_true);
+
+    HValue* access =
+        BuildMonomorphicAccess(&info, object, dependency, value, ast_id,
+                               return_id, FLAG_polymorphic_inlining);
+
+    HValue* result = NULL;
+    switch (access_type) {
+      case LOAD:
+        result = access;
+        break;
+      case STORE:
+        result = value;
+        break;
+    }
+
+    if (access == NULL) {
+      if (HasStackOverflow()) return;
+    } else {
+      if (access->IsInstruction()) {
+        HInstruction* instr = HInstruction::cast(access);
+        if (!instr->IsLinked()) AddInstruction(instr);
+      }
+      if (!ast_context()->IsEffect()) Push(result);
+    }
+
+    if (current_block() != NULL) Goto(join);
+    set_current_block(if_false);
+  }
+
+  // Finish up.  Unconditionally deoptimize if we've handled all the maps we
+  // know about and do not want to handle ones we've never seen.  Otherwise
+  // use a generic IC.
+  if (count == maps->length() && FLAG_deoptimize_uncommon_cases) {
+    FinishExitWithHardDeoptimization(
+        Deoptimizer::kUnknownMapInPolymorphicAccess);
+  } else {
+    HInstruction* instr =
+        BuildNamedGeneric(access_type, expr, slot, object, name, value);
+    AddInstruction(instr);
+    if (!ast_context()->IsEffect()) Push(access_type == LOAD ? instr : value);
+
+    if (join != NULL) {
+      Goto(join);
+    } else {
+      Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+      if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+      return;
+    }
+  }
+
+  DCHECK(join != NULL);
+  if (join->HasPredecessor()) {
+    join->SetJoinId(ast_id);
+    set_current_block(join);
+    if (!ast_context()->IsEffect()) ast_context()->ReturnValue(Pop());
+  } else {
+    set_current_block(NULL);
+  }
+}
+
+
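+// Collects the receiver maps recorded for a property access and returns
+// whether the access is effectively monomorphic and inlinable.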
+static bool ComputeReceiverTypes(Expression* expr,
+                                 HValue* receiver,
+                                 SmallMapList** t,
+                                 Zone* zone) {
+  SmallMapList* maps = expr->GetReceiverTypes();
+  *t = maps;
+  bool monomorphic = expr->IsMonomorphic();
+  if (maps != NULL && receiver->HasMonomorphicJSObjectType()) {
+    Map* root_map = receiver->GetMonomorphicJSObjectMap()->FindRootMap();
+    maps->FilterForPossibleTransitions(root_map);
+    monomorphic = maps->length() == 1;
+  }
+  return monomorphic && CanInlinePropertyAccess(maps->first());
+}
+
+
+static bool AreStringTypes(SmallMapList* maps) {
+  for (int i = 0; i < maps->length(); i++) {
+    if (maps->at(i)->instance_type() >= FIRST_NONSTRING_TYPE) return false;
+  }
+  return true;
+}
+
+
+void HOptimizedGraphBuilder::BuildStore(Expression* expr, Property* prop,
+                                        FeedbackVectorSlot slot,
+                                        BailoutId ast_id, BailoutId return_id,
+                                        bool is_uninitialized) {
+  if (!prop->key()->IsPropertyName()) {
+    // Keyed store.
+    HValue* value = Pop();
+    HValue* key = Pop();
+    HValue* object = Pop();
+    bool has_side_effects = false;
+    HValue* result =
+        HandleKeyedElementAccess(object, key, value, expr, slot, ast_id,
+                                 return_id, STORE, &has_side_effects);
+    if (has_side_effects) {
+      if (!ast_context()->IsEffect()) Push(value);
+      Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+      if (!ast_context()->IsEffect()) Drop(1);
+    }
+    if (result == NULL) return;
+    return ast_context()->ReturnValue(value);
+  }
+
+  // Named store.
+  HValue* value = Pop();
+  HValue* object = Pop();
+
+  Literal* key = prop->key()->AsLiteral();
+  Handle<String> name = Handle<String>::cast(key->value());
+  DCHECK(!name.is_null());
+
+  HValue* access = BuildNamedAccess(STORE, ast_id, return_id, expr, slot,
+                                    object, name, value, is_uninitialized);
+  if (access == NULL) return;
+
+  if (!ast_context()->IsEffect()) Push(value);
+  if (access->IsInstruction()) AddInstruction(HInstruction::cast(access));
+  if (access->HasObservableSideEffects()) {
+    Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+  }
+  if (!ast_context()->IsEffect()) Drop(1);
+  return ast_context()->ReturnValue(value);
+}
+
+
+void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
+  Property* prop = expr->target()->AsProperty();
+  DCHECK(prop != NULL);
+  CHECK_ALIVE(VisitForValue(prop->obj()));
+  if (!prop->key()->IsPropertyName()) {
+    CHECK_ALIVE(VisitForValue(prop->key()));
+  }
+  CHECK_ALIVE(VisitForValue(expr->value()));
+  BuildStore(expr, prop, expr->AssignmentSlot(), expr->id(),
+             expr->AssignmentId(), expr->IsUninitialized());
+}
+
+
+// Because not every expression has a position and there is no common
+// superclass of Assignment and CountOperation, we cannot just pass the
+// owning expression instead of position and ast_id separately.
+void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
+    Variable* var, HValue* value, FeedbackVectorSlot slot, BailoutId ast_id) {
+  Handle<JSGlobalObject> global(current_info()->global_object());
+
+  // Lookup in script contexts.
+  {
+    Handle<ScriptContextTable> script_contexts(
+        global->native_context()->script_context_table());
+    ScriptContextTable::LookupResult lookup;
+    if (ScriptContextTable::Lookup(script_contexts, var->name(), &lookup)) {
+      if (lookup.mode == CONST) {
+        return Bailout(kNonInitializerAssignmentToConst);
+      }
+      Handle<Context> script_context =
+          ScriptContextTable::GetContext(script_contexts, lookup.context_index);
+
+      Handle<Object> current_value =
+          FixedArray::get(script_context, lookup.slot_index);
+
+      // If the value is not the hole, it will stay initialized,
+      // so no need to generate a check.
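+      // (Illustrative: if `x = 1;` is compiled before a script-level
+      // `let x;` has ever executed, the slot still holds the hole.)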
+      if (*current_value == *isolate()->factory()->the_hole_value()) {
+        return Bailout(kReferenceToUninitializedVariable);
+      }
+
+      HStoreNamedField* instr = Add<HStoreNamedField>(
+          Add<HConstant>(script_context),
+          HObjectAccess::ForContextSlot(lookup.slot_index), value);
+      USE(instr);
+      DCHECK(instr->HasObservableSideEffects());
+      Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+      return;
+    }
+  }
+
+  LookupIterator it(global, var->name(), LookupIterator::OWN);
+  GlobalPropertyAccess type = LookupGlobalProperty(var, &it, STORE);
+  if (type == kUseCell) {
+    Handle<PropertyCell> cell = it.GetPropertyCell();
+    top_info()->dependencies()->AssumePropertyCell(cell);
+    auto cell_type = it.property_details().cell_type();
+    if (cell_type == PropertyCellType::kConstant ||
+        cell_type == PropertyCellType::kUndefined) {
+      Handle<Object> constant(cell->value(), isolate());
+      if (value->IsConstant()) {
+        HConstant* c_value = HConstant::cast(value);
+        if (!constant.is_identical_to(c_value->handle(isolate()))) {
+          Add<HDeoptimize>(Deoptimizer::kConstantGlobalVariableAssignment,
+                           Deoptimizer::EAGER);
+        }
+      } else {
+        HValue* c_constant = Add<HConstant>(constant);
+        IfBuilder builder(this);
+        if (constant->IsNumber()) {
+          builder.If<HCompareNumericAndBranch>(value, c_constant, Token::EQ);
+        } else {
+          builder.If<HCompareObjectEqAndBranch>(value, c_constant);
+        }
+        builder.Then();
+        builder.Else();
+        Add<HDeoptimize>(Deoptimizer::kConstantGlobalVariableAssignment,
+                         Deoptimizer::EAGER);
+        builder.End();
+      }
+    }
+    HConstant* cell_constant = Add<HConstant>(cell);
+    auto access = HObjectAccess::ForPropertyCellValue();
+    if (cell_type == PropertyCellType::kConstantType) {
+      switch (cell->GetConstantType()) {
+        case PropertyCellConstantType::kSmi:
+          access = access.WithRepresentation(Representation::Smi());
+          break;
+        case PropertyCellConstantType::kStableMap: {
+          // The map may no longer be stable, so deopt if it's ever different
+          // from what is currently there, which will allow restabilization.
+          Handle<Map> map(HeapObject::cast(cell->value())->map());
+          Add<HCheckHeapObject>(value);
+          value = Add<HCheckMaps>(value, map);
+          access = access.WithRepresentation(Representation::HeapObject());
+          break;
+        }
+      }
+    }
+    HInstruction* instr = Add<HStoreNamedField>(cell_constant, access, value);
+    instr->ClearChangesFlag(kInobjectFields);
+    instr->SetChangesFlag(kGlobalVars);
+    if (instr->HasObservableSideEffects()) {
+      Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+    }
+  } else {
+    HValue* global_object = Add<HLoadNamedField>(
+        BuildGetNativeContext(), nullptr,
+        HObjectAccess::ForContextSlot(Context::EXTENSION_INDEX));
+    HStoreNamedGeneric* instr =
+        Add<HStoreNamedGeneric>(global_object, var->name(), value,
+                                function_language_mode(), PREMONOMORPHIC);
+    Handle<TypeFeedbackVector> vector =
+        handle(current_feedback_vector(), isolate());
+    instr->SetVectorAndSlot(vector, slot);
+    USE(instr);
+    DCHECK(instr->HasObservableSideEffects());
+    Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+  }
+}
+
+
+void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
+  Expression* target = expr->target();
+  VariableProxy* proxy = target->AsVariableProxy();
+  Property* prop = target->AsProperty();
+  DCHECK(proxy == NULL || prop == NULL);
+
+  // We have a second position recorded in the FullCodeGenerator to have
+  // type feedback for the binary operation.
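+  // (Illustrative: for `x += y`, `operation` below is the embedded
+  // `x + y` node, which carries its own type feedback.)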
+  BinaryOperation* operation = expr->binary_operation();
+
+  if (proxy != NULL) {
+    Variable* var = proxy->var();
+    if (var->mode() == LET) {
+      return Bailout(kUnsupportedLetCompoundAssignment);
+    }
+
+    CHECK_ALIVE(VisitForValue(operation));
+
+    switch (var->location()) {
+      case VariableLocation::GLOBAL:
+      case VariableLocation::UNALLOCATED:
+        HandleGlobalVariableAssignment(var, Top(), expr->AssignmentSlot(),
+                                       expr->AssignmentId());
+        break;
+
+      case VariableLocation::PARAMETER:
+      case VariableLocation::LOCAL:
+        if (var->mode() == CONST_LEGACY) {
+          return Bailout(kUnsupportedConstCompoundAssignment);
+        }
+        if (var->mode() == CONST) {
+          return Bailout(kNonInitializerAssignmentToConst);
+        }
+        BindIfLive(var, Top());
+        break;
+
+      case VariableLocation::CONTEXT: {
+        // Bail out if we try to mutate a parameter value in a function
+        // using the arguments object.  We do not (yet) correctly handle the
+        // arguments property of the function.
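+        // (Illustrative: a function that mentions `arguments` and compound-
+        // assigns to a parameter captured in a context slot hits this path.)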
+        if (current_info()->scope()->arguments() != NULL) {
+          // Parameters will be allocated to context slots.  We have no
+          // direct way to detect that the variable is a parameter so we do
+          // a linear search of the parameter variables.
+          int count = current_info()->scope()->num_parameters();
+          for (int i = 0; i < count; ++i) {
+            if (var == current_info()->scope()->parameter(i)) {
+              return Bailout(
+                  kAssignmentToParameterFunctionUsesArgumentsObject);
+            }
+          }
+        }
+
+        HStoreContextSlot::Mode mode;
+
+        switch (var->mode()) {
+          case LET:
+            mode = HStoreContextSlot::kCheckDeoptimize;
+            break;
+          case CONST:
+            return Bailout(kNonInitializerAssignmentToConst);
+          case CONST_LEGACY:
+            return ast_context()->ReturnValue(Pop());
+          default:
+            mode = HStoreContextSlot::kNoCheck;
+        }
+
+        HValue* context = BuildContextChainWalk(var);
+        HStoreContextSlot* instr = Add<HStoreContextSlot>(
+            context, var->index(), mode, Top());
+        if (instr->HasObservableSideEffects()) {
+          Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
+        }
+        break;
+      }
+
+      case VariableLocation::LOOKUP:
+        return Bailout(kCompoundAssignmentToLookupSlot);
+    }
+    return ast_context()->ReturnValue(Pop());
+
+  } else if (prop != NULL) {
+    CHECK_ALIVE(VisitForValue(prop->obj()));
+    HValue* object = Top();
+    HValue* key = NULL;
+    if (!prop->key()->IsPropertyName() || prop->IsStringAccess()) {
+      CHECK_ALIVE(VisitForValue(prop->key()));
+      key = Top();
+    }
+
+    CHECK_ALIVE(PushLoad(prop, object, key));
+
+    CHECK_ALIVE(VisitForValue(expr->value()));
+    HValue* right = Pop();
+    HValue* left = Pop();
+
+    Push(BuildBinaryOperation(operation, left, right, PUSH_BEFORE_SIMULATE));
+
+    BuildStore(expr, prop, expr->AssignmentSlot(), expr->id(),
+               expr->AssignmentId(), expr->IsUninitialized());
+  } else {
+    return Bailout(kInvalidLhsInCompoundAssignment);
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+
+  VariableProxy* proxy = expr->target()->AsVariableProxy();
+  Property* prop = expr->target()->AsProperty();
+  DCHECK(proxy == NULL || prop == NULL);
+
+  if (expr->is_compound()) {
+    HandleCompoundAssignment(expr);
+    return;
+  }
+
+  if (prop != NULL) {
+    HandlePropertyAssignment(expr);
+  } else if (proxy != NULL) {
+    Variable* var = proxy->var();
+
+    if (var->mode() == CONST) {
+      if (expr->op() != Token::INIT) {
+        return Bailout(kNonInitializerAssignmentToConst);
+      }
+    } else if (var->mode() == CONST_LEGACY) {
+      if (expr->op() != Token::INIT) {
+        CHECK_ALIVE(VisitForValue(expr->value()));
+        return ast_context()->ReturnValue(Pop());
+      }
+
+      if (var->IsStackAllocated()) {
+        // We insert a use of the old value to detect unsupported uses of const
+        // variables (e.g. initialization inside a loop).
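+        // (Illustrative: sloppy-mode `for (;;) { const x = f(); }`, where
+        // `x` would be re-initialized on every iteration.)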
+        HValue* old_value = environment()->Lookup(var);
+        Add<HUseConst>(old_value);
+      }
+    }
+
+    if (proxy->IsArguments()) return Bailout(kAssignmentToArguments);
+
+    // Handle the assignment.
+    switch (var->location()) {
+      case VariableLocation::GLOBAL:
+      case VariableLocation::UNALLOCATED:
+        CHECK_ALIVE(VisitForValue(expr->value()));
+        HandleGlobalVariableAssignment(var, Top(), expr->AssignmentSlot(),
+                                       expr->AssignmentId());
+        return ast_context()->ReturnValue(Pop());
+
+      case VariableLocation::PARAMETER:
+      case VariableLocation::LOCAL: {
+        // Perform an initialization check for let declared variables
+        // or parameters.
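+        // (Illustrative: `{ x = 1; let x; }` assigns while the environment
+        // still holds the hole, so we bail out below.)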
+        if (var->mode() == LET && expr->op() == Token::ASSIGN) {
+          HValue* env_value = environment()->Lookup(var);
+          if (env_value == graph()->GetConstantHole()) {
+            return Bailout(kAssignmentToLetVariableBeforeInitialization);
+          }
+        }
+        // We do not allow the arguments object to occur in a context where it
+        // may escape, but assignments to stack-allocated locals are
+        // permitted.
+        CHECK_ALIVE(VisitForValue(expr->value(), ARGUMENTS_ALLOWED));
+        HValue* value = Pop();
+        BindIfLive(var, value);
+        return ast_context()->ReturnValue(value);
+      }
+
+      case VariableLocation::CONTEXT: {
+        // Bail out if we try to mutate a parameter value in a function using
+        // the arguments object.  We do not (yet) correctly handle the
+        // arguments property of the function.
+        if (current_info()->scope()->arguments() != NULL) {
+          // Parameters will rewrite to context slots.  We have no direct way
+          // to detect that the variable is a parameter.
+          int count = current_info()->scope()->num_parameters();
+          for (int i = 0; i < count; ++i) {
+            if (var == current_info()->scope()->parameter(i)) {
+              return Bailout(kAssignmentToParameterInArgumentsObject);
+            }
+          }
+        }
+
+        CHECK_ALIVE(VisitForValue(expr->value()));
+        HStoreContextSlot::Mode mode;
+        if (expr->op() == Token::ASSIGN) {
+          switch (var->mode()) {
+            case LET:
+              mode = HStoreContextSlot::kCheckDeoptimize;
+              break;
+            case CONST:
+              // This case is checked statically, so there is no need to
+              // perform checks here.
+              UNREACHABLE();
+            case CONST_LEGACY:
+              return ast_context()->ReturnValue(Pop());
+            default:
+              mode = HStoreContextSlot::kNoCheck;
+          }
+        } else {
+          DCHECK_EQ(Token::INIT, expr->op());
+          if (var->mode() == CONST_LEGACY) {
+            mode = HStoreContextSlot::kCheckIgnoreAssignment;
+          } else {
+            mode = HStoreContextSlot::kNoCheck;
+          }
+        }
+
+        HValue* context = BuildContextChainWalk(var);
+        HStoreContextSlot* instr = Add<HStoreContextSlot>(
+            context, var->index(), mode, Top());
+        if (instr->HasObservableSideEffects()) {
+          Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
+        }
+        return ast_context()->ReturnValue(Pop());
+      }
+
+      case VariableLocation::LOOKUP:
+        return Bailout(kAssignmentToLOOKUPVariable);
+    }
+  } else {
+    return Bailout(kInvalidLeftHandSideInAssignment);
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitYield(Yield* expr) {
+  // Generators are not optimized, so we should never get here.
+  UNREACHABLE();
+}
+
+
+void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  if (!ast_context()->IsEffect()) {
+    // The parser turns invalid left-hand sides in assignments into throw
+    // statements, which may not be in effect contexts. We might still try
+    // to optimize such functions; bail out now if we do.
+    return Bailout(kInvalidLeftHandSideInAssignment);
+  }
+  CHECK_ALIVE(VisitForValue(expr->exception()));
+
+  HValue* value = environment()->Pop();
+  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+  Add<HPushArguments>(value);
+  Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kThrow), 1);
+  Add<HSimulate>(expr->id());
+
+  // If the throw definitely exits the function, we can finish with a dummy
+  // control flow at this point.  This is not the case if the throw is inside
+  // an inlined function which may be replaced.
+  if (call_context() == NULL) {
+    FinishExitCurrentBlock(New<HAbnormalExit>());
+  }
+}
+
+
+HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
+  if (string->IsConstant()) {
+    HConstant* c_string = HConstant::cast(string);
+    if (c_string->HasStringValue()) {
+      return Add<HConstant>(c_string->StringValue()->map()->instance_type());
+    }
+  }
+  return Add<HLoadNamedField>(
+      Add<HLoadNamedField>(string, nullptr, HObjectAccess::ForMap()), nullptr,
+      HObjectAccess::ForMapInstanceType());
+}
+
+
+HInstruction* HGraphBuilder::AddLoadStringLength(HValue* string) {
+  return AddInstruction(BuildLoadStringLength(string));
+}
+
+
+HInstruction* HGraphBuilder::BuildLoadStringLength(HValue* string) {
+  if (string->IsConstant()) {
+    HConstant* c_string = HConstant::cast(string);
+    if (c_string->HasStringValue()) {
+      return New<HConstant>(c_string->StringValue()->length());
+    }
+  }
+  return New<HLoadNamedField>(string, nullptr,
+                              HObjectAccess::ForStringLength());
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
+    PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+    HValue* object, Handle<Name> name, HValue* value, bool is_uninitialized) {
+  if (is_uninitialized) {
+    Add<HDeoptimize>(
+        Deoptimizer::kInsufficientTypeFeedbackForGenericNamedAccess,
+        Deoptimizer::SOFT);
+  }
+  if (access_type == LOAD) {
+    Handle<TypeFeedbackVector> vector =
+        handle(current_feedback_vector(), isolate());
+
+    if (!expr->AsProperty()->key()->IsPropertyName()) {
+      // It's possible that a keyed load of a constant string was converted
+      // to a named load. Here, at the last minute, we need to make sure to
+      // use a generic Keyed Load if we are using the type vector, because
+      // it has to share information with full code.
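+      // (Illustrative: `o["foo"]` reaches here as a named load of "foo",
+      // but its feedback slot is a keyed one.)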
+      HConstant* key = Add<HConstant>(name);
+      HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(
+          object, key, function_language_mode(), PREMONOMORPHIC);
+      result->SetVectorAndSlot(vector, slot);
+      return result;
+    }
+
+    HLoadNamedGeneric* result = New<HLoadNamedGeneric>(
+        object, name, function_language_mode(), PREMONOMORPHIC);
+    result->SetVectorAndSlot(vector, slot);
+    return result;
+  } else {
+    if (current_feedback_vector()->GetKind(slot) ==
+        FeedbackVectorSlotKind::KEYED_STORE_IC) {
+      // It's possible that a keyed store of a constant string was converted
+      // to a named store. Here, at the last minute, we need to make sure to
+      // use a generic Keyed Store if we are using the type vector, because
+      // it has to share information with full code.
+      HConstant* key = Add<HConstant>(name);
+      HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
+          object, key, value, function_language_mode(), PREMONOMORPHIC);
+      Handle<TypeFeedbackVector> vector =
+          handle(current_feedback_vector(), isolate());
+      result->SetVectorAndSlot(vector, slot);
+      return result;
+    }
+
+    HStoreNamedGeneric* result = New<HStoreNamedGeneric>(
+        object, name, value, function_language_mode(), PREMONOMORPHIC);
+    Handle<TypeFeedbackVector> vector =
+        handle(current_feedback_vector(), isolate());
+    result->SetVectorAndSlot(vector, slot);
+    return result;
+  }
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
+    PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+    HValue* object, HValue* key, HValue* value) {
+  if (access_type == LOAD) {
+    InlineCacheState initial_state = expr->AsProperty()->GetInlineCacheState();
+    HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(
+        object, key, function_language_mode(), initial_state);
+    // HLoadKeyedGeneric with vector ICs benefits from being encoded as
+    // MEGAMORPHIC because the vector/slot combo becomes unnecessary.
+    if (initial_state != MEGAMORPHIC) {
+      // We need to pass vector information.
+      Handle<TypeFeedbackVector> vector =
+          handle(current_feedback_vector(), isolate());
+      result->SetVectorAndSlot(vector, slot);
+    }
+    return result;
+  } else {
+    HStoreKeyedGeneric* result = New<HStoreKeyedGeneric>(
+        object, key, value, function_language_mode(), PREMONOMORPHIC);
+    Handle<TypeFeedbackVector> vector =
+        handle(current_feedback_vector(), isolate());
+    result->SetVectorAndSlot(vector, slot);
+    return result;
+  }
+}
+
+
+LoadKeyedHoleMode HOptimizedGraphBuilder::BuildKeyedHoleMode(Handle<Map> map) {
+  // Loads from "stock" fast holey double arrays can elide the hole check.
+  // Loads from a "stock" fast holey array can convert the hole to undefined
+  // with impunity.
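+  // (Illustrative: for `var a = ["x", , "y"];` with an untouched prototype
+  // chain, `a[1]` can simply yield undefined instead of deoptimizing.)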
+  LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE;
+  bool holey_double_elements =
+      *map == isolate()->get_initial_js_array_map(FAST_HOLEY_DOUBLE_ELEMENTS);
+  bool holey_elements =
+      *map == isolate()->get_initial_js_array_map(FAST_HOLEY_ELEMENTS);
+  if ((holey_double_elements || holey_elements) &&
+      isolate()->IsFastArrayConstructorPrototypeChainIntact()) {
+    load_mode =
+        holey_double_elements ? ALLOW_RETURN_HOLE : CONVERT_HOLE_TO_UNDEFINED;
+
+    Handle<JSObject> prototype(JSObject::cast(map->prototype()), isolate());
+    Handle<JSObject> object_prototype = isolate()->initial_object_prototype();
+    BuildCheckPrototypeMaps(prototype, object_prototype);
+    graph()->MarkDependsOnEmptyArrayProtoElements();
+  }
+  return load_mode;
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildMonomorphicElementAccess(
+    HValue* object,
+    HValue* key,
+    HValue* val,
+    HValue* dependency,
+    Handle<Map> map,
+    PropertyAccessType access_type,
+    KeyedAccessStoreMode store_mode) {
+  HCheckMaps* checked_object = Add<HCheckMaps>(object, map, dependency);
+
+  if (access_type == STORE && map->prototype()->IsJSObject()) {
+    // Monomorphic stores need a prototype chain check because shape
+    // changes could allow callbacks on elements in the chain that
+    // aren't compatible with monomorphic keyed stores.
+    PrototypeIterator iter(map);
+    JSObject* holder = NULL;
+    while (!iter.IsAtEnd()) {
+      // JSProxies can't occur here because we wouldn't have installed a
+      // non-generic IC if there were any.
+      holder = *PrototypeIterator::GetCurrent<JSObject>(iter);
+      iter.Advance();
+    }
+    DCHECK(holder && holder->IsJSObject());
+
+    BuildCheckPrototypeMaps(handle(JSObject::cast(map->prototype())),
+                            Handle<JSObject>(holder));
+  }
+
+  LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
+  return BuildUncheckedMonomorphicElementAccess(
+      checked_object, key, val,
+      map->instance_type() == JS_ARRAY_TYPE,
+      map->elements_kind(), access_type,
+      load_mode, store_mode);
+}
+
+
+static bool CanInlineElementAccess(Handle<Map> map) {
+  return map->IsJSObjectMap() && !map->has_dictionary_elements() &&
+         !map->has_sloppy_arguments_elements() &&
+         !map->has_indexed_interceptor() && !map->is_access_check_needed();
+}
+
+
+HInstruction* HOptimizedGraphBuilder::TryBuildConsolidatedElementLoad(
+    HValue* object,
+    HValue* key,
+    HValue* val,
+    SmallMapList* maps) {
+  // For polymorphic loads of similar elements kinds (i.e. all tagged or all
+  // double), always use the "worst case" code without a transition.  This is
+  // much faster than transitioning the elements to the worst case, trading a
+  // HTransitionElements for a HCheckMaps, and avoiding mutation of the array.
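+  // (Illustrative: a load site seen with both FAST_SMI_ELEMENTS and
+  // FAST_ELEMENTS maps is emitted once, keyed on FAST_ELEMENTS.)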
+  bool has_double_maps = false;
+  bool has_smi_or_object_maps = false;
+  bool has_js_array_access = false;
+  bool has_non_js_array_access = false;
+  bool has_seen_holey_elements = false;
+  Handle<Map> most_general_consolidated_map;
+  for (int i = 0; i < maps->length(); ++i) {
+    Handle<Map> map = maps->at(i);
+    if (!CanInlineElementAccess(map)) return NULL;
+    // Don't allow mixing of JSArrays with JSObjects.
+    if (map->instance_type() == JS_ARRAY_TYPE) {
+      if (has_non_js_array_access) return NULL;
+      has_js_array_access = true;
+    } else if (has_js_array_access) {
+      return NULL;
+    } else {
+      has_non_js_array_access = true;
+    }
+    // Don't allow mixed, incompatible elements kinds.
+    if (map->has_fast_double_elements()) {
+      if (has_smi_or_object_maps) return NULL;
+      has_double_maps = true;
+    } else if (map->has_fast_smi_or_object_elements()) {
+      if (has_double_maps) return NULL;
+      has_smi_or_object_maps = true;
+    } else {
+      return NULL;
+    }
+    // Remember if we've ever seen holey elements.
+    if (IsHoleyElementsKind(map->elements_kind())) {
+      has_seen_holey_elements = true;
+    }
+    // Remember the most general elements kind, the code for its load will
+    // properly handle all of the more specific cases.
+    if ((i == 0) || IsMoreGeneralElementsKindTransition(
+            most_general_consolidated_map->elements_kind(),
+            map->elements_kind())) {
+      most_general_consolidated_map = map;
+    }
+  }
+  if (!has_double_maps && !has_smi_or_object_maps) return NULL;
+
+  HCheckMaps* checked_object = Add<HCheckMaps>(object, maps);
+  // FAST_ELEMENTS is considered more general than FAST_HOLEY_SMI_ELEMENTS.
+  // If we've seen both, the consolidated load must use FAST_HOLEY_ELEMENTS.
+  ElementsKind consolidated_elements_kind = has_seen_holey_elements
+      ? GetHoleyElementsKind(most_general_consolidated_map->elements_kind())
+      : most_general_consolidated_map->elements_kind();
+  LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE;
+  if (has_seen_holey_elements) {
+    // Make sure that all of the maps we are handling have the initial array
+    // prototype.
+    bool saw_non_array_prototype = false;
+    for (int i = 0; i < maps->length(); ++i) {
+      Handle<Map> map = maps->at(i);
+      if (map->prototype() != *isolate()->initial_array_prototype()) {
+        // We can't guarantee that loading the hole is safe. The prototype may
+        // have an element at this position.
+        saw_non_array_prototype = true;
+        break;
+      }
+    }
+
+    if (!saw_non_array_prototype) {
+      Handle<Map> holey_map = handle(
+          isolate()->get_initial_js_array_map(consolidated_elements_kind));
+      load_mode = BuildKeyedHoleMode(holey_map);
+      if (load_mode != NEVER_RETURN_HOLE) {
+        for (int i = 0; i < maps->length(); ++i) {
+          Handle<Map> map = maps->at(i);
+          // The prototype check was already done for the holey map in
+          // BuildKeyedHoleMode.
+          if (!map.is_identical_to(holey_map)) {
+            Handle<JSObject> prototype(JSObject::cast(map->prototype()),
+                                       isolate());
+            Handle<JSObject> object_prototype =
+                isolate()->initial_object_prototype();
+            BuildCheckPrototypeMaps(prototype, object_prototype);
+          }
+        }
+      }
+    }
+  }
+  HInstruction* instr = BuildUncheckedMonomorphicElementAccess(
+      checked_object, key, val,
+      most_general_consolidated_map->instance_type() == JS_ARRAY_TYPE,
+      consolidated_elements_kind, LOAD, load_mode, STANDARD_STORE);
+  return instr;
+}
+
+
+HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
+    Expression* expr, FeedbackVectorSlot slot, HValue* object, HValue* key,
+    HValue* val, SmallMapList* maps, PropertyAccessType access_type,
+    KeyedAccessStoreMode store_mode, bool* has_side_effects) {
+  *has_side_effects = false;
+  BuildCheckHeapObject(object);
+
+  if (access_type == LOAD) {
+    HInstruction* consolidated_load =
+        TryBuildConsolidatedElementLoad(object, key, val, maps);
+    if (consolidated_load != NULL) {
+      *has_side_effects |= consolidated_load->HasObservableSideEffects();
+      return consolidated_load;
+    }
+  }
+
+  // ElementsKind transition support.
+  MapHandleList transition_target(maps->length());
+  // Collect possible transition targets.
+  MapHandleList possible_transitioned_maps(maps->length());
+  for (int i = 0; i < maps->length(); ++i) {
+    Handle<Map> map = maps->at(i);
+    // Loads from strings or loads with a mix of string and non-string maps
+    // shouldn't be handled polymorphically.
+    DCHECK(access_type != LOAD || !map->IsStringMap());
+    ElementsKind elements_kind = map->elements_kind();
+    if (CanInlineElementAccess(map) && IsFastElementsKind(elements_kind) &&
+        elements_kind != GetInitialFastElementsKind()) {
+      possible_transitioned_maps.Add(map);
+    }
+    if (IsSloppyArgumentsElements(elements_kind)) {
+      HInstruction* result =
+          BuildKeyedGeneric(access_type, expr, slot, object, key, val);
+      *has_side_effects = result->HasObservableSideEffects();
+      return AddInstruction(result);
+    }
+  }
+  // Get transition target for each map (NULL == no transition).
+  for (int i = 0; i < maps->length(); ++i) {
+    Handle<Map> map = maps->at(i);
+    Handle<Map> transitioned_map =
+        Map::FindTransitionedMap(map, &possible_transitioned_maps);
+    transition_target.Add(transitioned_map);
+  }
+
+  MapHandleList untransitionable_maps(maps->length());
+  HTransitionElementsKind* transition = NULL;
+  for (int i = 0; i < maps->length(); ++i) {
+    Handle<Map> map = maps->at(i);
+    DCHECK(map->IsMap());
+    if (!transition_target.at(i).is_null()) {
+      DCHECK(Map::IsValidElementsTransition(
+          map->elements_kind(),
+          transition_target.at(i)->elements_kind()));
+      transition = Add<HTransitionElementsKind>(object, map,
+                                                transition_target.at(i));
+    } else {
+      untransitionable_maps.Add(map);
+    }
+  }
+
+  // If only one map is left after transitioning, handle this case
+  // monomorphically.
+  DCHECK(untransitionable_maps.length() >= 1);
+  if (untransitionable_maps.length() == 1) {
+    Handle<Map> untransitionable_map = untransitionable_maps[0];
+    HInstruction* instr = NULL;
+    if (!CanInlineElementAccess(untransitionable_map)) {
+      instr = AddInstruction(
+          BuildKeyedGeneric(access_type, expr, slot, object, key, val));
+    } else {
+      instr = BuildMonomorphicElementAccess(
+          object, key, val, transition, untransitionable_map, access_type,
+          store_mode);
+    }
+    *has_side_effects |= instr->HasObservableSideEffects();
+    return access_type == STORE ? val : instr;
+  }
+
+  HBasicBlock* join = graph()->CreateBasicBlock();
+
+  for (int i = 0; i < untransitionable_maps.length(); ++i) {
+    Handle<Map> map = untransitionable_maps[i];
+    ElementsKind elements_kind = map->elements_kind();
+    HBasicBlock* this_map = graph()->CreateBasicBlock();
+    HBasicBlock* other_map = graph()->CreateBasicBlock();
+    HCompareMap* mapcompare =
+        New<HCompareMap>(object, map, this_map, other_map);
+    FinishCurrentBlock(mapcompare);
+
+    set_current_block(this_map);
+    HInstruction* access = NULL;
+    if (!CanInlineElementAccess(map)) {
+      access = AddInstruction(
+          BuildKeyedGeneric(access_type, expr, slot, object, key, val));
+    } else {
+      DCHECK(IsFastElementsKind(elements_kind) ||
+             IsFixedTypedArrayElementsKind(elements_kind));
+      LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
+      // Happily, mapcompare is a checked object.
+      access = BuildUncheckedMonomorphicElementAccess(
+          mapcompare, key, val,
+          map->instance_type() == JS_ARRAY_TYPE,
+          elements_kind, access_type,
+          load_mode,
+          store_mode);
+    }
+    *has_side_effects |= access->HasObservableSideEffects();
+    // The caller will use has_side_effects and add a correct Simulate.
+    access->SetFlag(HValue::kHasNoObservableSideEffects);
+    if (access_type == LOAD) {
+      Push(access);
+    }
+    NoObservableSideEffectsScope scope(this);
+    GotoNoSimulate(join);
+    set_current_block(other_map);
+  }
+
+  // Ensure that we visited at least one map above that goes to join. This is
+  // necessary because FinishExitWithHardDeoptimization does an AbnormalExit
+  // rather than joining the join block. If this becomes an issue, insert a
+  // generic access in the case length() == 0.
+  DCHECK(join->predecessors()->length() > 0);
+  // Deopt if none of the cases matched.
+  NoObservableSideEffectsScope scope(this);
+  FinishExitWithHardDeoptimization(
+      Deoptimizer::kUnknownMapInPolymorphicElementAccess);
+  set_current_block(join);
+  return access_type == STORE ? val : Pop();
+}
+
+
+HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
+    HValue* obj, HValue* key, HValue* val, Expression* expr,
+    FeedbackVectorSlot slot, BailoutId ast_id, BailoutId return_id,
+    PropertyAccessType access_type, bool* has_side_effects) {
+  // A keyed name access with type feedback may contain the name.
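+  // (Illustrative: for `o[k]` where the IC has only ever seen
+  // k === "foo", specialize on "foo" and guard the key below.)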
+  Handle<TypeFeedbackVector> vector =
+      handle(current_feedback_vector(), isolate());
+  HValue* expected_key = key;
+  if (!key->ActualValue()->IsConstant()) {
+    Name* name = nullptr;
+    if (access_type == LOAD) {
+      KeyedLoadICNexus nexus(vector, slot);
+      name = nexus.FindFirstName();
+    } else {
+      KeyedStoreICNexus nexus(vector, slot);
+      name = nexus.FindFirstName();
+    }
+    if (name != nullptr) {
+      Handle<Name> handle_name(name);
+      expected_key = Add<HConstant>(handle_name);
+      // We need a check against the key.
+      bool in_new_space = isolate()->heap()->InNewSpace(*handle_name);
+      Unique<Name> unique_name = Unique<Name>::CreateUninitialized(handle_name);
+      Add<HCheckValue>(key, unique_name, in_new_space);
+    }
+  }
+  if (expected_key->ActualValue()->IsConstant()) {
+    Handle<Object> constant =
+        HConstant::cast(expected_key->ActualValue())->handle(isolate());
+    uint32_t array_index;
+    if ((constant->IsString() &&
+         !Handle<String>::cast(constant)->AsArrayIndex(&array_index)) ||
+        constant->IsSymbol()) {
+      if (!constant->IsUniqueName()) {
+        constant = isolate()->factory()->InternalizeString(
+            Handle<String>::cast(constant));
+      }
+      HValue* access =
+          BuildNamedAccess(access_type, ast_id, return_id, expr, slot, obj,
+                           Handle<Name>::cast(constant), val, false);
+      if (access == NULL || access->IsPhi() ||
+          HInstruction::cast(access)->IsLinked()) {
+        *has_side_effects = false;
+      } else {
+        HInstruction* instr = HInstruction::cast(access);
+        AddInstruction(instr);
+        *has_side_effects = instr->HasObservableSideEffects();
+      }
+      return access;
+    }
+  }
+
+  DCHECK(!expr->IsPropertyName());
+  HInstruction* instr = NULL;
+
+  SmallMapList* maps;
+  bool monomorphic = ComputeReceiverTypes(expr, obj, &maps, zone());
+
+  bool force_generic = false;
+  if (expr->GetKeyType() == PROPERTY) {
+    // Non-generic accesses assume that elements are being accessed and will
+    // deopt for non-index keys, which the IC knows will occur.
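+    // (e.g. `o[k]` where `k` has been a non-index string such as "foo".)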
+    // TODO(jkummerow): Consider adding proper support for property accesses.
+    force_generic = true;
+    monomorphic = false;
+  } else if (access_type == STORE &&
+             (monomorphic || (maps != NULL && !maps->is_empty()))) {
+    // Stores can't be mono/polymorphic if their prototype chain has dictionary
+    // elements. However a receiver map that has dictionary elements itself
+    // should be left to normal mono/poly behavior (the other maps may benefit
+    // from highly optimized stores).
+    for (int i = 0; i < maps->length(); i++) {
+      Handle<Map> current_map = maps->at(i);
+      if (current_map->DictionaryElementsInPrototypeChainOnly()) {
+        force_generic = true;
+        monomorphic = false;
+        break;
+      }
+    }
+  } else if (access_type == LOAD && !monomorphic &&
+             (maps != NULL && !maps->is_empty())) {
+    // Polymorphic loads have to go generic if any of the maps are strings.
+    // If some, but not all of the maps are strings, we should go generic
+    // because polymorphic access wants to key on ElementsKind and isn't
+    // compatible with strings.
+    for (int i = 0; i < maps->length(); i++) {
+      Handle<Map> current_map = maps->at(i);
+      if (current_map->IsStringMap()) {
+        force_generic = true;
+        break;
+      }
+    }
+  }
+
+  if (monomorphic) {
+    Handle<Map> map = maps->first();
+    if (!CanInlineElementAccess(map)) {
+      instr = AddInstruction(
+          BuildKeyedGeneric(access_type, expr, slot, obj, key, val));
+    } else {
+      BuildCheckHeapObject(obj);
+      instr = BuildMonomorphicElementAccess(
+          obj, key, val, NULL, map, access_type, expr->GetStoreMode());
+    }
+  } else if (!force_generic && (maps != NULL && !maps->is_empty())) {
+    return HandlePolymorphicElementAccess(expr, slot, obj, key, val, maps,
+                                          access_type, expr->GetStoreMode(),
+                                          has_side_effects);
+  } else {
+    if (access_type == STORE) {
+      if (expr->IsAssignment() &&
+          expr->AsAssignment()->HasNoTypeInformation()) {
+        Add<HDeoptimize>(Deoptimizer::kInsufficientTypeFeedbackForKeyedStore,
+                         Deoptimizer::SOFT);
+      }
+    } else {
+      if (expr->AsProperty()->HasNoTypeInformation()) {
+        Add<HDeoptimize>(Deoptimizer::kInsufficientTypeFeedbackForKeyedLoad,
+                         Deoptimizer::SOFT);
+      }
+    }
+    instr = AddInstruction(
+        BuildKeyedGeneric(access_type, expr, slot, obj, key, val));
+  }
+  *has_side_effects = instr->HasObservableSideEffects();
+  return instr;
+}
+
+
+void HOptimizedGraphBuilder::EnsureArgumentsArePushedForAccess() {
+  // Outermost function already has arguments on the stack.
+  if (function_state()->outer() == NULL) return;
+
+  if (function_state()->arguments_pushed()) return;
+
+  // Push arguments when entering an inlined function.
+  HEnterInlined* entry = function_state()->entry();
+  entry->set_arguments_pushed();
+
+  HArgumentsObject* arguments = entry->arguments_object();
+  const ZoneList<HValue*>* arguments_values = arguments->arguments_values();
+
+  HInstruction* insert_after = entry;
+  for (int i = 0; i < arguments_values->length(); i++) {
+    HValue* argument = arguments_values->at(i);
+    HInstruction* push_argument = New<HPushArguments>(argument);
+    push_argument->InsertAfter(insert_after);
+    insert_after = push_argument;
+  }
+
+  HArgumentsElements* arguments_elements = New<HArgumentsElements>(true);
+  arguments_elements->ClearFlag(HValue::kUseGVN);
+  arguments_elements->InsertAfter(insert_after);
+  function_state()->set_arguments_elements(arguments_elements);
+}
+
+
+bool HOptimizedGraphBuilder::TryArgumentsAccess(Property* expr) {
+  VariableProxy* proxy = expr->obj()->AsVariableProxy();
+  if (proxy == NULL) return false;
+  if (!proxy->var()->IsStackAllocated()) return false;
+  if (!environment()->Lookup(proxy->var())->CheckFlag(HValue::kIsArguments)) {
+    return false;
+  }
+
+  HInstruction* result = NULL;
+  if (expr->key()->IsPropertyName()) {
+    Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
+    if (!String::Equals(name, isolate()->factory()->length_string())) {
+      return false;
+    }
+
+    if (function_state()->outer() == NULL) {
+      HInstruction* elements = Add<HArgumentsElements>(false);
+      result = New<HArgumentsLength>(elements);
+    } else {
+      // Number of arguments without receiver.
+      int argument_count = environment()->
+          arguments_environment()->parameter_count() - 1;
+      result = New<HConstant>(argument_count);
+    }
+  } else {
+    Push(graph()->GetArgumentsObject());
+    CHECK_ALIVE_OR_RETURN(VisitForValue(expr->key()), true);
+    HValue* key = Pop();
+    Drop(1);  // Arguments object.
+    if (function_state()->outer() == NULL) {
+      HInstruction* elements = Add<HArgumentsElements>(false);
+      HInstruction* length = Add<HArgumentsLength>(elements);
+      HInstruction* checked_key = Add<HBoundsCheck>(key, length);
+      result = New<HAccessArgumentsAt>(elements, length, checked_key);
+    } else {
+      EnsureArgumentsArePushedForAccess();
+
+      // Number of arguments without receiver.
+      HInstruction* elements = function_state()->arguments_elements();
+      int argument_count = environment()->
+          arguments_environment()->parameter_count() - 1;
+      HInstruction* length = Add<HConstant>(argument_count);
+      HInstruction* checked_key = Add<HBoundsCheck>(key, length);
+      result = New<HAccessArgumentsAt>(elements, length, checked_key);
+    }
+  }
+  ast_context()->ReturnInstruction(result, expr->id());
+  return true;
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildNamedAccess(
+    PropertyAccessType access, BailoutId ast_id, BailoutId return_id,
+    Expression* expr, FeedbackVectorSlot slot, HValue* object,
+    Handle<Name> name, HValue* value, bool is_uninitialized) {
+  SmallMapList* maps;
+  ComputeReceiverTypes(expr, object, &maps, zone());
+  DCHECK(maps != NULL);
+
+  if (maps->length() > 0) {
+    PropertyAccessInfo info(this, access, maps->first(), name);
+    if (!info.CanAccessAsMonomorphic(maps)) {
+      HandlePolymorphicNamedFieldAccess(access, expr, slot, ast_id, return_id,
+                                        object, value, maps, name);
+      return NULL;
+    }
+
+    HValue* checked_object;
+    // Type::Number() is only supported by polymorphic load/call handling.
+    DCHECK(!info.IsNumberType());
+    BuildCheckHeapObject(object);
+    if (AreStringTypes(maps)) {
+      checked_object =
+          Add<HCheckInstanceType>(object, HCheckInstanceType::IS_STRING);
+    } else {
+      checked_object = Add<HCheckMaps>(object, maps);
+    }
+    return BuildMonomorphicAccess(
+        &info, object, checked_object, value, ast_id, return_id);
+  }
+
+  return BuildNamedGeneric(access, expr, slot, object, name, value,
+                           is_uninitialized);
+}
+
+
+void HOptimizedGraphBuilder::PushLoad(Property* expr,
+                                      HValue* object,
+                                      HValue* key) {
+  ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
+  Push(object);
+  if (key != NULL) Push(key);
+  BuildLoad(expr, expr->LoadId());
+}
+
+
+void HOptimizedGraphBuilder::BuildLoad(Property* expr,
+                                       BailoutId ast_id) {
+  HInstruction* instr = NULL;
+  if (expr->IsStringAccess() && expr->GetKeyType() == ELEMENT) {
+    HValue* index = Pop();
+    HValue* string = Pop();
+    HInstruction* char_code = BuildStringCharCodeAt(string, index);
+    AddInstruction(char_code);
+    instr = NewUncasted<HStringCharFromCode>(char_code);
+
+  } else if (expr->key()->IsPropertyName()) {
+    Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
+    HValue* object = Pop();
+
+    HValue* value = BuildNamedAccess(LOAD, ast_id, expr->LoadId(), expr,
+                                     expr->PropertyFeedbackSlot(), object, name,
+                                     NULL, expr->IsUninitialized());
+    if (value == NULL) return;
+    if (value->IsPhi()) return ast_context()->ReturnValue(value);
+    instr = HInstruction::cast(value);
+    if (instr->IsLinked()) return ast_context()->ReturnValue(instr);
+
+  } else {
+    HValue* key = Pop();
+    HValue* obj = Pop();
+
+    bool has_side_effects = false;
+    HValue* load = HandleKeyedElementAccess(
+        obj, key, NULL, expr, expr->PropertyFeedbackSlot(), ast_id,
+        expr->LoadId(), LOAD, &has_side_effects);
+    if (has_side_effects) {
+      if (ast_context()->IsEffect()) {
+        Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+      } else {
+        Push(load);
+        Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+        Drop(1);
+      }
+    }
+    if (load == NULL) return;
+    return ast_context()->ReturnValue(load);
+  }
+  return ast_context()->ReturnInstruction(instr, ast_id);
+}
+
+
+void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+
+  if (TryArgumentsAccess(expr)) return;
+
+  CHECK_ALIVE(VisitForValue(expr->obj()));
+  if (!expr->key()->IsPropertyName() || expr->IsStringAccess()) {
+    CHECK_ALIVE(VisitForValue(expr->key()));
+  }
+
+  BuildLoad(expr, expr->id());
+}
+
+
+HInstruction* HGraphBuilder::BuildConstantMapCheck(Handle<JSObject> constant) {
+  HCheckMaps* check = Add<HCheckMaps>(
+      Add<HConstant>(constant), handle(constant->map()));
+  check->ClearDependsOnFlag(kElementsKind);
+  return check;
+}
+
+
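+// Emits map checks for every object on the chain from |prototype| up to and
+// including |holder|, or for the whole chain when |holder| is null.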
+HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
+                                                     Handle<JSObject> holder) {
+  PrototypeIterator iter(isolate(), prototype,
+                         PrototypeIterator::START_AT_RECEIVER);
+  while (holder.is_null() ||
+         !PrototypeIterator::GetCurrent(iter).is_identical_to(holder)) {
+    BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter));
+    iter.Advance();
+    if (iter.IsAtEnd()) {
+      return NULL;
+    }
+  }
+  return BuildConstantMapCheck(PrototypeIterator::GetCurrent<JSObject>(iter));
+}
+
+
+void HOptimizedGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
+                                                   Handle<Map> receiver_map) {
+  if (!holder.is_null()) {
+    Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
+    BuildCheckPrototypeMaps(prototype, holder);
+  }
+}
+
+
+HInstruction* HOptimizedGraphBuilder::NewPlainFunctionCall(HValue* fun,
+                                                           int argument_count) {
+  return New<HCallJSFunction>(fun, argument_count);
+}
+
+
+HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
+    HValue* fun, HValue* context,
+    int argument_count, HValue* expected_param_count) {
+  HValue* new_target = graph()->GetConstantUndefined();
+  HValue* arity = Add<HConstant>(argument_count - 1);
+
+  HValue* op_vals[] = {context, fun, new_target, arity, expected_param_count};
+
+  Callable callable = CodeFactory::ArgumentAdaptor(isolate());
+  HConstant* stub = Add<HConstant>(callable.code());
+
+  return New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
+                                  Vector<HValue*>(op_vals, arraysize(op_vals)));
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
+    Handle<JSFunction> jsfun, int argument_count) {
+  HValue* target = Add<HConstant>(jsfun);
+  // For constant functions, we try to avoid calling the
+  // argument adaptor and instead call the function directly.
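+  // (i.e. when the call-site arity matches the formal parameter count, or
+  // the function is marked as not needing argument adaptation.)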
+  int formal_parameter_count =
+      jsfun->shared()->internal_formal_parameter_count();
+  bool dont_adapt_arguments =
+      (formal_parameter_count ==
+       SharedFunctionInfo::kDontAdaptArgumentsSentinel);
+  int arity = argument_count - 1;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+  if (can_invoke_directly) {
+    if (jsfun.is_identical_to(current_info()->closure())) {
+      graph()->MarkRecursive();
+    }
+    return NewPlainFunctionCall(target, argument_count);
+  } else {
+    HValue* param_count_value = Add<HConstant>(formal_parameter_count);
+    HValue* context = Add<HLoadNamedField>(
+        target, nullptr, HObjectAccess::ForFunctionContextPointer());
+    return NewArgumentAdaptorCall(target, context,
+        argument_count, param_count_value);
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+class FunctionSorter {
+ public:
+  explicit FunctionSorter(int index = 0, int ticks = 0, int size = 0)
+      : index_(index), ticks_(ticks), size_(size) {}
+
+  int index() const { return index_; }
+  int ticks() const { return ticks_; }
+  int size() const { return size_; }
+
+ private:
+  int index_;
+  int ticks_;
+  int size_;
+};
+
+
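+// Orders inlining candidates by profiler ticks (hottest first), breaking
+// ties in favor of smaller AST size so cheaper targets come first.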
+inline bool operator<(const FunctionSorter& lhs, const FunctionSorter& rhs) {
+  int diff = lhs.ticks() - rhs.ticks();
+  if (diff != 0) return diff > 0;
+  return lhs.size() < rhs.size();
+}
+
+
+void HOptimizedGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
+                                                        HValue* receiver,
+                                                        SmallMapList* maps,
+                                                        Handle<String> name) {
+  int argument_count = expr->arguments()->length() + 1;  // Includes receiver.
+  FunctionSorter order[kMaxCallPolymorphism];
+
+  bool handle_smi = false;
+  bool handled_string = false;
+  int ordered_functions = 0;
+
+  int i;
+  for (i = 0; i < maps->length() && ordered_functions < kMaxCallPolymorphism;
+       ++i) {
+    PropertyAccessInfo info(this, LOAD, maps->at(i), name);
+    if (info.CanAccessMonomorphic() && info.IsDataConstant() &&
+        info.constant()->IsJSFunction()) {
+      if (info.IsStringType()) {
+        if (handled_string) continue;
+        handled_string = true;
+      }
+      Handle<JSFunction> target = Handle<JSFunction>::cast(info.constant());
+      if (info.IsNumberType()) {
+        handle_smi = true;
+      }
+      expr->set_target(target);
+      order[ordered_functions++] = FunctionSorter(
+          i, target->shared()->profiler_ticks(), InliningAstSize(target));
+    }
+  }
+
+  std::sort(order, order + ordered_functions);
+
+  if (i < maps->length()) {
+    maps->Clear();
+    ordered_functions = -1;
+  }
+
+  HBasicBlock* number_block = NULL;
+  HBasicBlock* join = NULL;
+  handled_string = false;
+  int count = 0;
+
+  for (int fn = 0; fn < ordered_functions; ++fn) {
+    int i = order[fn].index();
+    PropertyAccessInfo info(this, LOAD, maps->at(i), name);
+    if (info.IsStringType()) {
+      if (handled_string) continue;
+      handled_string = true;
+    }
+    // Reloads the target.
+    info.CanAccessMonomorphic();
+    Handle<JSFunction> target = Handle<JSFunction>::cast(info.constant());
+
+    expr->set_target(target);
+    if (count == 0) {
+      // Only needed once.
+      join = graph()->CreateBasicBlock();
+      if (handle_smi) {
+        HBasicBlock* empty_smi_block = graph()->CreateBasicBlock();
+        HBasicBlock* not_smi_block = graph()->CreateBasicBlock();
+        number_block = graph()->CreateBasicBlock();
+        FinishCurrentBlock(New<HIsSmiAndBranch>(
+                receiver, empty_smi_block, not_smi_block));
+        GotoNoSimulate(empty_smi_block, number_block);
+        set_current_block(not_smi_block);
+      } else {
+        BuildCheckHeapObject(receiver);
+      }
+    }
+    ++count;
+    HBasicBlock* if_true = graph()->CreateBasicBlock();
+    HBasicBlock* if_false = graph()->CreateBasicBlock();
+    HUnaryControlInstruction* compare;
+
+    Handle<Map> map = info.map();
+    if (info.IsNumberType()) {
+      Handle<Map> heap_number_map = isolate()->factory()->heap_number_map();
+      compare = New<HCompareMap>(receiver, heap_number_map, if_true, if_false);
+    } else if (info.IsStringType()) {
+      compare = New<HIsStringAndBranch>(receiver, if_true, if_false);
+    } else {
+      compare = New<HCompareMap>(receiver, map, if_true, if_false);
+    }
+    FinishCurrentBlock(compare);
+
+    if (info.IsNumberType()) {
+      GotoNoSimulate(if_true, number_block);
+      if_true = number_block;
+    }
+
+    set_current_block(if_true);
+
+    AddCheckPrototypeMaps(info.holder(), map);
+
+    HValue* function = Add<HConstant>(expr->target());
+    environment()->SetExpressionStackAt(0, function);
+    Push(receiver);
+    CHECK_ALIVE(VisitExpressions(expr->arguments()));
+    bool needs_wrapping = info.NeedsWrappingFor(target);
+    bool try_inline = FLAG_polymorphic_inlining && !needs_wrapping;
+    if (FLAG_trace_inlining && try_inline) {
+      Handle<JSFunction> caller = current_info()->closure();
+      base::SmartArrayPointer<char> caller_name =
+          caller->shared()->DebugName()->ToCString();
+      PrintF("Trying to inline the polymorphic call to %s from %s\n",
+             name->ToCString().get(),
+             caller_name.get());
+    }
+    if (try_inline && TryInlineCall(expr)) {
+      // Trying to inline will signal that we should bailout from the
+      // entire compilation by setting stack overflow on the visitor.
+      if (HasStackOverflow()) return;
+    } else {
+      // Since HWrapReceiver currently cannot actually wrap numbers and strings,
+      // use the regular CallFunctionStub for method calls to wrap the receiver.
+      // TODO(verwaest): Support creation of value wrappers directly in
+      // HWrapReceiver.
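+      // (Illustrative: a sloppy-mode method found on String.prototype and
+      // invoked on a primitive string needs its receiver boxed here.)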
+      HInstruction* call =
+          needs_wrapping ? NewUncasted<HCallFunction>(
+                               function, argument_count,
+                               ConvertReceiverMode::kNotNullOrUndefined)
+                         : BuildCallConstantFunction(target, argument_count);
+      PushArgumentsFromEnvironment(argument_count);
+      AddInstruction(call);
+      Drop(1);  // Drop the function.
+      if (!ast_context()->IsEffect()) Push(call);
+    }
+
+    if (current_block() != NULL) Goto(join);
+    set_current_block(if_false);
+  }
+
+  // Finish up.  Unconditionally deoptimize if we've handled all the maps we
+  // know about and do not want to handle ones we've never seen.  Otherwise
+  // use a generic IC.
+  if (ordered_functions == maps->length() && FLAG_deoptimize_uncommon_cases) {
+    FinishExitWithHardDeoptimization(Deoptimizer::kUnknownMapInPolymorphicCall);
+  } else {
+    Property* prop = expr->expression()->AsProperty();
+    HInstruction* function =
+        BuildNamedGeneric(LOAD, prop, prop->PropertyFeedbackSlot(), receiver,
+                          name, NULL, prop->IsUninitialized());
+    AddInstruction(function);
+    Push(function);
+    AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+
+    environment()->SetExpressionStackAt(1, function);
+    environment()->SetExpressionStackAt(0, receiver);
+    CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+    HInstruction* call = New<HCallFunction>(
+        function, argument_count, ConvertReceiverMode::kNotNullOrUndefined);
+
+    PushArgumentsFromEnvironment(argument_count);
+
+    Drop(1);  // Function.
+
+    if (join != NULL) {
+      AddInstruction(call);
+      if (!ast_context()->IsEffect()) Push(call);
+      Goto(join);
+    } else {
+      return ast_context()->ReturnInstruction(call, expr->id());
+    }
+  }
+
+  // We assume that control flow is always live after an expression.  So
+  // even without predecessors to the join block, we set it as the exit
+  // block and continue by adding instructions there.
+  DCHECK(join != NULL);
+  if (join->HasPredecessor()) {
+    set_current_block(join);
+    join->SetJoinId(expr->id());
+    if (!ast_context()->IsEffect()) return ast_context()->ReturnValue(Pop());
+  } else {
+    set_current_block(NULL);
+  }
+}
+
+
+void HOptimizedGraphBuilder::TraceInline(Handle<JSFunction> target,
+                                         Handle<JSFunction> caller,
+                                         const char* reason) {
+  if (FLAG_trace_inlining) {
+    base::SmartArrayPointer<char> target_name =
+        target->shared()->DebugName()->ToCString();
+    base::SmartArrayPointer<char> caller_name =
+        caller->shared()->DebugName()->ToCString();
+    if (reason == NULL) {
+      PrintF("Inlined %s called from %s.\n", target_name.get(),
+             caller_name.get());
+    } else {
+      PrintF("Did not inline %s called from %s (%s).\n",
+             target_name.get(), caller_name.get(), reason);
+    }
+  }
+}
+
+
+static const int kNotInlinable = 1000000000;
+
+
+int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
+  if (!FLAG_use_inlining) return kNotInlinable;
+
+  // Precondition: call is monomorphic and we have found a target with the
+  // appropriate arity.
+  Handle<JSFunction> caller = current_info()->closure();
+  Handle<SharedFunctionInfo> target_shared(target->shared());
+
+  // Always inline functions that force inlining.
+  if (target_shared->force_inline()) {
+    return 0;
+  }
+  if (target->shared()->IsBuiltin()) {
+    return kNotInlinable;
+  }
+
+  if (target_shared->IsApiFunction()) {
+    TraceInline(target, caller, "target is api function");
+    return kNotInlinable;
+  }
+
+  // Do a quick check on source code length to avoid parsing large
+  // inlining candidates.
+  if (target_shared->SourceSize() >
+      Min(FLAG_max_inlined_source_size, kUnlimitedMaxInlinedSourceSize)) {
+    TraceInline(target, caller, "target text too big");
+    return kNotInlinable;
+  }
+
+  // Target must be inlineable.
+  BailoutReason noopt_reason = target_shared->disable_optimization_reason();
+  if (!target_shared->IsInlineable() && noopt_reason != kHydrogenFilter) {
+    TraceInline(target, caller, "target not inlineable");
+    return kNotInlinable;
+  }
+  if (noopt_reason != kNoReason && noopt_reason != kHydrogenFilter) {
+    TraceInline(target, caller, "target contains unsupported syntax [early]");
+    return kNotInlinable;
+  }
+
+  int nodes_added = target_shared->ast_node_count();
+  return nodes_added;
+}
+
+
+bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
+                                       int arguments_count,
+                                       HValue* implicit_return_value,
+                                       BailoutId ast_id, BailoutId return_id,
+                                       InliningKind inlining_kind) {
+  if (target->context()->native_context() !=
+      top_info()->closure()->context()->native_context()) {
+    return false;
+  }
+  int nodes_added = InliningAstSize(target);
+  if (nodes_added == kNotInlinable) return false;
+
+  Handle<JSFunction> caller = current_info()->closure();
+
+  if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
+    TraceInline(target, caller, "target AST is too large [early]");
+    return false;
+  }
+
+  // Don't inline deeper than the maximum number of inlining levels.
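+  // Note that only JS_FUNCTION frames count towards the depth; other inlined
+  // frame types (e.g. construct or accessor frames) are walked but not
+  // counted.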
+  HEnvironment* env = environment();
+  int current_level = 1;
+  while (env->outer() != NULL) {
+    if (current_level == FLAG_max_inlining_levels) {
+      TraceInline(target, caller, "inline depth limit reached");
+      return false;
+    }
+    if (env->outer()->frame_type() == JS_FUNCTION) {
+      current_level++;
+    }
+    env = env->outer();
+  }
+
+  // Don't inline recursive functions.
+  for (FunctionState* state = function_state();
+       state != NULL;
+       state = state->outer()) {
+    if (*state->compilation_info()->closure() == *target) {
+      TraceInline(target, caller, "target is recursive");
+      return false;
+    }
+  }
+
+  // We don't want to add more than a certain number of nodes from inlining
+  // across the whole function; give up once the cumulative budget is spent.
+  if (inlined_count_ > Min(FLAG_max_inlined_nodes_cumulative,
+                           kUnlimitedMaxInlinedNodesCumulative)) {
+    TraceInline(target, caller, "cumulative AST node limit reached");
+    return false;
+  }
+
+  // Parse and allocate variables.
+  // Use the same AstValueFactory for creating strings in the sub-compilation
+  // step, but don't transfer ownership to target_info.
+  ParseInfo parse_info(zone(), target);
+  parse_info.set_ast_value_factory(
+      top_info()->parse_info()->ast_value_factory());
+  parse_info.set_ast_value_factory_owned(false);
+
+  CompilationInfo target_info(&parse_info);
+  Handle<SharedFunctionInfo> target_shared(target->shared());
+
+  if (IsClassConstructor(target_shared->kind())) {
+    TraceInline(target, caller, "target is classConstructor");
+    return false;
+  }
+  if (target_shared->HasDebugInfo()) {
+    TraceInline(target, caller, "target is being debugged");
+    return false;
+  }
+  if (!Compiler::ParseAndAnalyze(target_info.parse_info())) {
+    if (target_info.isolate()->has_pending_exception()) {
+      // Parse or scope error, never optimize this function.
+      SetStackOverflow();
+      target_shared->DisableOptimization(kParseScopeError);
+    }
+    TraceInline(target, caller, "parse failure");
+    return false;
+  }
+
+  if (target_info.scope()->num_heap_slots() > 0) {
+    TraceInline(target, caller, "target has context-allocated variables");
+    return false;
+  }
+
+  int rest_index;
+  Variable* rest = target_info.scope()->rest_parameter(&rest_index);
+  if (rest) {
+    TraceInline(target, caller, "target uses rest parameters");
+    return false;
+  }
+
+  FunctionLiteral* function = target_info.literal();
+
+  // The following conditions must be checked again after re-parsing, because
+  // earlier the information might not have been complete due to lazy parsing.
+  nodes_added = function->ast_node_count();
+  if (nodes_added > Min(FLAG_max_inlined_nodes, kUnlimitedMaxInlinedNodes)) {
+    TraceInline(target, caller, "target AST is too large [late]");
+    return false;
+  }
+  if (function->dont_optimize()) {
+    TraceInline(target, caller, "target contains unsupported syntax [late]");
+    return false;
+  }
+
+  // If the function uses the arguments object, check that inlining of
+  // functions with an arguments object is enabled and that the arguments
+  // variable is stack allocated.
+  if (function->scope()->arguments() != NULL) {
+    if (!FLAG_inline_arguments) {
+      TraceInline(target, caller, "target uses arguments object");
+      return false;
+    }
+  }
+
+  // Unsupported variable references present.
+  if (function->scope()->this_function_var() != nullptr ||
+      function->scope()->new_target_var() != nullptr) {
+    TraceInline(target, caller, "target uses new target or this function");
+    return false;
+  }
+
+  // All declarations must be inlineable.
+  ZoneList<Declaration*>* decls = target_info.scope()->declarations();
+  int decl_count = decls->length();
+  for (int i = 0; i < decl_count; ++i) {
+    if (!decls->at(i)->IsInlineable()) {
+      TraceInline(target, caller, "target has non-trivial declaration");
+      return false;
+    }
+  }
+
+  // In strong mode it is an error to call a function with too few arguments.
+  // In that case do not inline because then the arity check would be skipped.
+  if (is_strong(function->language_mode()) &&
+      arguments_count < function->parameter_count()) {
+    TraceInline(target, caller,
+                "too few arguments passed to a strong function");
+    return false;
+  }
+
+  // Generate the deoptimization data for the unoptimized version of
+  // the target function if we don't already have it.
+  if (!Compiler::EnsureDeoptimizationSupport(&target_info)) {
+    TraceInline(target, caller, "could not generate deoptimization info");
+    return false;
+  }
+  // Remember that we inlined this function. This needs to be called right
+  // after the EnsureDeoptimizationSupport call so that the code flusher
+  // does not remove the code with the deoptimization support.
+  top_info()->AddInlinedFunction(target_info.shared_info());
+
+  // ----------------------------------------------------------------
+  // After this point, we've made a decision to inline this function (so
+  // TryInline should always return true).
+
+  // Type-check the inlined function.
+  DCHECK(target_shared->has_deoptimization_support());
+  AstTyper(target_info.isolate(), target_info.zone(), target_info.closure(),
+           target_info.scope(), target_info.osr_ast_id(), target_info.literal())
+      .Run();
+
+  int inlining_id = 0;
+  if (top_info()->is_tracking_positions()) {
+    inlining_id = top_info()->TraceInlinedFunction(
+        target_shared, source_position(), function_state()->inlining_id());
+  }
+
+  // Save the pending call context and set up a new one for the inlined
+  // function. The function state is allocated with new because we need to
+  // delete it in two different places.
+  FunctionState* target_state =
+      new FunctionState(this, &target_info, inlining_kind, inlining_id);
+
+  HConstant* undefined = graph()->GetConstantUndefined();
+
+  HEnvironment* inner_env =
+      environment()->CopyForInlining(target,
+                                     arguments_count,
+                                     function,
+                                     undefined,
+                                     function_state()->inlining_kind());
+
+  HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
+  inner_env->BindContext(context);
+
+  // Create a dematerialized arguments object for the function, also copy the
+  // current arguments values to use them for materialization.
+  HEnvironment* arguments_env = inner_env->arguments_environment();
+  int parameter_count = arguments_env->parameter_count();
+  HArgumentsObject* arguments_object = Add<HArgumentsObject>(parameter_count);
+  for (int i = 0; i < parameter_count; i++) {
+    arguments_object->AddArgument(arguments_env->Lookup(i), zone());
+  }
+
+  // If the function uses the arguments object, bind it to the dematerialized
+  // arguments object created above.
+  if (function->scope()->arguments() != NULL) {
+    DCHECK(function->scope()->arguments()->IsStackAllocated());
+    inner_env->Bind(function->scope()->arguments(), arguments_object);
+  }
+
+  // Capture the state before invoking the inlined function for deopt in the
+  // inlined function. This simulate has no bailout-id since it's not directly
+  // reachable for deopt, and is only used to capture the state. If the simulate
+  // becomes reachable by merging, the ast id of the simulate merged into it is
+  // adopted.
+  Add<HSimulate>(BailoutId::None());
+
+  current_block()->UpdateEnvironment(inner_env);
+  Scope* saved_scope = scope();
+  set_scope(target_info.scope());
+  HEnterInlined* enter_inlined =
+      Add<HEnterInlined>(return_id, target, context, arguments_count, function,
+                         function_state()->inlining_kind(),
+                         function->scope()->arguments(), arguments_object);
+  if (top_info()->is_tracking_positions()) {
+    enter_inlined->set_inlining_id(inlining_id);
+  }
+  function_state()->set_entry(enter_inlined);
+
+  VisitDeclarations(target_info.scope()->declarations());
+  VisitStatements(function->body());
+  set_scope(saved_scope);
+  if (HasStackOverflow()) {
+    // Bail out if the inlined function did, as we cannot residualize a call
+    // instead; however, do not disable optimization for the outer function.
+    TraceInline(target, caller, "inline graph construction failed");
+    target_shared->DisableOptimization(kInliningBailedOut);
+    current_info()->RetryOptimization(kInliningBailedOut);
+    delete target_state;
+    return true;
+  }
+
+  // Update inlined nodes count.
+  inlined_count_ += nodes_added;
+
+  Handle<Code> unoptimized_code(target_shared->code());
+  DCHECK(unoptimized_code->kind() == Code::FUNCTION);
+  Handle<TypeFeedbackInfo> type_info(
+      TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
+  graph()->update_type_change_checksum(type_info->own_type_change_checksum());
+
+  TraceInline(target, caller, NULL);
+
+  if (current_block() != NULL) {
+    FunctionState* state = function_state();
+    if (state->inlining_kind() == CONSTRUCT_CALL_RETURN) {
+      // Falling off the end of an inlined construct call. In a test context
+      // the return value always evaluates to true; in a value context the
+      // return value is the newly allocated receiver.
+      if (call_context()->IsTest()) {
+        Goto(inlined_test_context()->if_true(), state);
+      } else if (call_context()->IsEffect()) {
+        Goto(function_return(), state);
+      } else {
+        DCHECK(call_context()->IsValue());
+        AddLeaveInlined(implicit_return_value, state);
+      }
+    } else if (state->inlining_kind() == SETTER_CALL_RETURN) {
+      // Falling off the end of an inlined setter call. The returned value is
+      // never used; the value of an assignment is always the value of the RHS
+      // of the assignment.
+      if (call_context()->IsTest()) {
+        inlined_test_context()->ReturnValue(implicit_return_value);
+      } else if (call_context()->IsEffect()) {
+        Goto(function_return(), state);
+      } else {
+        DCHECK(call_context()->IsValue());
+        AddLeaveInlined(implicit_return_value, state);
+      }
+    } else {
+      // Falling off the end of a normal inlined function. This basically means
+      // returning undefined.
+      if (call_context()->IsTest()) {
+        Goto(inlined_test_context()->if_false(), state);
+      } else if (call_context()->IsEffect()) {
+        Goto(function_return(), state);
+      } else {
+        DCHECK(call_context()->IsValue());
+        AddLeaveInlined(undefined, state);
+      }
+    }
+  }
+
+  // Fix up the function exits.
+  if (inlined_test_context() != NULL) {
+    HBasicBlock* if_true = inlined_test_context()->if_true();
+    HBasicBlock* if_false = inlined_test_context()->if_false();
+
+    HEnterInlined* entry = function_state()->entry();
+
+    // Pop the return test context from the expression context stack.
+    DCHECK(ast_context() == inlined_test_context());
+    ClearInlinedTestContext();
+    delete target_state;
+
+    // Forward to the real test context.
+    if (if_true->HasPredecessor()) {
+      entry->RegisterReturnTarget(if_true, zone());
+      if_true->SetJoinId(ast_id);
+      HBasicBlock* true_target = TestContext::cast(ast_context())->if_true();
+      Goto(if_true, true_target, function_state());
+    }
+    if (if_false->HasPredecessor()) {
+      entry->RegisterReturnTarget(if_false, zone());
+      if_false->SetJoinId(ast_id);
+      HBasicBlock* false_target = TestContext::cast(ast_context())->if_false();
+      Goto(if_false, false_target, function_state());
+    }
+    set_current_block(NULL);
+    return true;
+
+  } else if (function_return()->HasPredecessor()) {
+    function_state()->entry()->RegisterReturnTarget(function_return(), zone());
+    function_return()->SetJoinId(ast_id);
+    set_current_block(function_return());
+  } else {
+    set_current_block(NULL);
+  }
+  delete target_state;
+  return true;
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineCall(Call* expr) {
+  return TryInline(expr->target(), expr->arguments()->length(), NULL,
+                   expr->id(), expr->ReturnId(), NORMAL_RETURN);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineConstruct(CallNew* expr,
+                                                HValue* implicit_return_value) {
+  return TryInline(expr->target(), expr->arguments()->length(),
+                   implicit_return_value, expr->id(), expr->ReturnId(),
+                   CONSTRUCT_CALL_RETURN);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineGetter(Handle<JSFunction> getter,
+                                             Handle<Map> receiver_map,
+                                             BailoutId ast_id,
+                                             BailoutId return_id) {
+  if (TryInlineApiGetter(getter, receiver_map, ast_id)) return true;
+  return TryInline(getter, 0, NULL, ast_id, return_id, GETTER_CALL_RETURN);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineSetter(Handle<JSFunction> setter,
+                                             Handle<Map> receiver_map,
+                                             BailoutId id,
+                                             BailoutId assignment_id,
+                                             HValue* implicit_return_value) {
+  if (TryInlineApiSetter(setter, receiver_map, id)) return true;
+  return TryInline(setter, 1, implicit_return_value, id, assignment_id,
+                   SETTER_CALL_RETURN);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineIndirectCall(Handle<JSFunction> function,
+                                                   Call* expr,
+                                                   int arguments_count) {
+  return TryInline(function, arguments_count, NULL, expr->id(),
+                   expr->ReturnId(), NORMAL_RETURN);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr) {
+  if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
+  BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
+  switch (id) {
+    case kMathExp:
+      if (!FLAG_fast_math) break;
+      // Fall through if FLAG_fast_math.
+    case kMathRound:
+    case kMathFround:
+    case kMathFloor:
+    case kMathAbs:
+    case kMathSqrt:
+    case kMathLog:
+    case kMathClz32:
+      if (expr->arguments()->length() == 1) {
+        HValue* argument = Pop();
+        Drop(2);  // Receiver and function.
+        HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
+        ast_context()->ReturnInstruction(op, expr->id());
+        return true;
+      }
+      break;
+    case kMathImul:
+      if (expr->arguments()->length() == 2) {
+        HValue* right = Pop();
+        HValue* left = Pop();
+        Drop(2);  // Receiver and function.
+        HInstruction* op =
+            HMul::NewImul(isolate(), zone(), context(), left, right);
+        ast_context()->ReturnInstruction(op, expr->id());
+        return true;
+      }
+      break;
+    default:
+      // Not supported for inlining yet.
+      break;
+  }
+  return false;
+}
+
+
+// static
+bool HOptimizedGraphBuilder::IsReadOnlyLengthDescriptor(
+    Handle<Map> jsarray_map) {
+  DCHECK(!jsarray_map->is_dictionary_map());
+  Isolate* isolate = jsarray_map->GetIsolate();
+  Handle<Name> length_string = isolate->factory()->length_string();
+  DescriptorArray* descriptors = jsarray_map->instance_descriptors();
+  int number = descriptors->SearchWithCache(*length_string, *jsarray_map);
+  DCHECK_NE(DescriptorArray::kNotFound, number);
+  return descriptors->GetDetails(number).IsReadOnly();
+}
+
+
+// static
+bool HOptimizedGraphBuilder::CanInlineArrayResizeOperation(
+    Handle<Map> receiver_map) {
+  return !receiver_map.is_null() && receiver_map->prototype()->IsJSObject() &&
+         receiver_map->instance_type() == JS_ARRAY_TYPE &&
+         IsFastElementsKind(receiver_map->elements_kind()) &&
+         !receiver_map->is_dictionary_map() && !receiver_map->is_observed() &&
+         receiver_map->is_extensible() &&
+         (!receiver_map->is_prototype_map() || receiver_map->is_stable()) &&
+         !IsReadOnlyLengthDescriptor(receiver_map);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
+    Call* expr, Handle<JSFunction> function, Handle<Map> receiver_map,
+    int args_count_no_receiver) {
+  if (!function->shared()->HasBuiltinFunctionId()) return false;
+  BuiltinFunctionId id = function->shared()->builtin_function_id();
+  int argument_count = args_count_no_receiver + 1;  // Plus receiver.
+
+  if (receiver_map.is_null()) {
+    HValue* receiver = environment()->ExpressionStackAt(args_count_no_receiver);
+    if (receiver->IsConstant() &&
+        HConstant::cast(receiver)->handle(isolate())->IsHeapObject()) {
+      receiver_map =
+          handle(Handle<HeapObject>::cast(
+                     HConstant::cast(receiver)->handle(isolate()))->map());
+    }
+  }
+  // Try to inline calls like Math.* as operations in the calling function.
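+  // For example, a monomorphic Math.floor(x) call is replaced by a single
+  // HUnaryMathOperation instead of an actual call.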
+  switch (id) {
+    case kStringCharCodeAt:
+    case kStringCharAt:
+      if (argument_count == 2) {
+        HValue* index = Pop();
+        HValue* string = Pop();
+        Drop(1);  // Function.
+        HInstruction* char_code =
+            BuildStringCharCodeAt(string, index);
+        if (id == kStringCharCodeAt) {
+          ast_context()->ReturnInstruction(char_code, expr->id());
+          return true;
+        }
+        AddInstruction(char_code);
+        HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
+        ast_context()->ReturnInstruction(result, expr->id());
+        return true;
+      }
+      break;
+    case kStringFromCharCode:
+      if (argument_count == 2) {
+        HValue* argument = Pop();
+        Drop(2);  // Receiver and function.
+        HInstruction* result = NewUncasted<HStringCharFromCode>(argument);
+        ast_context()->ReturnInstruction(result, expr->id());
+        return true;
+      }
+      break;
+    case kMathExp:
+      if (!FLAG_fast_math) break;
+      // Fall through if FLAG_fast_math.
+    case kMathRound:
+    case kMathFround:
+    case kMathFloor:
+    case kMathAbs:
+    case kMathSqrt:
+    case kMathLog:
+    case kMathClz32:
+      if (argument_count == 2) {
+        HValue* argument = Pop();
+        Drop(2);  // Receiver and function.
+        HInstruction* op = NewUncasted<HUnaryMathOperation>(argument, id);
+        ast_context()->ReturnInstruction(op, expr->id());
+        return true;
+      }
+      break;
+    case kMathPow:
+      if (argument_count == 3) {
+        HValue* right = Pop();
+        HValue* left = Pop();
+        Drop(2);  // Receiver and function.
+        HInstruction* result = NULL;
+        // Replace pow with cheaper operations for the exponents 0.5, -0.5
+        // and 2: sqrt(x), 1 / sqrt(x) and x * x respectively.
+        if (right->IsConstant() && HConstant::cast(right)->HasDoubleValue()) {
+          double exponent = HConstant::cast(right)->DoubleValue();
+          if (exponent == 0.5) {
+            result = NewUncasted<HUnaryMathOperation>(left, kMathPowHalf);
+          } else if (exponent == -0.5) {
+            HValue* one = graph()->GetConstant1();
+            HInstruction* sqrt = AddUncasted<HUnaryMathOperation>(
+                left, kMathPowHalf);
+            // MathPowHalf doesn't have side effects so there's no need for
+            // an environment simulation here.
+            DCHECK(!sqrt->HasObservableSideEffects());
+            result = NewUncasted<HDiv>(one, sqrt);
+          } else if (exponent == 2.0) {
+            result = NewUncasted<HMul>(left, left);
+          }
+        }
+
+        if (result == NULL) {
+          result = NewUncasted<HPower>(left, right);
+        }
+        ast_context()->ReturnInstruction(result, expr->id());
+        return true;
+      }
+      break;
+    case kMathMax:
+    case kMathMin:
+      if (argument_count == 3) {
+        HValue* right = Pop();
+        HValue* left = Pop();
+        Drop(2);  // Receiver and function.
+        HMathMinMax::Operation op = (id == kMathMin) ? HMathMinMax::kMathMin
+                                                     : HMathMinMax::kMathMax;
+        HInstruction* result = NewUncasted<HMathMinMax>(left, right, op);
+        ast_context()->ReturnInstruction(result, expr->id());
+        return true;
+      }
+      break;
+    case kMathImul:
+      if (argument_count == 3) {
+        HValue* right = Pop();
+        HValue* left = Pop();
+        Drop(2);  // Receiver and function.
+        HInstruction* result =
+            HMul::NewImul(isolate(), zone(), context(), left, right);
+        ast_context()->ReturnInstruction(result, expr->id());
+        return true;
+      }
+      break;
+    case kArrayPop: {
+      if (!CanInlineArrayResizeOperation(receiver_map)) return false;
+      ElementsKind elements_kind = receiver_map->elements_kind();
+
+      Drop(args_count_no_receiver);
+      HValue* result;
+      HValue* reduced_length;
+      HValue* receiver = Pop();
+
+      HValue* checked_object = AddCheckMap(receiver, receiver_map);
+      HValue* length =
+          Add<HLoadNamedField>(checked_object, nullptr,
+                               HObjectAccess::ForArrayLength(elements_kind));
+
+      Drop(1);  // Function.
+
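+      // The inlined pop: if the array is empty the result is undefined;
+      // otherwise load the last element, overwrite its slot with a hole and
+      // store the decremented length back.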
+      { NoObservableSideEffectsScope scope(this);
+        IfBuilder length_checker(this);
+
+        HValue* bounds_check = length_checker.If<HCompareNumericAndBranch>(
+            length, graph()->GetConstant0(), Token::EQ);
+        length_checker.Then();
+
+        if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
+
+        length_checker.Else();
+        HValue* elements = AddLoadElements(checked_object);
+        // Ensure that we aren't popping from a copy-on-write array.
+        if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+          elements = BuildCopyElementsOnWrite(checked_object, elements,
+                                              elements_kind, length);
+        }
+        reduced_length = AddUncasted<HSub>(length, graph()->GetConstant1());
+        result = AddElementAccess(elements, reduced_length, nullptr,
+                                  bounds_check, nullptr, elements_kind, LOAD);
+        HValue* hole = IsFastSmiOrObjectElementsKind(elements_kind)
+                           ? graph()->GetConstantHole()
+                           : Add<HConstant>(HConstant::kHoleNaN);
+        if (IsFastSmiOrObjectElementsKind(elements_kind)) {
+          elements_kind = FAST_HOLEY_ELEMENTS;
+        }
+        AddElementAccess(elements, reduced_length, hole, bounds_check, nullptr,
+                         elements_kind, STORE);
+        Add<HStoreNamedField>(
+            checked_object, HObjectAccess::ForArrayLength(elements_kind),
+            reduced_length, STORE_TO_INITIALIZED_ENTRY);
+
+        if (!ast_context()->IsEffect()) Push(result);
+
+        length_checker.End();
+      }
+      result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
+      Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+      if (!ast_context()->IsEffect()) Drop(1);
+
+      ast_context()->ReturnValue(result);
+      return true;
+    }
+    case kArrayPush: {
+      if (!CanInlineArrayResizeOperation(receiver_map)) return false;
+      ElementsKind elements_kind = receiver_map->elements_kind();
+
+      // If there may be elements accessors in the prototype chain, the fast
+      // inlined version can't be used.
+      if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
+      // Even if there are currently no elements accessors on the prototype
+      // chain, they may still be installed later. Install a full prototype
+      // chain check to trap element accessors being installed on the
+      // prototype chain, which would cause elements to go to dictionary mode
+      // and result in a map change.
+      Handle<JSObject> prototype(JSObject::cast(receiver_map->prototype()));
+      BuildCheckPrototypeMaps(prototype, Handle<JSObject>());
+
+      // Protect against adding elements to the Array prototype, which needs to
+      // route through appropriate bottlenecks.
+      if (isolate()->IsFastArrayConstructorPrototypeChainIntact() &&
+          !prototype->IsJSArray()) {
+        return false;
+      }
+
+      const int argc = args_count_no_receiver;
+      if (argc != 1) return false;
+
+      HValue* value_to_push = Pop();
+      HValue* array = Pop();
+      Drop(1);  // Drop function.
+
+      HInstruction* new_size = NULL;
+      HValue* length = NULL;
+
+      {
+        NoObservableSideEffectsScope scope(this);
+
+        length = Add<HLoadNamedField>(
+            array, nullptr, HObjectAccess::ForArrayLength(elements_kind));
+
+        new_size = AddUncasted<HAdd>(length, graph()->GetConstant1());
+
+        bool is_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+        HValue* checked_array = Add<HCheckMaps>(array, receiver_map);
+        BuildUncheckedMonomorphicElementAccess(
+            checked_array, length, value_to_push, is_array, elements_kind,
+            STORE, NEVER_RETURN_HOLE, STORE_AND_GROW_NO_TRANSITION);
+
+        if (!ast_context()->IsEffect()) Push(new_size);
+        Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+        if (!ast_context()->IsEffect()) Drop(1);
+      }
+
+      ast_context()->ReturnValue(new_size);
+      return true;
+    }
+    case kArrayShift: {
+      if (!CanInlineArrayResizeOperation(receiver_map)) return false;
+      ElementsKind kind = receiver_map->elements_kind();
+
+      // If there may be elements accessors in the prototype chain, the fast
+      // inlined version can't be used.
+      if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
+
+      // Even if there are currently no elements accessors on the prototype
+      // chain, they may still be installed later. Install a full prototype
+      // chain check to trap element accessors being installed on the
+      // prototype chain, which would cause elements to go to dictionary mode
+      // and result in a map change.
+      BuildCheckPrototypeMaps(
+          handle(JSObject::cast(receiver_map->prototype()), isolate()),
+          Handle<JSObject>::null());
+
+      // Threshold for fast inlined Array.shift().
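+      // Arrays longer than this fall back to calling the shift builtin in
+      // the else-branch below.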
+      HConstant* inline_threshold = Add<HConstant>(static_cast<int32_t>(16));
+
+      Drop(args_count_no_receiver);
+      HValue* receiver = Pop();
+      HValue* function = Pop();
+      HValue* result;
+
+      {
+        NoObservableSideEffectsScope scope(this);
+
+        HValue* length = Add<HLoadNamedField>(
+            receiver, nullptr, HObjectAccess::ForArrayLength(kind));
+
+        IfBuilder if_lengthiszero(this);
+        HValue* lengthiszero = if_lengthiszero.If<HCompareNumericAndBranch>(
+            length, graph()->GetConstant0(), Token::EQ);
+        if_lengthiszero.Then();
+        {
+          if (!ast_context()->IsEffect()) Push(graph()->GetConstantUndefined());
+        }
+        if_lengthiszero.Else();
+        {
+          HValue* elements = AddLoadElements(receiver);
+
+          // Check if we can use the fast inlined Array.shift().
+          IfBuilder if_inline(this);
+          if_inline.If<HCompareNumericAndBranch>(
+              length, inline_threshold, Token::LTE);
+          if (IsFastSmiOrObjectElementsKind(kind)) {
+            // We cannot handle copy-on-write backing stores here.
+            if_inline.AndIf<HCompareMap>(
+                elements, isolate()->factory()->fixed_array_map());
+          }
+          if_inline.Then();
+          {
+            // Remember the result.
+            if (!ast_context()->IsEffect()) {
+              Push(AddElementAccess(elements, graph()->GetConstant0(), nullptr,
+                                    lengthiszero, nullptr, kind, LOAD));
+            }
+
+            // Compute the new length.
+            HValue* new_length = AddUncasted<HSub>(
+                length, graph()->GetConstant1());
+            new_length->ClearFlag(HValue::kCanOverflow);
+
+            // Copy the remaining elements.
+            LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
+            {
+              HValue* new_key = loop.BeginBody(
+                  graph()->GetConstant0(), new_length, Token::LT);
+              HValue* key = AddUncasted<HAdd>(new_key, graph()->GetConstant1());
+              key->ClearFlag(HValue::kCanOverflow);
+              ElementsKind copy_kind =
+                  kind == FAST_HOLEY_SMI_ELEMENTS ? FAST_HOLEY_ELEMENTS : kind;
+              HValue* element =
+                  AddUncasted<HLoadKeyed>(elements, key, lengthiszero, nullptr,
+                                          copy_kind, ALLOW_RETURN_HOLE);
+              HStoreKeyed* store = Add<HStoreKeyed>(elements, new_key, element,
+                                                    nullptr, copy_kind);
+              store->SetFlag(HValue::kAllowUndefinedAsNaN);
+            }
+            loop.EndBody();
+
+            // Put a hole at the end.
+            HValue* hole = IsFastSmiOrObjectElementsKind(kind)
+                               ? graph()->GetConstantHole()
+                               : Add<HConstant>(HConstant::kHoleNaN);
+            if (IsFastSmiOrObjectElementsKind(kind)) kind = FAST_HOLEY_ELEMENTS;
+            Add<HStoreKeyed>(elements, new_length, hole, nullptr, kind,
+                             INITIALIZING_STORE);
+
+            // Remember new length.
+            Add<HStoreNamedField>(
+                receiver, HObjectAccess::ForArrayLength(kind),
+                new_length, STORE_TO_INITIALIZED_ENTRY);
+          }
+          if_inline.Else();
+          {
+            Add<HPushArguments>(receiver);
+            result = Add<HCallJSFunction>(function, 1);
+            if (!ast_context()->IsEffect()) Push(result);
+          }
+          if_inline.End();
+        }
+        if_lengthiszero.End();
+      }
+      result = ast_context()->IsEffect() ? graph()->GetConstant0() : Top();
+      Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+      if (!ast_context()->IsEffect()) Drop(1);
+      ast_context()->ReturnValue(result);
+      return true;
+    }
+    case kArrayIndexOf:
+    case kArrayLastIndexOf: {
+      if (receiver_map.is_null()) return false;
+      if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
+      ElementsKind kind = receiver_map->elements_kind();
+      if (!IsFastElementsKind(kind)) return false;
+      if (receiver_map->is_observed()) return false;
+      if (argument_count != 2) return false;
+      if (!receiver_map->is_extensible()) return false;
+
+      // If there may be elements accessors in the prototype chain, the fast
+      // inlined version can't be used.
+      if (receiver_map->DictionaryElementsInPrototypeChainOnly()) return false;
+
+      // Even if there are currently no elements accessors on the prototype
+      // chain, they may still be installed later. Install a full prototype
+      // chain check to trap element accessors being installed on the
+      // prototype chain, which would cause elements to go to dictionary mode
+      // and result in a map change.
+      BuildCheckPrototypeMaps(
+          handle(JSObject::cast(receiver_map->prototype()), isolate()),
+          Handle<JSObject>::null());
+
+      HValue* search_element = Pop();
+      HValue* receiver = Pop();
+      Drop(1);  // Drop function.
+
+      ArrayIndexOfMode mode = (id == kArrayIndexOf)
+          ? kFirstIndexOf : kLastIndexOf;
+      HValue* index = BuildArrayIndexOf(receiver, search_element, kind, mode);
+
+      if (!ast_context()->IsEffect()) Push(index);
+      Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+      if (!ast_context()->IsEffect()) Drop(1);
+      ast_context()->ReturnValue(index);
+      return true;
+    }
+    default:
+      // Not yet supported for inlining.
+      break;
+  }
+  return false;
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiFunctionCall(Call* expr,
+                                                      HValue* receiver) {
+  Handle<JSFunction> function = expr->target();
+  int argc = expr->arguments()->length();
+  SmallMapList receiver_maps;
+  return TryInlineApiCall(function,
+                          receiver,
+                          &receiver_maps,
+                          argc,
+                          expr->id(),
+                          kCallApiFunction);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiMethodCall(
+    Call* expr,
+    HValue* receiver,
+    SmallMapList* receiver_maps) {
+  Handle<JSFunction> function = expr->target();
+  int argc = expr->arguments()->length();
+  return TryInlineApiCall(function,
+                          receiver,
+                          receiver_maps,
+                          argc,
+                          expr->id(),
+                          kCallApiMethod);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiGetter(Handle<JSFunction> function,
+                                                Handle<Map> receiver_map,
+                                                BailoutId ast_id) {
+  SmallMapList receiver_maps(1, zone());
+  receiver_maps.Add(receiver_map, zone());
+  return TryInlineApiCall(function,
+                          NULL,  // Receiver is on expression stack.
+                          &receiver_maps,
+                          0,
+                          ast_id,
+                          kCallApiGetter);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiSetter(Handle<JSFunction> function,
+                                                Handle<Map> receiver_map,
+                                                BailoutId ast_id) {
+  SmallMapList receiver_maps(1, zone());
+  receiver_maps.Add(receiver_map, zone());
+  return TryInlineApiCall(function,
+                          NULL,  // Receiver is on expression stack.
+                          &receiver_maps,
+                          1,
+                          ast_id,
+                          kCallApiSetter);
+}
+
+
+bool HOptimizedGraphBuilder::TryInlineApiCall(Handle<JSFunction> function,
+                                               HValue* receiver,
+                                               SmallMapList* receiver_maps,
+                                               int argc,
+                                               BailoutId ast_id,
+                                               ApiCallType call_type) {
+  if (function->context()->native_context() !=
+      top_info()->closure()->context()->native_context()) {
+    return false;
+  }
+  CallOptimization optimization(function);
+  if (!optimization.is_simple_api_call()) return false;
+  Handle<Map> holder_map;
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    auto map = receiver_maps->at(i);
+    // Don't inline calls to receivers requiring access checks.
+    if (map->is_access_check_needed()) return false;
+  }
+  if (call_type == kCallApiFunction) {
+    // Cannot embed a direct reference to the global proxy map
+    // as it may be dropped on deserialization.
+    CHECK(!isolate()->serializer_enabled());
+    DCHECK_EQ(0, receiver_maps->length());
+    receiver_maps->Add(handle(function->global_proxy()->map()), zone());
+  }
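+  // Find the object in the prototype chain that actually holds the API
+  // function (the "holder"); if it cannot be derived from the first
+  // receiver map, the call cannot be inlined.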
+  CallOptimization::HolderLookup holder_lookup =
+      CallOptimization::kHolderNotFound;
+  Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
+      receiver_maps->first(), &holder_lookup);
+  if (holder_lookup == CallOptimization::kHolderNotFound) return false;
+
+  if (FLAG_trace_inlining) {
+    PrintF("Inlining api function ");
+    function->ShortPrint();
+    PrintF("\n");
+  }
+
+  bool is_function = false;
+  bool is_store = false;
+  switch (call_type) {
+    case kCallApiFunction:
+    case kCallApiMethod:
+      // Need to check that none of the receiver maps could have changed.
+      Add<HCheckMaps>(receiver, receiver_maps);
+      // Need to ensure the chain between receiver and api_holder is intact.
+      if (holder_lookup == CallOptimization::kHolderFound) {
+        AddCheckPrototypeMaps(api_holder, receiver_maps->first());
+      } else {
+        DCHECK_EQ(holder_lookup, CallOptimization::kHolderIsReceiver);
+      }
+      // Includes receiver.
+      PushArgumentsFromEnvironment(argc + 1);
+      is_function = true;
+      break;
+    case kCallApiGetter:
+      // Receiver and prototype chain cannot have changed.
+      DCHECK_EQ(0, argc);
+      DCHECK_NULL(receiver);
+      // Receiver is on expression stack.
+      receiver = Pop();
+      Add<HPushArguments>(receiver);
+      break;
+    case kCallApiSetter:
+      {
+        is_store = true;
+        // Receiver and prototype chain cannot have changed.
+        DCHECK_EQ(1, argc);
+        DCHECK_NULL(receiver);
+        // Receiver and value are on expression stack.
+        HValue* value = Pop();
+        receiver = Pop();
+        Add<HPushArguments>(receiver, value);
+        break;
+     }
+  }
+
+  HValue* holder = NULL;
+  switch (holder_lookup) {
+    case CallOptimization::kHolderFound:
+      holder = Add<HConstant>(api_holder);
+      break;
+    case CallOptimization::kHolderIsReceiver:
+      holder = receiver;
+      break;
+    case CallOptimization::kHolderNotFound:
+      UNREACHABLE();
+      break;
+  }
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data_obj(api_call_info->data(), isolate());
+  bool call_data_undefined = call_data_obj->IsUndefined();
+  HValue* call_data = Add<HConstant>(call_data_obj);
+  ApiFunction fun(v8::ToCData<Address>(api_call_info->callback()));
+  ExternalReference ref = ExternalReference(&fun,
+                                            ExternalReference::DIRECT_API_CALL,
+                                            isolate());
+  HValue* api_function_address = Add<HConstant>(ExternalReference(ref));
+
+  HValue* op_vals[] = {context(), Add<HConstant>(function), call_data, holder,
+                       api_function_address, nullptr};
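+  // The trailing nullptr slot is only used by the variable-argument stub
+  // below, which stores the argument count there.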
+
+  HInstruction* call = nullptr;
+  if (!is_function) {
+    CallApiAccessorStub stub(isolate(), is_store, call_data_undefined);
+    Handle<Code> code = stub.GetCode();
+    HConstant* code_value = Add<HConstant>(code);
+    ApiAccessorDescriptor descriptor(isolate());
+    call = New<HCallWithDescriptor>(
+        code_value, argc + 1, descriptor,
+        Vector<HValue*>(op_vals, arraysize(op_vals) - 1));
+  } else if (argc <= CallApiFunctionWithFixedArgsStub::kMaxFixedArgs) {
+    CallApiFunctionWithFixedArgsStub stub(isolate(), argc, call_data_undefined);
+    Handle<Code> code = stub.GetCode();
+    HConstant* code_value = Add<HConstant>(code);
+    ApiFunctionWithFixedArgsDescriptor descriptor(isolate());
+    call = New<HCallWithDescriptor>(
+        code_value, argc + 1, descriptor,
+        Vector<HValue*>(op_vals, arraysize(op_vals) - 1));
+    Drop(1);  // Drop function.
+  } else {
+    op_vals[arraysize(op_vals) - 1] = Add<HConstant>(argc);
+    CallApiFunctionStub stub(isolate(), call_data_undefined);
+    Handle<Code> code = stub.GetCode();
+    HConstant* code_value = Add<HConstant>(code);
+    ApiFunctionDescriptor descriptor(isolate());
+    call =
+        New<HCallWithDescriptor>(code_value, argc + 1, descriptor,
+                                 Vector<HValue*>(op_vals, arraysize(op_vals)));
+    Drop(1);  // Drop function.
+  }
+
+  ast_context()->ReturnInstruction(call, ast_id);
+  return true;
+}
+
+
+void HOptimizedGraphBuilder::HandleIndirectCall(Call* expr, HValue* function,
+                                                int arguments_count) {
+  Handle<JSFunction> known_function;
+  int args_count_no_receiver = arguments_count - 1;
+  if (function->IsConstant() &&
+      HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+    known_function =
+        Handle<JSFunction>::cast(HConstant::cast(function)->handle(isolate()));
+    if (TryInlineBuiltinMethodCall(expr, known_function, Handle<Map>(),
+                                   args_count_no_receiver)) {
+      if (FLAG_trace_inlining) {
+        PrintF("Inlining builtin ");
+        known_function->ShortPrint();
+        PrintF("\n");
+      }
+      return;
+    }
+
+    if (TryInlineIndirectCall(known_function, expr, args_count_no_receiver)) {
+      return;
+    }
+  }
+
+  PushArgumentsFromEnvironment(arguments_count);
+  HInvokeFunction* call =
+      New<HInvokeFunction>(function, known_function, arguments_count);
+  Drop(1);  // Function
+  ast_context()->ReturnInstruction(call, expr->id());
+}
+
+
+bool HOptimizedGraphBuilder::TryIndirectCall(Call* expr) {
+  DCHECK(expr->expression()->IsProperty());
+
+  if (!expr->IsMonomorphic()) {
+    return false;
+  }
+  Handle<Map> function_map = expr->GetReceiverTypes()->first();
+  if (function_map->instance_type() != JS_FUNCTION_TYPE ||
+      !expr->target()->shared()->HasBuiltinFunctionId()) {
+    return false;
+  }
+
+  switch (expr->target()->shared()->builtin_function_id()) {
+    case kFunctionCall: {
+      if (expr->arguments()->length() == 0) return false;
+      BuildFunctionCall(expr);
+      return true;
+    }
+    case kFunctionApply: {
+      // For .apply, only the pattern f.apply(receiver, arguments)
+      // is supported.
+      if (current_info()->scope()->arguments() == NULL) return false;
+
+      if (!CanBeFunctionApplyArguments(expr)) return false;
+
+      BuildFunctionApply(expr);
+      return true;
+    }
+    default: { return false; }
+  }
+  UNREACHABLE();
+}
+
+
+// f.apply(...)
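+// Only the exact pattern f.apply(receiver, arguments) reaches this point;
+// other shapes are rejected by TryIndirectCall above.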
+void HOptimizedGraphBuilder::BuildFunctionApply(Call* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  CHECK_ALIVE(VisitForValue(args->at(0)));
+  HValue* receiver = Pop();  // receiver
+  HValue* function = Pop();  // f
+  Drop(1);  // apply
+
+  Handle<Map> function_map = expr->GetReceiverTypes()->first();
+  HValue* checked_function = AddCheckMap(function, function_map);
+
+  if (function_state()->outer() == NULL) {
+    HInstruction* elements = Add<HArgumentsElements>(false);
+    HInstruction* length = Add<HArgumentsLength>(elements);
+    HValue* wrapped_receiver = BuildWrapReceiver(receiver, checked_function);
+    HInstruction* result = New<HApplyArguments>(function,
+                                                wrapped_receiver,
+                                                length,
+                                                elements);
+    ast_context()->ReturnInstruction(result, expr->id());
+  } else {
+    // We are inside an inlined function and know exactly what the arguments
+    // object contains. But we still need to be able to materialize it at
+    // deopt.
+    DCHECK_EQ(environment()->arguments_environment()->parameter_count(),
+              function_state()->entry()->arguments_object()->arguments_count());
+    HArgumentsObject* args = function_state()->entry()->arguments_object();
+    const ZoneList<HValue*>* arguments_values = args->arguments_values();
+    int arguments_count = arguments_values->length();
+    Push(function);
+    Push(BuildWrapReceiver(receiver, checked_function));
+    for (int i = 1; i < arguments_count; i++) {
+      Push(arguments_values->at(i));
+    }
+    HandleIndirectCall(expr, function, arguments_count);
+  }
+}
+
+
+// f.call(...)
+void HOptimizedGraphBuilder::BuildFunctionCall(Call* expr) {
+  HValue* function = Top();  // f
+  Handle<Map> function_map = expr->GetReceiverTypes()->first();
+  HValue* checked_function = AddCheckMap(function, function_map);
+
+  // f and call are on the stack in the unoptimized code
+  // during evaluation of the arguments.
+  CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+  int args_length = expr->arguments()->length();
+  int receiver_index = args_length - 1;
+  // Patch the receiver.
+  HValue* receiver = BuildWrapReceiver(
+      environment()->ExpressionStackAt(receiver_index), checked_function);
+  environment()->SetExpressionStackAt(receiver_index, receiver);
+
+  // Call must not be on the stack from now on.
+  int call_index = args_length + 1;
+  environment()->RemoveExpressionStackAt(call_index);
+
+  HandleIndirectCall(expr, function, args_length);
+}
+
+
+HValue* HOptimizedGraphBuilder::ImplicitReceiverFor(HValue* function,
+                                                    Handle<JSFunction> target) {
+  SharedFunctionInfo* shared = target->shared();
+  if (is_sloppy(shared->language_mode()) && !shared->native()) {
+    // Cannot embed a direct reference to the global proxy
+    // as it is dropped on deserialization.
+    CHECK(!isolate()->serializer_enabled());
+    Handle<JSObject> global_proxy(target->context()->global_proxy());
+    return Add<HConstant>(global_proxy);
+  }
+  return graph()->GetConstantUndefined();
+}
+
+
+void HOptimizedGraphBuilder::BuildArrayCall(Expression* expression,
+                                            int arguments_count,
+                                            HValue* function,
+                                            Handle<AllocationSite> site) {
+  Add<HCheckValue>(function, array_function());
+
+  if (IsCallArrayInlineable(arguments_count, site)) {
+    BuildInlinedCallArray(expression, arguments_count, site);
+    return;
+  }
+
+  HInstruction* call = PreProcessCall(New<HCallNewArray>(
+      function, arguments_count + 1, site->GetElementsKind(), site));
+  if (expression->IsCall()) {
+    Drop(1);
+  }
+  ast_context()->ReturnInstruction(call, expression->id());
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
+                                                  HValue* search_element,
+                                                  ElementsKind kind,
+                                                  ArrayIndexOfMode mode) {
+  DCHECK(IsFastElementsKind(kind));
+
+  NoObservableSideEffectsScope no_effects(this);
+
+  HValue* elements = AddLoadElements(receiver);
+  HValue* length = AddLoadArrayLength(receiver, kind);
+
+  HValue* initial;
+  HValue* terminating;
+  Token::Value token;
+  LoopBuilder::Direction direction;
+  if (mode == kFirstIndexOf) {
+    initial = graph()->GetConstant0();
+    terminating = length;
+    token = Token::LT;
+    direction = LoopBuilder::kPostIncrement;
+  } else {
+    DCHECK_EQ(kLastIndexOf, mode);
+    initial = length;
+    terminating = graph()->GetConstant0();
+    token = Token::GT;
+    direction = LoopBuilder::kPreDecrement;
+  }
+
+  Push(graph()->GetConstantMinus1());
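+  // -1 (not found) stays on the expression stack unless one of the loops
+  // below finds a strictly-equal element and replaces it with that index.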
+  if (IsFastDoubleElementsKind(kind) || IsFastSmiElementsKind(kind)) {
+    // Make sure that we can actually compare numbers correctly below, see
+    // https://code.google.com/p/chromium/issues/detail?id=407946 for details.
+    search_element = AddUncasted<HForceRepresentation>(
+        search_element, IsFastSmiElementsKind(kind) ? Representation::Smi()
+                                                    : Representation::Double());
+
+    LoopBuilder loop(this, context(), direction);
+    {
+      HValue* index = loop.BeginBody(initial, terminating, token);
+      HValue* element = AddUncasted<HLoadKeyed>(
+          elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
+      IfBuilder if_issame(this);
+      if_issame.If<HCompareNumericAndBranch>(element, search_element,
+                                             Token::EQ_STRICT);
+      if_issame.Then();
+      {
+        Drop(1);
+        Push(index);
+        loop.Break();
+      }
+      if_issame.End();
+    }
+    loop.EndBody();
+  } else {
+    IfBuilder if_isstring(this);
+    if_isstring.If<HIsStringAndBranch>(search_element);
+    if_isstring.Then();
+    {
+      LoopBuilder loop(this, context(), direction);
+      {
+        HValue* index = loop.BeginBody(initial, terminating, token);
+        HValue* element = AddUncasted<HLoadKeyed>(
+            elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
+        IfBuilder if_issame(this);
+        if_issame.If<HIsStringAndBranch>(element);
+        if_issame.AndIf<HStringCompareAndBranch>(
+            element, search_element, Token::EQ_STRICT);
+        if_issame.Then();
+        {
+          Drop(1);
+          Push(index);
+          loop.Break();
+        }
+        if_issame.End();
+      }
+      loop.EndBody();
+    }
+    if_isstring.Else();
+    {
+      IfBuilder if_isnumber(this);
+      if_isnumber.If<HIsSmiAndBranch>(search_element);
+      if_isnumber.OrIf<HCompareMap>(
+          search_element, isolate()->factory()->heap_number_map());
+      if_isnumber.Then();
+      {
+        HValue* search_number =
+            AddUncasted<HForceRepresentation>(search_element,
+                                              Representation::Double());
+        LoopBuilder loop(this, context(), direction);
+        {
+          HValue* index = loop.BeginBody(initial, terminating, token);
+          HValue* element = AddUncasted<HLoadKeyed>(
+              elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
+
+          IfBuilder if_element_isnumber(this);
+          if_element_isnumber.If<HIsSmiAndBranch>(element);
+          if_element_isnumber.OrIf<HCompareMap>(
+              element, isolate()->factory()->heap_number_map());
+          if_element_isnumber.Then();
+          {
+            HValue* number =
+                AddUncasted<HForceRepresentation>(element,
+                                                  Representation::Double());
+            IfBuilder if_issame(this);
+            if_issame.If<HCompareNumericAndBranch>(
+                number, search_number, Token::EQ_STRICT);
+            if_issame.Then();
+            {
+              Drop(1);
+              Push(index);
+              loop.Break();
+            }
+            if_issame.End();
+          }
+          if_element_isnumber.End();
+        }
+        loop.EndBody();
+      }
+      if_isnumber.Else();
+      {
+        LoopBuilder loop(this, context(), direction);
+        {
+          HValue* index = loop.BeginBody(initial, terminating, token);
+          HValue* element = AddUncasted<HLoadKeyed>(
+              elements, index, nullptr, nullptr, kind, ALLOW_RETURN_HOLE);
+          IfBuilder if_issame(this);
+          if_issame.If<HCompareObjectEqAndBranch>(
+              element, search_element);
+          if_issame.Then();
+          {
+            Drop(1);
+            Push(index);
+            loop.Break();
+          }
+          if_issame.End();
+        }
+        loop.EndBody();
+      }
+      if_isnumber.End();
+    }
+    if_isstring.End();
+  }
+
+  return Pop();
+}
+
+
+bool HOptimizedGraphBuilder::TryHandleArrayCall(Call* expr, HValue* function) {
+  if (!array_function().is_identical_to(expr->target())) {
+    return false;
+  }
+
+  Handle<AllocationSite> site = expr->allocation_site();
+  if (site.is_null()) return false;
+
+  BuildArrayCall(expr,
+                 expr->arguments()->length(),
+                 function,
+                 site);
+  return true;
+}
+
+
+bool HOptimizedGraphBuilder::TryHandleArrayCallNew(CallNew* expr,
+                                                   HValue* function) {
+  if (!array_function().is_identical_to(expr->target())) {
+    return false;
+  }
+
+  Handle<AllocationSite> site = expr->allocation_site();
+  if (site.is_null()) return false;
+
+  BuildArrayCall(expr, expr->arguments()->length(), function, site);
+  return true;
+}
+
+
+bool HOptimizedGraphBuilder::CanBeFunctionApplyArguments(Call* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  if (args->length() != 2) return false;
+  VariableProxy* arg_two = args->at(1)->AsVariableProxy();
+  if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
+  HValue* arg_two_value = LookupAndMakeLive(arg_two->var());
+  if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
+  return true;
+}
+
+
+void HOptimizedGraphBuilder::VisitCall(Call* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+  Expression* callee = expr->expression();
+  int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
+  HInstruction* call = NULL;
+
+  Property* prop = callee->AsProperty();
+  if (prop != NULL) {
+    CHECK_ALIVE(VisitForValue(prop->obj()));
+    HValue* receiver = Top();
+
+    SmallMapList* maps;
+    ComputeReceiverTypes(expr, receiver, &maps, zone());
+
+    if (prop->key()->IsPropertyName() && maps->length() > 0) {
+      Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
+      PropertyAccessInfo info(this, LOAD, maps->first(), name);
+      if (!info.CanAccessAsMonomorphic(maps)) {
+        HandlePolymorphicCallNamed(expr, receiver, maps, name);
+        return;
+      }
+    }
+    HValue* key = NULL;
+    if (!prop->key()->IsPropertyName()) {
+      CHECK_ALIVE(VisitForValue(prop->key()));
+      key = Pop();
+    }
+
+    CHECK_ALIVE(PushLoad(prop, receiver, key));
+    HValue* function = Pop();
+
+    if (function->IsConstant() &&
+        HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+      // Push the function under the receiver.
+      environment()->SetExpressionStackAt(0, function);
+      Push(receiver);
+
+      Handle<JSFunction> known_function = Handle<JSFunction>::cast(
+          HConstant::cast(function)->handle(isolate()));
+      expr->set_target(known_function);
+
+      if (TryIndirectCall(expr)) return;
+      CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+      Handle<Map> map = maps->length() == 1 ? maps->first() : Handle<Map>();
+      if (TryInlineBuiltinMethodCall(expr, known_function, map,
+                                     expr->arguments()->length())) {
+        if (FLAG_trace_inlining) {
+          PrintF("Inlining builtin ");
+          known_function->ShortPrint();
+          PrintF("\n");
+        }
+        return;
+      }
+      if (TryInlineApiMethodCall(expr, receiver, maps)) return;
+
+      // Wrap the receiver if necessary.
+      if (NeedsWrapping(maps->first(), known_function)) {
+        // Since HWrapReceiver currently cannot actually wrap numbers and
+        // strings, use the regular CallFunctionStub for method calls to wrap
+        // the receiver.
+        // TODO(verwaest): Support creation of value wrappers directly in
+        // HWrapReceiver.
+        call = New<HCallFunction>(function, argument_count,
+                                  ConvertReceiverMode::kNotNullOrUndefined);
+      } else if (TryInlineCall(expr)) {
+        return;
+      } else {
+        call = BuildCallConstantFunction(known_function, argument_count);
+      }
+
+    } else {
+      ArgumentsAllowedFlag arguments_flag = ARGUMENTS_NOT_ALLOWED;
+      if (CanBeFunctionApplyArguments(expr) && expr->is_uninitialized()) {
+        // We have to use EAGER deoptimization here because Deoptimizer::SOFT
+        // gets ignored by the always-opt flag, which leads to incorrect code.
+        Add<HDeoptimize>(
+            Deoptimizer::kInsufficientTypeFeedbackForCallWithArguments,
+            Deoptimizer::EAGER);
+        arguments_flag = ARGUMENTS_FAKED;
+      }
+
+      // Push the function under the receiver.
+      environment()->SetExpressionStackAt(0, function);
+      Push(receiver);
+
+      CHECK_ALIVE(VisitExpressions(expr->arguments(), arguments_flag));
+      call = New<HCallFunction>(function, argument_count,
+                                ConvertReceiverMode::kNotNullOrUndefined);
+    }
+    PushArgumentsFromEnvironment(argument_count);
+
+  } else {
+    VariableProxy* proxy = expr->expression()->AsVariableProxy();
+    if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+      return Bailout(kPossibleDirectCallToEval);
+    }
+
+    // The function is on the stack in the unoptimized code during
+    // evaluation of the arguments.
+    CHECK_ALIVE(VisitForValue(expr->expression()));
+    HValue* function = Top();
+    if (function->IsConstant() &&
+        HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+      Handle<Object> constant = HConstant::cast(function)->handle(isolate());
+      Handle<JSFunction> target = Handle<JSFunction>::cast(constant);
+      expr->SetKnownGlobalTarget(target);
+    }
+
+    // Placeholder for the receiver.
+    Push(graph()->GetConstantUndefined());
+    CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+    if (expr->IsMonomorphic() &&
+        !IsClassConstructor(expr->target()->shared()->kind())) {
+      Add<HCheckValue>(function, expr->target());
+
+      // Patch the global object on the stack with the expected receiver.
+      HValue* receiver = ImplicitReceiverFor(function, expr->target());
+      const int receiver_index = argument_count - 1;
+      environment()->SetExpressionStackAt(receiver_index, receiver);
+
+      if (TryInlineBuiltinFunctionCall(expr)) {
+        if (FLAG_trace_inlining) {
+          PrintF("Inlining builtin ");
+          expr->target()->ShortPrint();
+          PrintF("\n");
+        }
+        return;
+      }
+      if (TryInlineApiFunctionCall(expr, receiver)) return;
+      if (TryHandleArrayCall(expr, function)) return;
+      if (TryInlineCall(expr)) return;
+
+      PushArgumentsFromEnvironment(argument_count);
+      call = BuildCallConstantFunction(expr->target(), argument_count);
+    } else {
+      PushArgumentsFromEnvironment(argument_count);
+      HCallFunction* call_function = New<HCallFunction>(
+          function, argument_count, ConvertReceiverMode::kNullOrUndefined);
+      call = call_function;
+      if (expr->is_uninitialized() &&
+          expr->IsUsingCallFeedbackICSlot(isolate())) {
+        // We've never seen this call before, so let's have Crankshaft learn
+        // through the type vector.
+        Handle<TypeFeedbackVector> vector =
+            handle(current_feedback_vector(), isolate());
+        FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
+        call_function->SetVectorAndSlot(vector, slot);
+      }
+    }
+  }
+
+  Drop(1);  // Drop the function.
+  return ast_context()->ReturnInstruction(call, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::BuildInlinedCallArray(
+    Expression* expression,
+    int argument_count,
+    Handle<AllocationSite> site) {
+  DCHECK(!site.is_null());
+  DCHECK(argument_count >= 0 && argument_count <= 1);
+  NoObservableSideEffectsScope no_effects(this);
+
+  // We should at least have the constructor on the expression stack.
+  HValue* constructor = environment()->ExpressionStackAt(argument_count);
+
+  // Register a dependency on the allocation site, so that we deoptimize
+  // if the transition feedback changes.
+  top_info()->dependencies()->AssumeTransitionStable(site);
+  ElementsKind kind = site->GetElementsKind();
+  HInstruction* site_instruction = Add<HConstant>(site);
+
+  // In the single constant argument case, we may have to adjust elements kind
+  // to avoid creating a packed non-empty array.
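+  // For example, "new Array(3)" must produce a holey array of length 3,
+  // since its three elements start out as holes.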
+  if (argument_count == 1 && !IsHoleyElementsKind(kind)) {
+    HValue* argument = environment()->Top();
+    if (argument->IsConstant()) {
+      HConstant* constant_argument = HConstant::cast(argument);
+      DCHECK(constant_argument->HasSmiValue());
+      int constant_array_size = constant_argument->Integer32Value();
+      if (constant_array_size != 0) {
+        kind = GetHoleyElementsKind(kind);
+      }
+    }
+  }
+
+  // Build the array.
+  JSArrayBuilder array_builder(this,
+                               kind,
+                               site_instruction,
+                               constructor,
+                               DISABLE_ALLOCATION_SITES);
+  HValue* new_object = argument_count == 0
+      ? array_builder.AllocateEmptyArray()
+      : BuildAllocateArrayFromLength(&array_builder, Top());
+
+  int args_to_drop = argument_count + (expression->IsCall() ? 2 : 1);
+  Drop(args_to_drop);
+  ast_context()->ReturnValue(new_object);
+}
+
+
+// Checks whether allocation using the given constructor can be inlined.
+static bool IsAllocationInlineable(Handle<JSFunction> constructor) {
+  return constructor->has_initial_map() &&
+         !IsClassConstructor(constructor->shared()->kind()) &&
+         constructor->initial_map()->instance_type() == JS_OBJECT_TYPE &&
+         constructor->initial_map()->instance_size() <
+             HAllocate::kMaxInlineSize;
+}
+
+
+bool HOptimizedGraphBuilder::IsCallArrayInlineable(
+    int argument_count,
+    Handle<AllocationSite> site) {
+  Handle<JSFunction> caller = current_info()->closure();
+  Handle<JSFunction> target = array_function();
+  // We should have the function plus array arguments on the environment stack.
+  DCHECK(environment()->length() >= (argument_count + 1));
+  DCHECK(!site.is_null());
+
+  bool inline_ok = false;
+  if (site->CanInlineCall()) {
+    // We also want to avoid inlining in certain one-argument scenarios.
+    if (argument_count == 1) {
+      HValue* argument = Top();
+      if (argument->IsConstant()) {
+        // Do not inline if the constant length argument is not a smi or
+        // outside the valid range for unrolled loop initialization.
+        HConstant* constant_argument = HConstant::cast(argument);
+        if (constant_argument->HasSmiValue()) {
+          int value = constant_argument->Integer32Value();
+          inline_ok = value >= 0 && value <= kElementLoopUnrollThreshold;
+          if (!inline_ok) {
+            TraceInline(target, caller,
+                        "Constant length outside of valid inlining range.");
+          }
+        }
+      } else {
+        TraceInline(target, caller,
+                    "Dont inline [new] Array(n) where n isn't constant.");
+      }
+    } else if (argument_count == 0) {
+      inline_ok = true;
+    } else {
+      TraceInline(target, caller, "Too many arguments to inline.");
+    }
+  } else {
+    TraceInline(target, caller, "AllocationSite requested no inlining.");
+  }
+
+  if (inline_ok) {
+    TraceInline(target, caller, NULL);
+  }
+  return inline_ok;
+}
+
+
+void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+  int argument_count = expr->arguments()->length() + 1;  // Plus constructor.
+  Factory* factory = isolate()->factory();
+
+  // The constructor function is on the stack in the unoptimized code
+  // during evaluation of the arguments.
+  CHECK_ALIVE(VisitForValue(expr->expression()));
+  HValue* function = Top();
+  CHECK_ALIVE(VisitExpressions(expr->arguments()));
+
+  if (function->IsConstant() &&
+      HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+    Handle<Object> constant = HConstant::cast(function)->handle(isolate());
+    expr->SetKnownGlobalTarget(Handle<JSFunction>::cast(constant));
+  }
+
+  if (FLAG_inline_construct &&
+      expr->IsMonomorphic() &&
+      IsAllocationInlineable(expr->target())) {
+    Handle<JSFunction> constructor = expr->target();
+    DCHECK(
+        constructor->shared()->construct_stub() ==
+            isolate()->builtins()->builtin(Builtins::kJSConstructStubGeneric) ||
+        constructor->shared()->construct_stub() ==
+            isolate()->builtins()->builtin(Builtins::kJSConstructStubApi));
+    HValue* check = Add<HCheckValue>(function, constructor);
+
+    // Force completion of inobject slack tracking before generating
+    // allocation code to finalize instance size.
+    constructor->CompleteInobjectSlackTrackingIfActive();
+
+    // Calculate instance size from initial map of constructor.
+    DCHECK(constructor->has_initial_map());
+    Handle<Map> initial_map(constructor->initial_map());
+    int instance_size = initial_map->instance_size();
+
+    // Allocate an instance of the implicit receiver object.
+    HValue* size_in_bytes = Add<HConstant>(instance_size);
+    HAllocationMode allocation_mode;
+    HAllocate* receiver = BuildAllocate(
+        size_in_bytes, HType::JSObject(), JS_OBJECT_TYPE, allocation_mode);
+    receiver->set_known_initial_map(initial_map);
+
+    // Initialize map and fields of the newly allocated object.
+    { NoObservableSideEffectsScope no_effects(this);
+      DCHECK(initial_map->instance_type() == JS_OBJECT_TYPE);
+      Add<HStoreNamedField>(receiver,
+          HObjectAccess::ForMapAndOffset(initial_map, JSObject::kMapOffset),
+          Add<HConstant>(initial_map));
+      HValue* empty_fixed_array = Add<HConstant>(factory->empty_fixed_array());
+      Add<HStoreNamedField>(receiver,
+          HObjectAccess::ForMapAndOffset(initial_map,
+                                         JSObject::kPropertiesOffset),
+          empty_fixed_array);
+      Add<HStoreNamedField>(receiver,
+          HObjectAccess::ForMapAndOffset(initial_map,
+                                         JSObject::kElementsOffset),
+          empty_fixed_array);
+      BuildInitializeInobjectProperties(receiver, initial_map);
+    }
+
+    // Replace the constructor function with the newly allocated receiver at
+    // the receiver's index on the expression stack.
+    const int receiver_index = argument_count - 1;
+    DCHECK(environment()->ExpressionStackAt(receiver_index) == function);
+    environment()->SetExpressionStackAt(receiver_index, receiver);
+
+    if (TryInlineConstruct(expr, receiver)) {
+      // Inlining worked, add a dependency on the initial map to make sure that
+      // this code is deoptimized whenever the initial map of the constructor
+      // changes.
+      top_info()->dependencies()->AssumeInitialMapCantChange(initial_map);
+      return;
+    }
+
+    // TODO(mstarzinger): For now we remove the previous HAllocate and all
+    // corresponding instructions and instead add HPushArguments for the
+    // arguments in case inlining failed.  What we actually should do is for
+    // inlining to try to build a subgraph without mutating the parent graph.
+    HInstruction* instr = current_block()->last();
+    do {
+      HInstruction* prev_instr = instr->previous();
+      instr->DeleteAndReplaceWith(NULL);
+      instr = prev_instr;
+    } while (instr != check);
+    environment()->SetExpressionStackAt(receiver_index, function);
+  } else {
+    // The constructor function is both an operand to the instruction and an
+    // argument to the construct call.
+    if (TryHandleArrayCallNew(expr, function)) return;
+  }
+
+  HValue* arity = Add<HConstant>(argument_count - 1);
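+  // The Construct stub expects both the target and the new.target; for a
+  // 'new' expression they coincide, so the function is passed twice.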
+  HValue* op_vals[] = {context(), function, function, arity};
+  Callable callable = CodeFactory::Construct(isolate());
+  HConstant* stub = Add<HConstant>(callable.code());
+  PushArgumentsFromEnvironment(argument_count);
+  HInstruction* construct =
+      New<HCallWithDescriptor>(stub, argument_count, callable.descriptor(),
+                               Vector<HValue*>(op_vals, arraysize(op_vals)));
+  return ast_context()->ReturnInstruction(construct, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::BuildInitializeInobjectProperties(
+    HValue* receiver, Handle<Map> initial_map) {
+  if (initial_map->GetInObjectProperties() != 0) {
+    HConstant* undefined = graph()->GetConstantUndefined();
+    for (int i = 0; i < initial_map->GetInObjectProperties(); i++) {
+      int property_offset = initial_map->GetInObjectPropertyOffset(i);
+      Add<HStoreNamedField>(receiver, HObjectAccess::ForMapAndOffset(
+                                          initial_map, property_offset),
+                            undefined);
+    }
+  }
+}
+
+
+HValue* HGraphBuilder::BuildAllocateEmptyArrayBuffer(HValue* byte_length) {
+  // We add an HForceRepresentation here to avoid allocations during a
+  // *-to-tagged HChange, which could cause a GC while the array buffer
+  // object is not fully initialized.
+  HObjectAccess byte_length_access(HObjectAccess::ForJSArrayBufferByteLength());
+  byte_length = AddUncasted<HForceRepresentation>(
+      byte_length, byte_length_access.representation());
+  HAllocate* result =
+      BuildAllocate(Add<HConstant>(JSArrayBuffer::kSizeWithInternalFields),
+                    HType::JSObject(), JS_ARRAY_BUFFER_TYPE, HAllocationMode());
+
+  HValue* native_context = BuildGetNativeContext();
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForMap(),
+      Add<HLoadNamedField>(
+          native_context, nullptr,
+          HObjectAccess::ForContextSlot(Context::ARRAY_BUFFER_MAP_INDEX)));
+
+  HConstant* empty_fixed_array =
+      Add<HConstant>(isolate()->factory()->empty_fixed_array());
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForJSArrayOffset(JSArray::kPropertiesOffset),
+      empty_fixed_array);
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
+      empty_fixed_array);
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForJSArrayBufferBackingStore().WithRepresentation(
+                  Representation::Smi()),
+      graph()->GetConstant0());
+  Add<HStoreNamedField>(result, byte_length_access, byte_length);
+  Add<HStoreNamedField>(result, HObjectAccess::ForJSArrayBufferBitFieldSlot(),
+                        graph()->GetConstant0());
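+  // The empty buffer is marked external (there is no backing store to
+  // manage) and neuterable.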
+  Add<HStoreNamedField>(
+      result, HObjectAccess::ForJSArrayBufferBitField(),
+      Add<HConstant>((1 << JSArrayBuffer::IsExternal::kShift) |
+                     (1 << JSArrayBuffer::IsNeuterable::kShift)));
+
+  for (int field = 0; field < v8::ArrayBuffer::kInternalFieldCount; ++field) {
+    Add<HStoreNamedField>(
+        result,
+        HObjectAccess::ForObservableJSObjectOffset(
+            JSArrayBuffer::kSize + field * kPointerSize, Representation::Smi()),
+        graph()->GetConstant0());
+  }
+
+  return result;
+}
+
+
+template <class ViewClass>
+void HGraphBuilder::BuildArrayBufferViewInitialization(
+    HValue* obj,
+    HValue* buffer,
+    HValue* byte_offset,
+    HValue* byte_length) {
+
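+  // Zero out the embedder's internal fields first.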
+  for (int offset = ViewClass::kSize;
+       offset < ViewClass::kSizeWithInternalFields;
+       offset += kPointerSize) {
+    Add<HStoreNamedField>(obj,
+        HObjectAccess::ForObservableJSObjectOffset(offset),
+        graph()->GetConstant0());
+  }
+
+  Add<HStoreNamedField>(
+      obj,
+      HObjectAccess::ForJSArrayBufferViewByteOffset(),
+      byte_offset);
+  Add<HStoreNamedField>(
+      obj,
+      HObjectAccess::ForJSArrayBufferViewByteLength(),
+      byte_length);
+  Add<HStoreNamedField>(obj, HObjectAccess::ForJSArrayBufferViewBuffer(),
+                        buffer);
+}
+
+
+void HOptimizedGraphBuilder::GenerateDataViewInitialize(
+    CallRuntime* expr) {
+  ZoneList<Expression*>* arguments = expr->arguments();
+
+  DCHECK(arguments->length() == 4);
+  CHECK_ALIVE(VisitForValue(arguments->at(0)));
+  HValue* obj = Pop();
+
+  CHECK_ALIVE(VisitForValue(arguments->at(1)));
+  HValue* buffer = Pop();
+
+  CHECK_ALIVE(VisitForValue(arguments->at(2)));
+  HValue* byte_offset = Pop();
+
+  CHECK_ALIVE(VisitForValue(arguments->at(3)));
+  HValue* byte_length = Pop();
+
+  {
+    NoObservableSideEffectsScope scope(this);
+    BuildArrayBufferViewInitialization<JSDataView>(
+        obj, buffer, byte_offset, byte_length);
+  }
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
+    ExternalArrayType array_type,
+    bool is_zero_byte_offset,
+    HValue* buffer, HValue* byte_offset, HValue* length) {
+  Handle<Map> external_array_map(
+      isolate()->heap()->MapForFixedTypedArray(array_type));
+
+  // The HForceRepresentation is to prevent possible deopt on int-smi
+  // conversion after allocation but before the new object fields are set.
+  length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
+  HValue* elements = Add<HAllocate>(
+      Add<HConstant>(FixedTypedArrayBase::kHeaderSize), HType::HeapObject(),
+      NOT_TENURED, external_array_map->instance_type());
+
+  AddStoreMapConstant(elements, external_array_map);
+  Add<HStoreNamedField>(elements,
+      HObjectAccess::ForFixedArrayLength(), length);
+
+  HValue* backing_store = Add<HLoadNamedField>(
+      buffer, nullptr, HObjectAccess::ForJSArrayBufferBackingStore());
+
+  HValue* typed_array_start;
+  if (is_zero_byte_offset) {
+    typed_array_start = backing_store;
+  } else {
+    HInstruction* external_pointer =
+        AddUncasted<HAdd>(backing_store, byte_offset);
+    // Arguments are checked prior to the call to TypedArrayInitialize,
+    // including byte_offset.
+    external_pointer->ClearFlag(HValue::kCanOverflow);
+    typed_array_start = external_pointer;
+  }
+
+  Add<HStoreNamedField>(elements,
+                        HObjectAccess::ForFixedTypedArrayBaseBasePointer(),
+                        graph()->GetConstant0());
+  Add<HStoreNamedField>(elements,
+                        HObjectAccess::ForFixedTypedArrayBaseExternalPointer(),
+                        typed_array_start);
+
+  return elements;
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildAllocateFixedTypedArray(
+    ExternalArrayType array_type, size_t element_size,
+    ElementsKind fixed_elements_kind, HValue* byte_length, HValue* length,
+    bool initialize) {
+  STATIC_ASSERT(
+      (FixedTypedArrayBase::kHeaderSize & kObjectAlignmentMask) == 0);
+  HValue* total_size;
+
+  // If the fixed array's elements are not aligned to the object alignment,
+  // we need to align the whole array to the object alignment.
+  if (element_size % kObjectAlignment != 0) {
+    total_size = BuildObjectSizeAlignment(
+        byte_length, FixedTypedArrayBase::kHeaderSize);
+  } else {
+    total_size = AddUncasted<HAdd>(byte_length,
+        Add<HConstant>(FixedTypedArrayBase::kHeaderSize));
+    total_size->ClearFlag(HValue::kCanOverflow);
+  }
+
+  // The HForceRepresentation is to prevent possible deopt on int-smi
+  // conversion after allocation but before the new object fields are set.
+  length = AddUncasted<HForceRepresentation>(length, Representation::Smi());
+  Handle<Map> fixed_typed_array_map(
+      isolate()->heap()->MapForFixedTypedArray(array_type));
+  HAllocate* elements =
+      Add<HAllocate>(total_size, HType::HeapObject(), NOT_TENURED,
+                     fixed_typed_array_map->instance_type());
+
+#ifndef V8_HOST_ARCH_64_BIT
+  if (array_type == kExternalFloat64Array) {
+    elements->MakeDoubleAligned();
+  }
+#endif
+
+  AddStoreMapConstant(elements, fixed_typed_array_map);
+
+  Add<HStoreNamedField>(elements,
+      HObjectAccess::ForFixedArrayLength(),
+      length);
+  Add<HStoreNamedField>(
+      elements, HObjectAccess::ForFixedTypedArrayBaseBasePointer(), elements);
+
+  Add<HStoreNamedField>(
+      elements, HObjectAccess::ForFixedTypedArrayBaseExternalPointer(),
+      Add<HConstant>(ExternalReference::fixed_typed_array_base_data_offset()));
+
+  HValue* filler = Add<HConstant>(static_cast<int32_t>(0));
+
+  if (initialize) {
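+    // Zero-fill the backing store so the new typed array starts out with
+    // defined contents.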
+    LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+
+    HValue* backing_store = AddUncasted<HAdd>(
+        Add<HConstant>(ExternalReference::fixed_typed_array_base_data_offset()),
+        elements, Strength::WEAK, AddOfExternalAndTagged);
+
+    HValue* key = builder.BeginBody(
+        Add<HConstant>(static_cast<int32_t>(0)),
+        length, Token::LT);
+    Add<HStoreKeyed>(backing_store, key, filler, elements, fixed_elements_kind);
+
+    builder.EndBody();
+  }
+  return elements;
+}
+
+
+void HOptimizedGraphBuilder::GenerateTypedArrayInitialize(
+    CallRuntime* expr) {
+  ZoneList<Expression*>* arguments = expr->arguments();
+
+  static const int kObjectArg = 0;
+  static const int kArrayIdArg = 1;
+  static const int kBufferArg = 2;
+  static const int kByteOffsetArg = 3;
+  static const int kByteLengthArg = 4;
+  static const int kInitializeArg = 5;
+  static const int kArgsLength = 6;
+  DCHECK(arguments->length() == kArgsLength);
+
+  CHECK_ALIVE(VisitForValue(arguments->at(kObjectArg)));
+  HValue* obj = Pop();
+
+  if (!arguments->at(kArrayIdArg)->IsLiteral()) {
+    // This should never happen in real use, but can happen when fuzzing.
+    // Just bail out.
+    Bailout(kNeedSmiLiteral);
+    return;
+  }
+  Handle<Object> value =
+      static_cast<Literal*>(arguments->at(kArrayIdArg))->value();
+  if (!value->IsSmi()) {
+    // This should never happen in real use, but can happen when fuzzing.
+    // Just bail out.
+    Bailout(kNeedSmiLiteral);
+    return;
+  }
+  int array_id = Smi::cast(*value)->value();
+
+  HValue* buffer;
+  if (!arguments->at(kBufferArg)->IsNullLiteral()) {
+    CHECK_ALIVE(VisitForValue(arguments->at(kBufferArg)));
+    buffer = Pop();
+  } else {
+    buffer = NULL;
+  }
+
+  HValue* byte_offset;
+  bool is_zero_byte_offset;
+
+  if (arguments->at(kByteOffsetArg)->IsLiteral() &&
+      Smi::FromInt(0) ==
+          *static_cast<Literal*>(arguments->at(kByteOffsetArg))->value()) {
+    byte_offset = Add<HConstant>(static_cast<int32_t>(0));
+    is_zero_byte_offset = true;
+  } else {
+    CHECK_ALIVE(VisitForValue(arguments->at(kByteOffsetArg)));
+    byte_offset = Pop();
+    is_zero_byte_offset = false;
+    DCHECK(buffer != NULL);
+  }
+
+  CHECK_ALIVE(VisitForValue(arguments->at(kByteLengthArg)));
+  HValue* byte_length = Pop();
+
+  CHECK(arguments->at(kInitializeArg)->IsLiteral());
+  bool initialize = static_cast<Literal*>(arguments->at(kInitializeArg))
+                        ->value()
+                        ->BooleanValue();
+
+  NoObservableSideEffectsScope scope(this);
+  IfBuilder byte_offset_smi(this);
+
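+  // Only the Smi byte_offset case is handled inline; otherwise the Else
+  // branch below falls back to calling the runtime function itself.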
+  if (!is_zero_byte_offset) {
+    byte_offset_smi.If<HIsSmiAndBranch>(byte_offset);
+    byte_offset_smi.Then();
+  }
+
+  ExternalArrayType array_type =
+      kExternalInt8Array;  // Bogus initialization.
+  size_t element_size = 1;  // Bogus initialization.
+  ElementsKind fixed_elements_kind =  // Bogus initialization.
+      INT8_ELEMENTS;
+  Runtime::ArrayIdToTypeAndSize(array_id,
+      &array_type,
+      &fixed_elements_kind,
+      &element_size);
+
+  { //  byte_offset is Smi.
+    HValue* allocated_buffer = buffer;
+    if (buffer == NULL) {
+      allocated_buffer = BuildAllocateEmptyArrayBuffer(byte_length);
+    }
+    BuildArrayBufferViewInitialization<JSTypedArray>(obj, allocated_buffer,
+                                                     byte_offset, byte_length);
+
+    HInstruction* length = AddUncasted<HDiv>(byte_length,
+        Add<HConstant>(static_cast<int32_t>(element_size)));
+
+    Add<HStoreNamedField>(obj,
+        HObjectAccess::ForJSTypedArrayLength(),
+        length);
+
+    HValue* elements;
+    if (buffer != NULL) {
+      elements = BuildAllocateExternalElements(
+          array_type, is_zero_byte_offset, buffer, byte_offset, length);
+    } else {
+      DCHECK(is_zero_byte_offset);
+      elements = BuildAllocateFixedTypedArray(array_type, element_size,
+                                              fixed_elements_kind, byte_length,
+                                              length, initialize);
+    }
+    Add<HStoreNamedField>(
+        obj, HObjectAccess::ForElementsPointer(), elements);
+  }
+
+  if (!is_zero_byte_offset) {
+    byte_offset_smi.Else();
+    { //  byte_offset is not Smi.
+      Push(obj);
+      CHECK_ALIVE(VisitForValue(arguments->at(kArrayIdArg)));
+      Push(buffer);
+      Push(byte_offset);
+      Push(byte_length);
+      CHECK_ALIVE(VisitForValue(arguments->at(kInitializeArg)));
+      PushArgumentsFromEnvironment(kArgsLength);
+      Add<HCallRuntime>(expr->function(), kArgsLength);
+    }
+  }
+  byte_offset_smi.End();
+}
+
+
+void HOptimizedGraphBuilder::GenerateMaxSmi(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
+  return ast_context()->ReturnInstruction(max_smi, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateTypedArrayMaxSizeInHeap(
+    CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  HConstant* result = New<HConstant>(static_cast<int32_t>(
+        FLAG_typed_array_max_size_in_heap));
+  return ast_context()->ReturnInstruction(result, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateArrayBufferGetByteLength(
+    CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
+  HValue* buffer = Pop();
+  HInstruction* result = New<HLoadNamedField>(
+      buffer, nullptr, HObjectAccess::ForJSArrayBufferByteLength());
+  return ast_context()->ReturnInstruction(result, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteLength(
+    CallRuntime* expr) {
+  NoObservableSideEffectsScope scope(this);
+  DCHECK(expr->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
+  HValue* view = Pop();
+
+  return ast_context()->ReturnValue(BuildArrayBufferViewFieldAccessor(
+      view, nullptr,
+      FieldIndex::ForInObjectOffset(JSArrayBufferView::kByteLengthOffset)));
+}
+
+
+void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteOffset(
+    CallRuntime* expr) {
+  NoObservableSideEffectsScope scope(this);
+  DCHECK(expr->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
+  HValue* view = Pop();
+
+  return ast_context()->ReturnValue(BuildArrayBufferViewFieldAccessor(
+      view, nullptr,
+      FieldIndex::ForInObjectOffset(JSArrayBufferView::kByteOffsetOffset)));
+}
+
+
+void HOptimizedGraphBuilder::GenerateTypedArrayGetLength(
+    CallRuntime* expr) {
+  NoObservableSideEffectsScope scope(this);
+  DCHECK(expr->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
+  HValue* view = Pop();
+
+  return ast_context()->ReturnValue(BuildArrayBufferViewFieldAccessor(
+      view, nullptr,
+      FieldIndex::ForInObjectOffset(JSTypedArray::kLengthOffset)));
+}
+
+
+void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  if (expr->is_jsruntime()) {
+    return Bailout(kCallToAJavaScriptRuntimeFunction);
+  }
+
+  const Runtime::Function* function = expr->function();
+  DCHECK(function != NULL);
+  switch (function->function_id) {
+#define CALL_INTRINSIC_GENERATOR(Name) \
+  case Runtime::kInline##Name:         \
+    return Generate##Name(expr);
+
+    FOR_EACH_HYDROGEN_INTRINSIC(CALL_INTRINSIC_GENERATOR)
+#undef CALL_INTRINSIC_GENERATOR
+    default: {
+      int argument_count = expr->arguments()->length();
+      CHECK_ALIVE(VisitExpressions(expr->arguments()));
+      PushArgumentsFromEnvironment(argument_count);
+      HCallRuntime* call = New<HCallRuntime>(function, argument_count);
+      return ast_context()->ReturnInstruction(call, expr->id());
+    }
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  switch (expr->op()) {
+    case Token::DELETE: return VisitDelete(expr);
+    case Token::VOID: return VisitVoid(expr);
+    case Token::TYPEOF: return VisitTypeof(expr);
+    case Token::NOT: return VisitNot(expr);
+    default: UNREACHABLE();
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitDelete(UnaryOperation* expr) {
+  Property* prop = expr->expression()->AsProperty();
+  VariableProxy* proxy = expr->expression()->AsVariableProxy();
+  if (prop != NULL) {
+    CHECK_ALIVE(VisitForValue(prop->obj()));
+    CHECK_ALIVE(VisitForValue(prop->key()));
+    HValue* key = Pop();
+    HValue* obj = Pop();
+    Add<HPushArguments>(obj, key);
+    HInstruction* instr = New<HCallRuntime>(
+        Runtime::FunctionForId(is_strict(function_language_mode())
+                                   ? Runtime::kDeleteProperty_Strict
+                                   : Runtime::kDeleteProperty_Sloppy),
+        2);
+    return ast_context()->ReturnInstruction(instr, expr->id());
+  } else if (proxy != NULL) {
+    Variable* var = proxy->var();
+    if (var->IsUnallocatedOrGlobalSlot()) {
+      Bailout(kDeleteWithGlobalVariable);
+    } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+      // The result of deleting a non-global variable is false.  'this' is
+      // not really a variable, though we implement it as one.  The
+      // subexpression does not have side effects.
+      HValue* value = var->HasThisName(isolate()) ? graph()->GetConstantTrue()
+                                                  : graph()->GetConstantFalse();
+      return ast_context()->ReturnValue(value);
+    } else {
+      Bailout(kDeleteWithNonGlobalVariable);
+    }
+  } else {
+    // The result of deleting a non-property, non-variable reference is true.
+    // Evaluate the subexpression for side effects.
+    CHECK_ALIVE(VisitForEffect(expr->expression()));
+    return ast_context()->ReturnValue(graph()->GetConstantTrue());
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitVoid(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForEffect(expr->expression()));
+  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
+void HOptimizedGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+  CHECK_ALIVE(VisitForTypeOf(expr->expression()));
+  HValue* value = Pop();
+  HInstruction* instr = New<HTypeof>(value);
+  return ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::VisitNot(UnaryOperation* expr) {
+  if (ast_context()->IsTest()) {
+    TestContext* context = TestContext::cast(ast_context());
+    VisitForControl(expr->expression(),
+                    context->if_false(),
+                    context->if_true());
+    return;
+  }
+
+  if (ast_context()->IsEffect()) {
+    VisitForEffect(expr->expression());
+    return;
+  }
+
+  DCHECK(ast_context()->IsValue());
+  HBasicBlock* materialize_false = graph()->CreateBasicBlock();
+  HBasicBlock* materialize_true = graph()->CreateBasicBlock();
+  CHECK_BAILOUT(VisitForControl(expr->expression(),
+                                materialize_false,
+                                materialize_true));
+
+  if (materialize_false->HasPredecessor()) {
+    materialize_false->SetJoinId(expr->MaterializeFalseId());
+    set_current_block(materialize_false);
+    Push(graph()->GetConstantFalse());
+  } else {
+    materialize_false = NULL;
+  }
+
+  if (materialize_true->HasPredecessor()) {
+    materialize_true->SetJoinId(expr->MaterializeTrueId());
+    set_current_block(materialize_true);
+    Push(graph()->GetConstantTrue());
+  } else {
+    materialize_true = NULL;
+  }
+
+  HBasicBlock* join =
+      CreateJoin(materialize_false, materialize_true, expr->id());
+  set_current_block(join);
+  if (join != NULL) return ast_context()->ReturnValue(Pop());
+}
+
+
+static Representation RepresentationFor(Type* type) {
+  DisallowHeapAllocation no_allocation;
+  if (type->Is(Type::None())) return Representation::None();
+  if (type->Is(Type::SignedSmall())) return Representation::Smi();
+  if (type->Is(Type::Signed32())) return Representation::Integer32();
+  if (type->Is(Type::Number())) return Representation::Double();
+  return Representation::Tagged();
+}
+
+
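+// Builds the increment or decrement of a count operation. For example, for
+// "x++" with Smi feedback this emits an explicit ToNumber(x) (when the
+// original value is needed) followed by an HAdd of the constant 1.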
+HInstruction* HOptimizedGraphBuilder::BuildIncrement(
+    bool returns_original_input,
+    CountOperation* expr) {
+  // The input to the count operation is on top of the expression stack.
+  Representation rep = RepresentationFor(expr->type());
+  if (rep.IsNone() || rep.IsTagged()) {
+    rep = Representation::Smi();
+  }
+
+  if (returns_original_input && !is_strong(function_language_mode())) {
+    // We need an explicit HValue representing ToNumber(input).  The
+    // actual HChange instruction we need is (sometimes) added in a later
+    // phase, so it is not available now to be used as an input to HAdd and
+    // as the return value.
+    HInstruction* number_input = AddUncasted<HForceRepresentation>(Pop(), rep);
+    if (!rep.IsDouble()) {
+      number_input->SetFlag(HInstruction::kFlexibleRepresentation);
+      number_input->SetFlag(HInstruction::kCannotBeTagged);
+    }
+    Push(number_input);
+  }
+
+  // The addition has no side effects, so we do not need
+  // to simulate the expression stack after this instruction.
+  // Any later failures deopt to the load of the input or earlier.
+  HConstant* delta = (expr->op() == Token::INC)
+      ? graph()->GetConstant1()
+      : graph()->GetConstantMinus1();
+  HInstruction* instr =
+      AddUncasted<HAdd>(Top(), delta, strength(function_language_mode()));
+  if (instr->IsAdd()) {
+    HAdd* add = HAdd::cast(instr);
+    add->set_observed_input_representation(1, rep);
+    add->set_observed_input_representation(2, Representation::Smi());
+  }
+  if (!is_strong(function_language_mode())) {
+    instr->ClearAllSideEffects();
+  } else {
+    Add<HSimulate>(expr->ToNumberId(), REMOVABLE_SIMULATE);
+  }
+  instr->SetFlag(HInstruction::kCannotBeTagged);
+  return instr;
+}
+
+
+void HOptimizedGraphBuilder::BuildStoreForEffect(
+    Expression* expr, Property* prop, FeedbackVectorSlot slot, BailoutId ast_id,
+    BailoutId return_id, HValue* object, HValue* key, HValue* value) {
+  EffectContext for_effect(this);
+  Push(object);
+  if (key != NULL) Push(key);
+  Push(value);
+  BuildStore(expr, prop, slot, ast_id, return_id);
+}
+
+
+void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+  Expression* target = expr->expression();
+  VariableProxy* proxy = target->AsVariableProxy();
+  Property* prop = target->AsProperty();
+  if (proxy == NULL && prop == NULL) {
+    return Bailout(kInvalidLhsInCountOperation);
+  }
+
+  // Match the full code generator stack by simulating an extra stack
+  // element for postfix operations in a non-effect context.  The return
+  // value is ToNumber(input).
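+  // For example, "x++" used as a value returns ToNumber(x), while "x" is
+  // updated to ToNumber(x) + 1.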
+  bool returns_original_input =
+      expr->is_postfix() && !ast_context()->IsEffect();
+  HValue* input = NULL;  // ToNumber(original_input).
+  HValue* after = NULL;  // The result after incrementing or decrementing.
+
+  if (proxy != NULL) {
+    Variable* var = proxy->var();
+    if (var->mode() == CONST_LEGACY) {
+      return Bailout(kUnsupportedCountOperationWithConst);
+    }
+    if (var->mode() == CONST) {
+      return Bailout(kNonInitializerAssignmentToConst);
+    }
+    // Argument of the count operation is a variable, not a property.
+    DCHECK(prop == NULL);
+    CHECK_ALIVE(VisitForValue(target));
+
+    after = BuildIncrement(returns_original_input, expr);
+    input = returns_original_input ? Top() : Pop();
+    Push(after);
+
+    switch (var->location()) {
+      case VariableLocation::GLOBAL:
+      case VariableLocation::UNALLOCATED:
+        HandleGlobalVariableAssignment(var, after, expr->CountSlot(),
+                                       expr->AssignmentId());
+        break;
+
+      case VariableLocation::PARAMETER:
+      case VariableLocation::LOCAL:
+        BindIfLive(var, after);
+        break;
+
+      case VariableLocation::CONTEXT: {
+        // Bail out if we try to mutate a parameter value in a function
+        // using the arguments object.  We do not (yet) correctly handle the
+        // arguments property of the function.
+        if (current_info()->scope()->arguments() != NULL) {
+          // Parameters will be rewritten to context slots.  We have no
+          // direct way to detect that the variable is a parameter, so we
+          // use a linear search of the parameter list.
+          int count = current_info()->scope()->num_parameters();
+          for (int i = 0; i < count; ++i) {
+            if (var == current_info()->scope()->parameter(i)) {
+              return Bailout(kAssignmentToParameterInArgumentsObject);
+            }
+          }
+        }
+
+        HValue* context = BuildContextChainWalk(var);
+        HStoreContextSlot::Mode mode = IsLexicalVariableMode(var->mode())
+            ? HStoreContextSlot::kCheckDeoptimize : HStoreContextSlot::kNoCheck;
+        HStoreContextSlot* instr = Add<HStoreContextSlot>(context, var->index(),
+                                                          mode, after);
+        if (instr->HasObservableSideEffects()) {
+          Add<HSimulate>(expr->AssignmentId(), REMOVABLE_SIMULATE);
+        }
+        break;
+      }
+
+      case VariableLocation::LOOKUP:
+        return Bailout(kLookupVariableInCountOperation);
+    }
+
+    Drop(returns_original_input ? 2 : 1);
+    return ast_context()->ReturnValue(expr->is_postfix() ? input : after);
+  }
+
+  // Argument of the count operation is a property.
+  DCHECK(prop != NULL);
+  if (returns_original_input) Push(graph()->GetConstantUndefined());
+
+  CHECK_ALIVE(VisitForValue(prop->obj()));
+  HValue* object = Top();
+
+  HValue* key = NULL;
+  if (!prop->key()->IsPropertyName() || prop->IsStringAccess()) {
+    CHECK_ALIVE(VisitForValue(prop->key()));
+    key = Top();
+  }
+
+  CHECK_ALIVE(PushLoad(prop, object, key));
+
+  after = BuildIncrement(returns_original_input, expr);
+
+  if (returns_original_input) {
+    input = Pop();
+    // Drop object and key to push them again in the effect context below.
+    Drop(key == NULL ? 1 : 2);
+    environment()->SetExpressionStackAt(0, input);
+    CHECK_ALIVE(BuildStoreForEffect(expr, prop, expr->CountSlot(), expr->id(),
+                                    expr->AssignmentId(), object, key, after));
+    return ast_context()->ReturnValue(Pop());
+  }
+
+  environment()->SetExpressionStackAt(0, after);
+  return BuildStore(expr, prop, expr->CountSlot(), expr->id(),
+                    expr->AssignmentId());
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildStringCharCodeAt(
+    HValue* string,
+    HValue* index) {
+  if (string->IsConstant() && index->IsConstant()) {
+    HConstant* c_string = HConstant::cast(string);
+    HConstant* c_index = HConstant::cast(index);
+    if (c_string->HasStringValue() && c_index->HasNumberValue()) {
+      int32_t i = c_index->NumberValueAsInteger32();
+      Handle<String> s = c_string->StringValue();
+      if (i < 0 || i >= s->length()) {
+        return New<HConstant>(std::numeric_limits<double>::quiet_NaN());
+      }
+      return New<HConstant>(s->Get(i));
+    }
+  }
+  string = BuildCheckString(string);
+  index = Add<HBoundsCheck>(index, AddLoadStringLength(string));
+  return New<HStringCharCodeAt>(string, index);
+}
+
+
+// Checks if the given shift amounts have one of the following forms:
+// (N1) and (N2) with N1 + N2 = 32, or (sa) and (32 - sa).
+static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
+                                             HValue* const32_minus_sa) {
+  if (sa->IsConstant() && const32_minus_sa->IsConstant()) {
+    const HConstant* c1 = HConstant::cast(sa);
+    const HConstant* c2 = HConstant::cast(const32_minus_sa);
+    return c1->HasInteger32Value() && c2->HasInteger32Value() &&
+        (c1->Integer32Value() + c2->Integer32Value() == 32);
+  }
+  if (!const32_minus_sa->IsSub()) return false;
+  HSub* sub = HSub::cast(const32_minus_sa);
+  return sub->left()->EqualsInteger32Constant(32) && sub->right() == sa;
+}
+
+
+// Checks if the left and the right operands are shift instructions with
+// opposite directions that can be replaced by a single rotate-right
+// instruction. In that case, returns the operand and the shift amount for
+// the rotate instruction via the out parameters.
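+// For example, (x << 5) | (x >>> 27) can be replaced by a single rotate
+// right of x by 27 bits.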
+bool HGraphBuilder::MatchRotateRight(HValue* left,
+                                     HValue* right,
+                                     HValue** operand,
+                                     HValue** shift_amount) {
+  HShl* shl;
+  HShr* shr;
+  if (left->IsShl() && right->IsShr()) {
+    shl = HShl::cast(left);
+    shr = HShr::cast(right);
+  } else if (left->IsShr() && right->IsShl()) {
+    shl = HShl::cast(right);
+    shr = HShr::cast(left);
+  } else {
+    return false;
+  }
+  if (shl->left() != shr->left()) return false;
+
+  if (!ShiftAmountsAllowReplaceByRotate(shl->right(), shr->right()) &&
+      !ShiftAmountsAllowReplaceByRotate(shr->right(), shl->right())) {
+    return false;
+  }
+  *operand = shr->left();
+  *shift_amount = shr->right();
+  return true;
+}
+
+
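+// Returns whether the shift amount may be zero after masking with 0x1f, as
+// required by JavaScript shift semantics. A logical shift right by zero can
+// yield a result outside the int32 range, which is why such HShr
+// instructions are recorded as uint32 candidates in BuildBinaryOperation.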
+bool CanBeZero(HValue* right) {
+  if (right->IsConstant()) {
+    HConstant* right_const = HConstant::cast(right);
+    if (right_const->HasInteger32Value() &&
+       (right_const->Integer32Value() & 0x1f) != 0) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+HValue* HGraphBuilder::EnforceNumberType(HValue* number,
+                                         Type* expected) {
+  if (expected->Is(Type::SignedSmall())) {
+    return AddUncasted<HForceRepresentation>(number, Representation::Smi());
+  }
+  if (expected->Is(Type::Signed32())) {
+    return AddUncasted<HForceRepresentation>(number,
+                                             Representation::Integer32());
+  }
+  return number;
+}
+
+
+HValue* HGraphBuilder::TruncateToNumber(HValue* value, Type** expected) {
+  if (value->IsConstant()) {
+    HConstant* constant = HConstant::cast(value);
+    Maybe<HConstant*> number =
+        constant->CopyToTruncatedNumber(isolate(), zone());
+    if (number.IsJust()) {
+      *expected = Type::Number(zone());
+      return AddInstruction(number.FromJust());
+    }
+  }
+
+  // We put temporary values on the stack, which don't correspond to anything
+  // in baseline code. Since nothing is observable, we use a
+  // NoObservableSideEffectsScope to avoid recording those pushes.
+  NoObservableSideEffectsScope no_effects(this);
+
+  Type* expected_type = *expected;
+
+  // Separate the number type from the rest.
+  Type* expected_obj =
+      Type::Intersect(expected_type, Type::NonNumber(zone()), zone());
+  Type* expected_number =
+      Type::Intersect(expected_type, Type::Number(zone()), zone());
+
+  // We expect to get a number.
+  // (We need to check first, since Type::None->Is(Type::Any()) == true.)
+  if (expected_obj->Is(Type::None())) {
+    DCHECK(!expected_number->Is(Type::None(zone())));
+    return value;
+  }
+
+  if (expected_obj->Is(Type::Undefined(zone()))) {
+    // This is already done by HChange.
+    *expected = Type::Union(expected_number, Type::Number(zone()), zone());
+    return value;
+  }
+
+  return value;
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildBinaryOperation(
+    BinaryOperation* expr,
+    HValue* left,
+    HValue* right,
+    PushBeforeSimulateBehavior push_sim_result) {
+  Type* left_type = expr->left()->bounds().lower;
+  Type* right_type = expr->right()->bounds().lower;
+  Type* result_type = expr->bounds().lower;
+  Maybe<int> fixed_right_arg = expr->fixed_right_arg();
+  Handle<AllocationSite> allocation_site = expr->allocation_site();
+
+  HAllocationMode allocation_mode;
+  if (FLAG_allocation_site_pretenuring && !allocation_site.is_null()) {
+    allocation_mode = HAllocationMode(allocation_site);
+  }
+  HValue* result = HGraphBuilder::BuildBinaryOperation(
+      expr->op(), left, right, left_type, right_type, result_type,
+      fixed_right_arg, allocation_mode, strength(function_language_mode()),
+      expr->id());
+  // Add a simulate after instructions with observable side effects, and
+  // after phis, which are the result of BuildBinaryOperation when we
+  // inlined some complex subgraph.
+  if (result->HasObservableSideEffects() || result->IsPhi()) {
+    if (push_sim_result == PUSH_BEFORE_SIMULATE) {
+      Push(result);
+      Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+      Drop(1);
+    } else {
+      Add<HSimulate>(expr->id(), REMOVABLE_SIMULATE);
+    }
+  }
+  return result;
+}
+
+
+HValue* HGraphBuilder::BuildBinaryOperation(
+    Token::Value op, HValue* left, HValue* right, Type* left_type,
+    Type* right_type, Type* result_type, Maybe<int> fixed_right_arg,
+    HAllocationMode allocation_mode, Strength strength, BailoutId opt_id) {
+  bool maybe_string_add = false;
+  if (op == Token::ADD) {
+    // If we are adding a constant string to something for which we don't
+    // have type feedback yet, assume that it is also going to be a string
+    // and don't generate deopt instructions.
+    if (!left_type->IsInhabited() && right->IsConstant() &&
+        HConstant::cast(right)->HasStringValue()) {
+      left_type = Type::String();
+    }
+
+    if (!right_type->IsInhabited() && left->IsConstant() &&
+        HConstant::cast(left)->HasStringValue()) {
+      right_type = Type::String();
+    }
+
+    maybe_string_add = (left_type->Maybe(Type::String()) ||
+                        left_type->Maybe(Type::Receiver()) ||
+                        right_type->Maybe(Type::String()) ||
+                        right_type->Maybe(Type::Receiver()));
+  }
+
+  Representation left_rep = RepresentationFor(left_type);
+  Representation right_rep = RepresentationFor(right_type);
+
+  if (!left_type->IsInhabited()) {
+    Add<HDeoptimize>(
+        Deoptimizer::kInsufficientTypeFeedbackForLHSOfBinaryOperation,
+        Deoptimizer::SOFT);
+    left_type = Type::Any(zone());
+    left_rep = RepresentationFor(left_type);
+    maybe_string_add = op == Token::ADD;
+  }
+
+  if (!right_type->IsInhabited()) {
+    Add<HDeoptimize>(
+        Deoptimizer::kInsufficientTypeFeedbackForRHSOfBinaryOperation,
+        Deoptimizer::SOFT);
+    right_type = Type::Any(zone());
+    right_rep = RepresentationFor(right_type);
+    maybe_string_add = op == Token::ADD;
+  }
+
+  if (!maybe_string_add && !is_strong(strength)) {
+    left = TruncateToNumber(left, &left_type);
+    right = TruncateToNumber(right, &right_type);
+  }
+
+  // Special case for string addition here.
+  if (op == Token::ADD &&
+      (left_type->Is(Type::String()) || right_type->Is(Type::String()))) {
+    if (is_strong(strength)) {
+      // In strong mode, if the one side of an addition is a string,
+      // the other side must be a string too.
+      left = BuildCheckString(left);
+      right = BuildCheckString(right);
+    } else {
+      // Validate type feedback for left argument.
+      if (left_type->Is(Type::String())) {
+        left = BuildCheckString(left);
+      }
+
+      // Validate type feedback for right argument.
+      if (right_type->Is(Type::String())) {
+        right = BuildCheckString(right);
+      }
+
+      // Convert left argument as necessary.
+      if (left_type->Is(Type::Number())) {
+        DCHECK(right_type->Is(Type::String()));
+        left = BuildNumberToString(left, left_type);
+      } else if (!left_type->Is(Type::String())) {
+        DCHECK(right_type->Is(Type::String()));
+        return AddUncasted<HStringAdd>(
+            left, right, allocation_mode.GetPretenureMode(),
+            STRING_ADD_CONVERT_LEFT, allocation_mode.feedback_site());
+      }
+
+      // Convert right argument as necessary.
+      if (right_type->Is(Type::Number())) {
+        DCHECK(left_type->Is(Type::String()));
+        right = BuildNumberToString(right, right_type);
+      } else if (!right_type->Is(Type::String())) {
+        DCHECK(left_type->Is(Type::String()));
+        return AddUncasted<HStringAdd>(
+            left, right, allocation_mode.GetPretenureMode(),
+            STRING_ADD_CONVERT_RIGHT, allocation_mode.feedback_site());
+      }
+    }
+
+    // Fast paths for empty constant strings.
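+    // For example, s + "" (with s known to be a string) simply returns s.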
+    Handle<String> left_string =
+        left->IsConstant() && HConstant::cast(left)->HasStringValue()
+            ? HConstant::cast(left)->StringValue()
+            : Handle<String>();
+    Handle<String> right_string =
+        right->IsConstant() && HConstant::cast(right)->HasStringValue()
+            ? HConstant::cast(right)->StringValue()
+            : Handle<String>();
+    if (!left_string.is_null() && left_string->length() == 0) return right;
+    if (!right_string.is_null() && right_string->length() == 0) return left;
+    if (!left_string.is_null() && !right_string.is_null()) {
+      return AddUncasted<HStringAdd>(
+          left, right, allocation_mode.GetPretenureMode(),
+          STRING_ADD_CHECK_NONE, allocation_mode.feedback_site());
+    }
+
+    // Register the dependent code with the allocation site.
+    if (!allocation_mode.feedback_site().is_null()) {
+      DCHECK(!graph()->info()->IsStub());
+      Handle<AllocationSite> site(allocation_mode.feedback_site());
+      top_info()->dependencies()->AssumeTenuringDecision(site);
+    }
+
+    // Inline the string addition into the stub when creating allocation
+    // mementos to gather allocation site feedback, or if we can statically
+    // infer that we're going to create a cons string.
+    if ((graph()->info()->IsStub() &&
+         allocation_mode.CreateAllocationMementos()) ||
+        (left->IsConstant() &&
+         HConstant::cast(left)->HasStringValue() &&
+         HConstant::cast(left)->StringValue()->length() + 1 >=
+           ConsString::kMinLength) ||
+        (right->IsConstant() &&
+         HConstant::cast(right)->HasStringValue() &&
+         HConstant::cast(right)->StringValue()->length() + 1 >=
+           ConsString::kMinLength)) {
+      return BuildStringAdd(left, right, allocation_mode);
+    }
+
+    // Fallback to using the string add stub.
+    return AddUncasted<HStringAdd>(
+        left, right, allocation_mode.GetPretenureMode(), STRING_ADD_CHECK_NONE,
+        allocation_mode.feedback_site());
+  }
+
+  if (graph()->info()->IsStub()) {
+    left = EnforceNumberType(left, left_type);
+    right = EnforceNumberType(right, right_type);
+  }
+
+  Representation result_rep = RepresentationFor(result_type);
+
+  bool is_non_primitive = (left_rep.IsTagged() && !left_rep.IsSmi()) ||
+                          (right_rep.IsTagged() && !right_rep.IsSmi());
+
+  HInstruction* instr = NULL;
+  // Only the stub is allowed to call into the runtime, since otherwise we
+  // would inline several instructions (including the two pushes) for every
+  // tagged operation in optimized code, which is more expensive than a stub
+  // call.
+  if (graph()->info()->IsStub() && is_non_primitive) {
+    Runtime::FunctionId function_id;
+    switch (op) {
+      default:
+        UNREACHABLE();
+      case Token::ADD:
+        function_id =
+            is_strong(strength) ? Runtime::kAdd_Strong : Runtime::kAdd;
+        break;
+      case Token::SUB:
+        function_id = is_strong(strength) ? Runtime::kSubtract_Strong
+                                          : Runtime::kSubtract;
+        break;
+      case Token::MUL:
+        function_id = is_strong(strength) ? Runtime::kMultiply_Strong
+                                          : Runtime::kMultiply;
+        break;
+      case Token::DIV:
+        function_id =
+            is_strong(strength) ? Runtime::kDivide_Strong : Runtime::kDivide;
+        break;
+      case Token::MOD:
+        function_id =
+            is_strong(strength) ? Runtime::kModulus_Strong : Runtime::kModulus;
+        break;
+      case Token::BIT_OR:
+        function_id = is_strong(strength) ? Runtime::kBitwiseOr_Strong
+                                          : Runtime::kBitwiseOr;
+        break;
+      case Token::BIT_AND:
+        function_id = is_strong(strength) ? Runtime::kBitwiseAnd_Strong
+                                          : Runtime::kBitwiseAnd;
+        break;
+      case Token::BIT_XOR:
+        function_id = is_strong(strength) ? Runtime::kBitwiseXor_Strong
+                                          : Runtime::kBitwiseXor;
+        break;
+      case Token::SAR:
+        function_id = is_strong(strength) ? Runtime::kShiftRight_Strong
+                                          : Runtime::kShiftRight;
+        break;
+      case Token::SHR:
+        function_id = is_strong(strength) ? Runtime::kShiftRightLogical_Strong
+                                          : Runtime::kShiftRightLogical;
+        break;
+      case Token::SHL:
+        function_id = is_strong(strength) ? Runtime::kShiftLeft_Strong
+                                          : Runtime::kShiftLeft;
+        break;
+    }
+    Add<HPushArguments>(left, right);
+    instr = AddUncasted<HCallRuntime>(Runtime::FunctionForId(function_id), 2);
+  } else {
+    if (is_strong(strength) && Token::IsBitOp(op)) {
+      // TODO(conradw): This is not efficient, but is necessary to prevent
+      // conversion of oddball values to numbers in strong mode. It would be
+      // better to prevent the conversion rather than adding a runtime check.
+      IfBuilder if_builder(this);
+      if_builder.If<HHasInstanceTypeAndBranch>(left, ODDBALL_TYPE);
+      if_builder.OrIf<HHasInstanceTypeAndBranch>(right, ODDBALL_TYPE);
+      if_builder.Then();
+      Add<HCallRuntime>(
+          Runtime::FunctionForId(Runtime::kThrowStrongModeImplicitConversion),
+          0);
+      if (!graph()->info()->IsStub()) {
+        Add<HSimulate>(opt_id, REMOVABLE_SIMULATE);
+      }
+      if_builder.End();
+    }
+    switch (op) {
+      case Token::ADD:
+        instr = AddUncasted<HAdd>(left, right, strength);
+        break;
+      case Token::SUB:
+        instr = AddUncasted<HSub>(left, right, strength);
+        break;
+      case Token::MUL:
+        instr = AddUncasted<HMul>(left, right, strength);
+        break;
+      case Token::MOD: {
+        if (fixed_right_arg.IsJust() &&
+            !right->EqualsInteger32Constant(fixed_right_arg.FromJust())) {
+          HConstant* fixed_right =
+              Add<HConstant>(static_cast<int>(fixed_right_arg.FromJust()));
+          IfBuilder if_same(this);
+          if_same.If<HCompareNumericAndBranch>(right, fixed_right, Token::EQ);
+          if_same.Then();
+          if_same.ElseDeopt(Deoptimizer::kUnexpectedRHSOfBinaryOperation);
+          right = fixed_right;
+        }
+        instr = AddUncasted<HMod>(left, right, strength);
+        break;
+      }
+      case Token::DIV:
+        instr = AddUncasted<HDiv>(left, right, strength);
+        break;
+      case Token::BIT_XOR:
+      case Token::BIT_AND:
+        instr = AddUncasted<HBitwise>(op, left, right, strength);
+        break;
+      case Token::BIT_OR: {
+        HValue *operand, *shift_amount;
+        if (left_type->Is(Type::Signed32()) &&
+            right_type->Is(Type::Signed32()) &&
+            MatchRotateRight(left, right, &operand, &shift_amount)) {
+          instr = AddUncasted<HRor>(operand, shift_amount, strength);
+        } else {
+          instr = AddUncasted<HBitwise>(op, left, right, strength);
+        }
+        break;
+      }
+      case Token::SAR:
+        instr = AddUncasted<HSar>(left, right, strength);
+        break;
+      case Token::SHR:
+        instr = AddUncasted<HShr>(left, right, strength);
+        if (instr->IsShr() && CanBeZero(right)) {
+          graph()->RecordUint32Instruction(instr);
+        }
+        break;
+      case Token::SHL:
+        instr = AddUncasted<HShl>(left, right, strength);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  if (instr->IsBinaryOperation()) {
+    HBinaryOperation* binop = HBinaryOperation::cast(instr);
+    binop->set_observed_input_representation(1, left_rep);
+    binop->set_observed_input_representation(2, right_rep);
+    binop->initialize_output_representation(result_rep);
+    if (graph()->info()->IsStub()) {
+      // A stub should not call into another stub.
+      instr->SetFlag(HValue::kCannotBeTagged);
+      // Truncation should be inherited from the HForceRepresentation inputs.
+      if (left->IsForceRepresentation()) {
+        left->CopyFlag(HValue::kTruncatingToSmi, instr);
+        left->CopyFlag(HValue::kTruncatingToInt32, instr);
+      }
+      if (right->IsForceRepresentation()) {
+        right->CopyFlag(HValue::kTruncatingToSmi, instr);
+        right->CopyFlag(HValue::kTruncatingToInt32, instr);
+      }
+    }
+  }
+  return instr;
+}
+
+
+// Check for the form (%_ClassOf(foo) === 'BarClass').
+static bool IsClassOfTest(CompareOperation* expr) {
+  if (expr->op() != Token::EQ_STRICT) return false;
+  CallRuntime* call = expr->left()->AsCallRuntime();
+  if (call == NULL) return false;
+  Literal* literal = expr->right()->AsLiteral();
+  if (literal == NULL) return false;
+  if (!literal->value()->IsString()) return false;
+  if (!call->is_jsruntime() &&
+      call->function()->function_id != Runtime::kInlineClassOf) {
+    return false;
+  }
+  DCHECK(call->arguments()->length() == 1);
+  return true;
+}
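+
+// Illustrative example (hypothetical JS, for exposition only): in the
+// self-hosted builtins this pattern typically appears as
+//   %_ClassOf(receiver) === 'RegExp'
+// which VisitCompareOperation below lowers to one HClassOfTestAndBranch.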
+
+
+void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  switch (expr->op()) {
+    case Token::COMMA:
+      return VisitComma(expr);
+    case Token::OR:
+    case Token::AND:
+      return VisitLogicalExpression(expr);
+    default:
+      return VisitArithmeticExpression(expr);
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitComma(BinaryOperation* expr) {
+  CHECK_ALIVE(VisitForEffect(expr->left()));
+  // Visit the right subexpression in the same AST context as the entire
+  // expression.
+  Visit(expr->right());
+}
+
+
+void HOptimizedGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+  bool is_logical_and = expr->op() == Token::AND;
+  if (ast_context()->IsTest()) {
+    TestContext* context = TestContext::cast(ast_context());
+    // Translate left subexpression.
+    HBasicBlock* eval_right = graph()->CreateBasicBlock();
+    if (is_logical_and) {
+      CHECK_BAILOUT(VisitForControl(expr->left(),
+                                    eval_right,
+                                    context->if_false()));
+    } else {
+      CHECK_BAILOUT(VisitForControl(expr->left(),
+                                    context->if_true(),
+                                    eval_right));
+    }
+
+    // Translate right subexpression by visiting it in the same AST
+    // context as the entire expression.
+    if (eval_right->HasPredecessor()) {
+      eval_right->SetJoinId(expr->RightId());
+      set_current_block(eval_right);
+      Visit(expr->right());
+    }
+
+  } else if (ast_context()->IsValue()) {
+    CHECK_ALIVE(VisitForValue(expr->left()));
+    DCHECK(current_block() != NULL);
+    HValue* left_value = Top();
+
+    // Short-circuit left values that always evaluate to the same boolean value.
+    if (expr->left()->ToBooleanIsTrue() || expr->left()->ToBooleanIsFalse()) {
+      // l (evals true)  && r -> r
+      // l (evals true)  || r -> l
+      // l (evals false) && r -> l
+      // l (evals false) || r -> r
+      if (is_logical_and == expr->left()->ToBooleanIsTrue()) {
+        Drop(1);
+        CHECK_ALIVE(VisitForValue(expr->right()));
+      }
+      return ast_context()->ReturnValue(Pop());
+    }
+
+    // We need an extra block to maintain edge-split form.
+    HBasicBlock* empty_block = graph()->CreateBasicBlock();
+    HBasicBlock* eval_right = graph()->CreateBasicBlock();
+    ToBooleanStub::Types expected(expr->left()->to_boolean_types());
+    HBranch* test = is_logical_and
+        ? New<HBranch>(left_value, expected, eval_right, empty_block)
+        : New<HBranch>(left_value, expected, empty_block, eval_right);
+    FinishCurrentBlock(test);
+
+    set_current_block(eval_right);
+    Drop(1);  // Value of the left subexpression.
+    CHECK_BAILOUT(VisitForValue(expr->right()));
+
+    HBasicBlock* join_block =
+        CreateJoin(empty_block, current_block(), expr->id());
+    set_current_block(join_block);
+    return ast_context()->ReturnValue(Pop());
+
+  } else {
+    DCHECK(ast_context()->IsEffect());
+    // In an effect context, we don't need the value of the left subexpression,
+    // only its control flow and side effects.  We need an extra block to
+    // maintain edge-split form.
+    HBasicBlock* empty_block = graph()->CreateBasicBlock();
+    HBasicBlock* right_block = graph()->CreateBasicBlock();
+    if (is_logical_and) {
+      CHECK_BAILOUT(VisitForControl(expr->left(), right_block, empty_block));
+    } else {
+      CHECK_BAILOUT(VisitForControl(expr->left(), empty_block, right_block));
+    }
+
+    // TODO(kmillikin): Find a way to fix this.  It's ugly that there are
+    // actually two empty blocks (one here and one inserted by
+    // TestContext::BuildBranch), that they both have an HSimulate though the
+    // second one is not a merge node, and that we really have no good AST ID
+    // to put on that first HSimulate.
+
+    if (empty_block->HasPredecessor()) {
+      empty_block->SetJoinId(expr->id());
+    } else {
+      empty_block = NULL;
+    }
+
+    if (right_block->HasPredecessor()) {
+      right_block->SetJoinId(expr->RightId());
+      set_current_block(right_block);
+      CHECK_BAILOUT(VisitForEffect(expr->right()));
+      right_block = current_block();
+    } else {
+      right_block = NULL;
+    }
+
+    HBasicBlock* join_block =
+        CreateJoin(empty_block, right_block, expr->id());
+    set_current_block(join_block);
+    // We did not materialize any value in the predecessor environments,
+    // so there is no need to handle it here.
+  }
+}
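+
+// Illustrative sketch of the three AST contexts handled above (hypothetical
+// JS, for exposition only):
+//   if (a && b) { ... }  // test context: branch directly on each operand
+//   var x = a || b;      // value context: the result value is materialized
+//   a && b;              // effect context: only control flow and side effects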
+
+
+void HOptimizedGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
+  CHECK_ALIVE(VisitForValue(expr->left()));
+  CHECK_ALIVE(VisitForValue(expr->right()));
+  SetSourcePosition(expr->position());
+  HValue* right = Pop();
+  HValue* left = Pop();
+  HValue* result =
+      BuildBinaryOperation(expr, left, right,
+          ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
+                                    : PUSH_BEFORE_SIMULATE);
+  if (top_info()->is_tracking_positions() && result->IsBinaryOperation()) {
+    HBinaryOperation::cast(result)->SetOperandPositions(
+        zone(),
+        ScriptPositionToSourcePosition(expr->left()->position()),
+        ScriptPositionToSourcePosition(expr->right()->position()));
+  }
+  return ast_context()->ReturnValue(result);
+}
+
+
+void HOptimizedGraphBuilder::HandleLiteralCompareTypeof(CompareOperation* expr,
+                                                        Expression* sub_expr,
+                                                        Handle<String> check) {
+  CHECK_ALIVE(VisitForTypeOf(sub_expr));
+  SetSourcePosition(expr->position());
+  HValue* value = Pop();
+  HTypeofIsAndBranch* instr = New<HTypeofIsAndBranch>(value, check);
+  return ast_context()->ReturnControl(instr, expr->id());
+}
+
+
+static bool IsLiteralCompareBool(Isolate* isolate,
+                                 HValue* left,
+                                 Token::Value op,
+                                 HValue* right) {
+  return op == Token::EQ_STRICT &&
+      ((left->IsConstant() &&
+          HConstant::cast(left)->handle(isolate)->IsBoolean()) ||
+       (right->IsConstant() &&
+           HConstant::cast(right)->handle(isolate)->IsBoolean()));
+}
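+
+// Note: strict equality against a boolean literal (e.g. x === true) can use
+// HCompareObjectEqAndBranch because true and false are singleton oddballs on
+// the heap, so reference equality is sufficient.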
+
+
+void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+
+  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+
+  // Check for a few fast cases. The AST visiting behavior must be in sync
+  // with the full codegen: We don't push both left and right values onto
+  // the expression stack when one side is a special-case literal.
+  Expression* sub_expr = NULL;
+  Handle<String> check;
+  if (expr->IsLiteralCompareTypeof(&sub_expr, &check)) {
+    return HandleLiteralCompareTypeof(expr, sub_expr, check);
+  }
+  if (expr->IsLiteralCompareUndefined(&sub_expr, isolate())) {
+    return HandleLiteralCompareNil(expr, sub_expr, kUndefinedValue);
+  }
+  if (expr->IsLiteralCompareNull(&sub_expr)) {
+    return HandleLiteralCompareNil(expr, sub_expr, kNullValue);
+  }
+
+  if (IsClassOfTest(expr)) {
+    CallRuntime* call = expr->left()->AsCallRuntime();
+    DCHECK(call->arguments()->length() == 1);
+    CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+    HValue* value = Pop();
+    Literal* literal = expr->right()->AsLiteral();
+    Handle<String> rhs = Handle<String>::cast(literal->value());
+    HClassOfTestAndBranch* instr = New<HClassOfTestAndBranch>(value, rhs);
+    return ast_context()->ReturnControl(instr, expr->id());
+  }
+
+  Type* left_type = expr->left()->bounds().lower;
+  Type* right_type = expr->right()->bounds().lower;
+  Type* combined_type = expr->combined_type();
+
+  CHECK_ALIVE(VisitForValue(expr->left()));
+  CHECK_ALIVE(VisitForValue(expr->right()));
+
+  HValue* right = Pop();
+  HValue* left = Pop();
+  Token::Value op = expr->op();
+
+  if (IsLiteralCompareBool(isolate(), left, op, right)) {
+    HCompareObjectEqAndBranch* result =
+        New<HCompareObjectEqAndBranch>(left, right);
+    return ast_context()->ReturnControl(result, expr->id());
+  }
+
+  if (op == Token::INSTANCEOF) {
+    // Check to see if the rhs of the instanceof is a known function.
+    if (right->IsConstant() &&
+        HConstant::cast(right)->handle(isolate())->IsJSFunction()) {
+      Handle<JSFunction> constructor =
+          Handle<JSFunction>::cast(HConstant::cast(right)->handle(isolate()));
+      if (constructor->IsConstructor() &&
+          !constructor->map()->has_non_instance_prototype()) {
+        JSFunction::EnsureHasInitialMap(constructor);
+        DCHECK(constructor->has_initial_map());
+        Handle<Map> initial_map(constructor->initial_map(), isolate());
+        top_info()->dependencies()->AssumeInitialMapCantChange(initial_map);
+        HInstruction* prototype =
+            Add<HConstant>(handle(initial_map->prototype(), isolate()));
+        HHasInPrototypeChainAndBranch* result =
+            New<HHasInPrototypeChainAndBranch>(left, prototype);
+        return ast_context()->ReturnControl(result, expr->id());
+      }
+    }
+
+    HInstanceOf* result = New<HInstanceOf>(left, right);
+    return ast_context()->ReturnInstruction(result, expr->id());
+
+  } else if (op == Token::IN) {
+    Add<HPushArguments>(left, right);
+    HInstruction* result =
+        New<HCallRuntime>(Runtime::FunctionForId(Runtime::kHasProperty), 2);
+    return ast_context()->ReturnInstruction(result, expr->id());
+  }
+
+  PushBeforeSimulateBehavior push_behavior =
+      ast_context()->IsEffect() ? NO_PUSH_BEFORE_SIMULATE
+                                : PUSH_BEFORE_SIMULATE;
+  HControlInstruction* compare = BuildCompareInstruction(
+      op, left, right, left_type, right_type, combined_type,
+      ScriptPositionToSourcePosition(expr->left()->position()),
+      ScriptPositionToSourcePosition(expr->right()->position()),
+      push_behavior, expr->id());
+  if (compare == NULL) return;  // Bailed out.
+  return ast_context()->ReturnControl(compare, expr->id());
+}
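+
+// Note on the instanceof fast path above: when the right-hand side is a known
+// constructor with a stable initial map (e.g. x instanceof Bar for a constant
+// Bar), the test reduces to walking x's prototype chain for Bar.prototype via
+// HHasInPrototypeChainAndBranch; otherwise a generic HInstanceOf is emitted.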
+
+
+HControlInstruction* HOptimizedGraphBuilder::BuildCompareInstruction(
+    Token::Value op, HValue* left, HValue* right, Type* left_type,
+    Type* right_type, Type* combined_type, SourcePosition left_position,
+    SourcePosition right_position, PushBeforeSimulateBehavior push_sim_result,
+    BailoutId bailout_id) {
+  // Cases handled below depend on collected type feedback. They should
+  // soft deoptimize when there is no type feedback.
+  if (!combined_type->IsInhabited()) {
+    Add<HDeoptimize>(
+        Deoptimizer::kInsufficientTypeFeedbackForCombinedTypeOfBinaryOperation,
+        Deoptimizer::SOFT);
+    combined_type = left_type = right_type = Type::Any(zone());
+  }
+
+  Representation left_rep = RepresentationFor(left_type);
+  Representation right_rep = RepresentationFor(right_type);
+  Representation combined_rep = RepresentationFor(combined_type);
+
+  if (combined_type->Is(Type::Receiver())) {
+    if (Token::IsEqualityOp(op)) {
+      // HCompareObjectEqAndBranch can only deal with objects, so
+      // exclude numbers.
+      if ((left->IsConstant() &&
+           HConstant::cast(left)->HasNumberValue()) ||
+          (right->IsConstant() &&
+           HConstant::cast(right)->HasNumberValue())) {
+        Add<HDeoptimize>(Deoptimizer::kTypeMismatchBetweenFeedbackAndConstant,
+                         Deoptimizer::SOFT);
+        // The caller expects a branch instruction, so make it happy.
+        return New<HBranch>(graph()->GetConstantTrue());
+      }
+      // Can we get away with map check and not instance type check?
+      HValue* operand_to_check =
+          left->block()->block_id() < right->block()->block_id() ? left : right;
+      if (combined_type->IsClass()) {
+        Handle<Map> map = combined_type->AsClass()->Map();
+        AddCheckMap(operand_to_check, map);
+        HCompareObjectEqAndBranch* result =
+            New<HCompareObjectEqAndBranch>(left, right);
+        if (top_info()->is_tracking_positions()) {
+          result->set_operand_position(zone(), 0, left_position);
+          result->set_operand_position(zone(), 1, right_position);
+        }
+        return result;
+      } else {
+        BuildCheckHeapObject(operand_to_check);
+        Add<HCheckInstanceType>(operand_to_check,
+                                HCheckInstanceType::IS_JS_RECEIVER);
+        HCompareObjectEqAndBranch* result =
+            New<HCompareObjectEqAndBranch>(left, right);
+        return result;
+      }
+    } else {
+      if (combined_type->IsClass()) {
+        // TODO(bmeurer): This is an optimized version of an x < y, x > y,
+        // x <= y or x >= y, where both x and y are spec objects with the
+        // same map. The CompareIC collects this map for us. So if we know
+        // that there's no @@toPrimitive on the map (including the prototype
+        // chain), and both valueOf and toString are the default initial
+        // implementations (on the %ObjectPrototype%), then we can reduce
+        // the comparison to map checks on x and y, because the comparison
+        // will turn into a comparison of "[object CLASS]" to itself (the
+        // default outcome of toString, since valueOf returns a spec object).
+        // This is pretty much ad hoc, so in TurboFan we could do a lot better
+        // and inline the interesting parts of ToPrimitive (actually we could
+        // even do that in Crankshaft but we don't want to waste too much
+        // time on this now).
+        DCHECK(Token::IsOrderedRelationalCompareOp(op));
+        Handle<Map> map = combined_type->AsClass()->Map();
+        PropertyAccessInfo value_of(this, LOAD, map,
+                                    isolate()->factory()->valueOf_string());
+        PropertyAccessInfo to_primitive(
+            this, LOAD, map, isolate()->factory()->to_primitive_symbol());
+        PropertyAccessInfo to_string(this, LOAD, map,
+                                     isolate()->factory()->toString_string());
+        PropertyAccessInfo to_string_tag(
+            this, LOAD, map, isolate()->factory()->to_string_tag_symbol());
+        if (to_primitive.CanAccessMonomorphic() && !to_primitive.IsFound() &&
+            to_string_tag.CanAccessMonomorphic() &&
+            (!to_string_tag.IsFound() || to_string_tag.IsData() ||
+             to_string_tag.IsDataConstant()) &&
+            value_of.CanAccessMonomorphic() && value_of.IsDataConstant() &&
+            value_of.constant().is_identical_to(isolate()->object_value_of()) &&
+            to_string.CanAccessMonomorphic() && to_string.IsDataConstant() &&
+            to_string.constant().is_identical_to(
+                isolate()->object_to_string())) {
+          // We depend on the prototype chain to stay the same, because we
+          // also need to deoptimize when someone installs @@toPrimitive
+          // or @@toStringTag somewhere in the prototype chain.
+          BuildCheckPrototypeMaps(handle(JSObject::cast(map->prototype())),
+                                  Handle<JSObject>::null());
+          AddCheckMap(left, map);
+          AddCheckMap(right, map);
+          // The caller expects a branch instruction, so make it happy.
+          return New<HBranch>(
+              graph()->GetConstantBool(op == Token::LTE || op == Token::GTE));
+        }
+      }
+      Bailout(kUnsupportedNonPrimitiveCompare);
+      return NULL;
+    }
+  } else if (combined_type->Is(Type::InternalizedString()) &&
+             Token::IsEqualityOp(op)) {
+    // If we have a constant argument, it should be consistent with the type
+    // feedback (otherwise we fail assertions in HCompareObjectEqAndBranch).
+    if ((left->IsConstant() &&
+         !HConstant::cast(left)->HasInternalizedStringValue()) ||
+        (right->IsConstant() &&
+         !HConstant::cast(right)->HasInternalizedStringValue())) {
+      Add<HDeoptimize>(Deoptimizer::kTypeMismatchBetweenFeedbackAndConstant,
+                       Deoptimizer::SOFT);
+      // The caller expects a branch instruction, so make it happy.
+      return New<HBranch>(graph()->GetConstantTrue());
+    }
+    BuildCheckHeapObject(left);
+    Add<HCheckInstanceType>(left, HCheckInstanceType::IS_INTERNALIZED_STRING);
+    BuildCheckHeapObject(right);
+    Add<HCheckInstanceType>(right, HCheckInstanceType::IS_INTERNALIZED_STRING);
+    HCompareObjectEqAndBranch* result =
+        New<HCompareObjectEqAndBranch>(left, right);
+    return result;
+  } else if (combined_type->Is(Type::String())) {
+    BuildCheckHeapObject(left);
+    Add<HCheckInstanceType>(left, HCheckInstanceType::IS_STRING);
+    BuildCheckHeapObject(right);
+    Add<HCheckInstanceType>(right, HCheckInstanceType::IS_STRING);
+    HStringCompareAndBranch* result =
+        New<HStringCompareAndBranch>(left, right, op);
+    return result;
+  } else if (combined_type->Is(Type::Boolean())) {
+    AddCheckMap(left, isolate()->factory()->boolean_map());
+    AddCheckMap(right, isolate()->factory()->boolean_map());
+    if (Token::IsEqualityOp(op)) {
+      HCompareObjectEqAndBranch* result =
+          New<HCompareObjectEqAndBranch>(left, right);
+      return result;
+    }
+    left = Add<HLoadNamedField>(
+        left, nullptr,
+        HObjectAccess::ForOddballToNumber(Representation::Smi()));
+    right = Add<HLoadNamedField>(
+        right, nullptr,
+        HObjectAccess::ForOddballToNumber(Representation::Smi()));
+    HCompareNumericAndBranch* result =
+        New<HCompareNumericAndBranch>(left, right, op);
+    return result;
+  } else {
+    if (combined_rep.IsTagged() || combined_rep.IsNone()) {
+      HCompareGeneric* result = Add<HCompareGeneric>(
+          left, right, op, strength(function_language_mode()));
+      result->set_observed_input_representation(1, left_rep);
+      result->set_observed_input_representation(2, right_rep);
+      if (result->HasObservableSideEffects()) {
+        if (push_sim_result == PUSH_BEFORE_SIMULATE) {
+          Push(result);
+          AddSimulate(bailout_id, REMOVABLE_SIMULATE);
+          Drop(1);
+        } else {
+          AddSimulate(bailout_id, REMOVABLE_SIMULATE);
+        }
+      }
+      // TODO(jkummerow): Can we make this more efficient?
+      HBranch* branch = New<HBranch>(result);
+      return branch;
+    } else {
+      HCompareNumericAndBranch* result = New<HCompareNumericAndBranch>(
+          left, right, op, strength(function_language_mode()));
+      result->set_observed_input_representation(left_rep, right_rep);
+      if (top_info()->is_tracking_positions()) {
+        result->SetOperandPositions(zone(), left_position, right_position);
+      }
+      return result;
+    }
+  }
+}
+
+
+void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
+                                                     Expression* sub_expr,
+                                                     NilValue nil) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  DCHECK(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
+  if (!top_info()->is_tracking_positions()) SetSourcePosition(expr->position());
+  CHECK_ALIVE(VisitForValue(sub_expr));
+  HValue* value = Pop();
+  if (expr->op() == Token::EQ_STRICT) {
+    HConstant* nil_constant = nil == kNullValue
+        ? graph()->GetConstantNull()
+        : graph()->GetConstantUndefined();
+    HCompareObjectEqAndBranch* instr =
+        New<HCompareObjectEqAndBranch>(value, nil_constant);
+    return ast_context()->ReturnControl(instr, expr->id());
+  } else {
+    DCHECK_EQ(Token::EQ, expr->op());
+    Type* type = expr->combined_type()->Is(Type::None())
+        ? Type::Any(zone()) : expr->combined_type();
+    HIfContinuation continuation;
+    BuildCompareNil(value, type, &continuation);
+    return ast_context()->ReturnContinuation(&continuation, expr->id());
+  }
+}
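+
+// Illustrative example (hypothetical JS, for exposition only): x === null
+// compares against the null constant alone, while sloppy x == null goes
+// through BuildCompareNil, which must also accept undefined per the JS ==
+// semantics.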
+
+
+void HOptimizedGraphBuilder::VisitSpread(Spread* expr) { UNREACHABLE(); }
+
+
+void HOptimizedGraphBuilder::VisitEmptyParentheses(EmptyParentheses* expr) {
+  UNREACHABLE();
+}
+
+
+HValue* HOptimizedGraphBuilder::AddThisFunction() {
+  return AddInstruction(BuildThisFunction());
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
+  // If we share optimized code between different closures, the
+  // this-function is not a constant, except inside an inlined body.
+  if (function_state()->outer() != NULL) {
+    return New<HConstant>(
+        function_state()->compilation_info()->closure());
+  } else {
+    return New<HThisFunction>();
+  }
+}
+
+
+HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
+    Handle<JSObject> boilerplate_object,
+    AllocationSiteUsageContext* site_context) {
+  NoObservableSideEffectsScope no_effects(this);
+  Handle<Map> initial_map(boilerplate_object->map());
+  InstanceType instance_type = initial_map->instance_type();
+  DCHECK(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
+
+  HType type = instance_type == JS_ARRAY_TYPE
+      ? HType::JSArray() : HType::JSObject();
+  HValue* object_size_constant = Add<HConstant>(initial_map->instance_size());
+
+  PretenureFlag pretenure_flag = NOT_TENURED;
+  Handle<AllocationSite> top_site(*site_context->top(), isolate());
+  if (FLAG_allocation_site_pretenuring) {
+    pretenure_flag = top_site->GetPretenureMode();
+  }
+
+  Handle<AllocationSite> current_site(*site_context->current(), isolate());
+  if (*top_site == *current_site) {
+    // We install a dependency for pretenuring only on the outermost literal.
+    top_info()->dependencies()->AssumeTenuringDecision(top_site);
+  }
+  top_info()->dependencies()->AssumeTransitionStable(current_site);
+
+  HInstruction* object = Add<HAllocate>(
+      object_size_constant, type, pretenure_flag, instance_type, top_site);
+
+  // If allocation folding reaches Page::kMaxRegularHeapObjectSize, the
+  // elements array may not get folded into the object. Hence, we set the
+  // elements pointer to the empty fixed array and let store elimination
+  // remove this store in the folding case.
+  HConstant* empty_fixed_array = Add<HConstant>(
+      isolate()->factory()->empty_fixed_array());
+  Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+      empty_fixed_array);
+
+  BuildEmitObjectHeader(boilerplate_object, object);
+
+  // Similarly to the elements pointer, there is no guarantee that all
+  // property allocations can get folded, so pre-initialize all in-object
+  // properties to a safe value.
+  BuildInitializeInobjectProperties(object, initial_map);
+
+  Handle<FixedArrayBase> elements(boilerplate_object->elements());
+  int elements_size = (elements->length() > 0 &&
+      elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
+          elements->Size() : 0;
+
+  if (pretenure_flag == TENURED &&
+      elements->map() == isolate()->heap()->fixed_cow_array_map() &&
+      isolate()->heap()->InNewSpace(*elements)) {
+    // If we would like to pretenure a fixed cow array, we must ensure that the
+    // array is already in old space, otherwise we'll create too many old-to-
+    // new-space pointers (overflowing the store buffer).
+    elements = Handle<FixedArrayBase>(
+        isolate()->factory()->CopyAndTenureFixedCOWArray(
+            Handle<FixedArray>::cast(elements)));
+    boilerplate_object->set_elements(*elements);
+  }
+
+  HInstruction* object_elements = NULL;
+  if (elements_size > 0) {
+    HValue* object_elements_size = Add<HConstant>(elements_size);
+    InstanceType instance_type = boilerplate_object->HasFastDoubleElements()
+        ? FIXED_DOUBLE_ARRAY_TYPE : FIXED_ARRAY_TYPE;
+    object_elements = Add<HAllocate>(object_elements_size, HType::HeapObject(),
+                                     pretenure_flag, instance_type, top_site);
+    BuildEmitElements(boilerplate_object, elements, object_elements,
+                      site_context);
+    Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+                          object_elements);
+  } else {
+    Handle<Object> elements_field =
+        Handle<Object>(boilerplate_object->elements(), isolate());
+    HInstruction* object_elements_cow = Add<HConstant>(elements_field);
+    Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+                          object_elements_cow);
+  }
+
+  // Copy in-object properties.
+  if (initial_map->NumberOfFields() != 0 ||
+      initial_map->unused_property_fields() > 0) {
+    BuildEmitInObjectProperties(boilerplate_object, object, site_context,
+                                pretenure_flag);
+  }
+  return object;
+}
+
+
+void HOptimizedGraphBuilder::BuildEmitObjectHeader(
+    Handle<JSObject> boilerplate_object,
+    HInstruction* object) {
+  DCHECK(boilerplate_object->properties()->length() == 0);
+
+  Handle<Map> boilerplate_object_map(boilerplate_object->map());
+  AddStoreMapConstant(object, boilerplate_object_map);
+
+  Handle<Object> properties_field =
+      Handle<Object>(boilerplate_object->properties(), isolate());
+  DCHECK(*properties_field == isolate()->heap()->empty_fixed_array());
+  HInstruction* properties = Add<HConstant>(properties_field);
+  HObjectAccess access = HObjectAccess::ForPropertiesPointer();
+  Add<HStoreNamedField>(object, access, properties);
+
+  if (boilerplate_object->IsJSArray()) {
+    Handle<JSArray> boilerplate_array =
+        Handle<JSArray>::cast(boilerplate_object);
+    Handle<Object> length_field =
+        Handle<Object>(boilerplate_array->length(), isolate());
+    HInstruction* length = Add<HConstant>(length_field);
+
+    DCHECK(boilerplate_array->length()->IsSmi());
+    Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(
+        boilerplate_array->GetElementsKind()), length);
+  }
+}
+
+
+void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
+    Handle<JSObject> boilerplate_object,
+    HInstruction* object,
+    AllocationSiteUsageContext* site_context,
+    PretenureFlag pretenure_flag) {
+  Handle<Map> boilerplate_map(boilerplate_object->map());
+  Handle<DescriptorArray> descriptors(boilerplate_map->instance_descriptors());
+  int limit = boilerplate_map->NumberOfOwnDescriptors();
+
+  int copied_fields = 0;
+  for (int i = 0; i < limit; i++) {
+    PropertyDetails details = descriptors->GetDetails(i);
+    if (details.type() != DATA) continue;
+    copied_fields++;
+    FieldIndex field_index = FieldIndex::ForDescriptor(*boilerplate_map, i);
+
+    int property_offset = field_index.offset();
+    Handle<Name> name(descriptors->GetKey(i));
+
+    // The access for the store depends on the type of the boilerplate.
+    HObjectAccess access = boilerplate_object->IsJSArray() ?
+        HObjectAccess::ForJSArrayOffset(property_offset) :
+        HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
+
+    if (boilerplate_object->IsUnboxedDoubleField(field_index)) {
+      CHECK(!boilerplate_object->IsJSArray());
+      double value = boilerplate_object->RawFastDoublePropertyAt(field_index);
+      access = access.WithRepresentation(Representation::Double());
+      Add<HStoreNamedField>(object, access, Add<HConstant>(value));
+      continue;
+    }
+    Handle<Object> value(boilerplate_object->RawFastPropertyAt(field_index),
+                         isolate());
+
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      Handle<AllocationSite> current_site = site_context->EnterNewScope();
+      HInstruction* result =
+          BuildFastLiteral(value_object, site_context);
+      site_context->ExitScope(current_site, value_object);
+      Add<HStoreNamedField>(object, access, result);
+    } else {
+      Representation representation = details.representation();
+      HInstruction* value_instruction;
+
+      if (representation.IsDouble()) {
+        // Allocate a HeapNumber box and store the value into it.
+        HValue* heap_number_constant = Add<HConstant>(HeapNumber::kSize);
+        HInstruction* double_box =
+            Add<HAllocate>(heap_number_constant, HType::HeapObject(),
+                pretenure_flag, MUTABLE_HEAP_NUMBER_TYPE);
+        AddStoreMapConstant(double_box,
+            isolate()->factory()->mutable_heap_number_map());
+        // Unwrap the mutable heap number from the boilerplate.
+        HValue* double_value =
+            Add<HConstant>(Handle<HeapNumber>::cast(value)->value());
+        Add<HStoreNamedField>(
+            double_box, HObjectAccess::ForHeapNumberValue(), double_value);
+        value_instruction = double_box;
+      } else if (representation.IsSmi()) {
+        value_instruction = value->IsUninitialized()
+            ? graph()->GetConstant0()
+            : Add<HConstant>(value);
+        // Ensure that the value is stored as a smi.
+        access = access.WithRepresentation(representation);
+      } else {
+        value_instruction = Add<HConstant>(value);
+      }
+
+      Add<HStoreNamedField>(object, access, value_instruction);
+    }
+  }
+
+  int inobject_properties = boilerplate_object->map()->GetInObjectProperties();
+  HInstruction* value_instruction =
+      Add<HConstant>(isolate()->factory()->one_pointer_filler_map());
+  for (int i = copied_fields; i < inobject_properties; i++) {
+    DCHECK(boilerplate_object->IsJSObject());
+    int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
+    HObjectAccess access =
+        HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
+    Add<HStoreNamedField>(object, access, value_instruction);
+  }
+}
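+
+// Note: double-valued fields are copied above by allocating a fresh mutable
+// HeapNumber box per literal instance; e.g. for a boilerplate like
+// {a: 1, b: 2.5}, a is stored as a smi while b gets its own heap-number box
+// (unless the field is stored as an unboxed double, which takes the
+// early-continue path).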
+
+
+void HOptimizedGraphBuilder::BuildEmitElements(
+    Handle<JSObject> boilerplate_object,
+    Handle<FixedArrayBase> elements,
+    HValue* object_elements,
+    AllocationSiteUsageContext* site_context) {
+  ElementsKind kind = boilerplate_object->map()->elements_kind();
+  int elements_length = elements->length();
+  HValue* object_elements_length = Add<HConstant>(elements_length);
+  BuildInitializeElementsHeader(object_elements, kind, object_elements_length);
+
+  // Copy elements backing store content.
+  if (elements->IsFixedDoubleArray()) {
+    BuildEmitFixedDoubleArray(elements, kind, object_elements);
+  } else if (elements->IsFixedArray()) {
+    BuildEmitFixedArray(elements, kind, object_elements,
+                        site_context);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
+    Handle<FixedArrayBase> elements,
+    ElementsKind kind,
+    HValue* object_elements) {
+  HInstruction* boilerplate_elements = Add<HConstant>(elements);
+  int elements_length = elements->length();
+  for (int i = 0; i < elements_length; i++) {
+    HValue* key_constant = Add<HConstant>(i);
+    HInstruction* value_instruction =
+        Add<HLoadKeyed>(boilerplate_elements, key_constant, nullptr, nullptr,
+                        kind, ALLOW_RETURN_HOLE);
+    HInstruction* store = Add<HStoreKeyed>(object_elements, key_constant,
+                                           value_instruction, nullptr, kind);
+    store->SetFlag(HValue::kAllowUndefinedAsNaN);
+  }
+}
+
+
+void HOptimizedGraphBuilder::BuildEmitFixedArray(
+    Handle<FixedArrayBase> elements,
+    ElementsKind kind,
+    HValue* object_elements,
+    AllocationSiteUsageContext* site_context) {
+  HInstruction* boilerplate_elements = Add<HConstant>(elements);
+  int elements_length = elements->length();
+  Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+  for (int i = 0; i < elements_length; i++) {
+    Handle<Object> value(fast_elements->get(i), isolate());
+    HValue* key_constant = Add<HConstant>(i);
+    if (value->IsJSObject()) {
+      Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+      Handle<AllocationSite> current_site = site_context->EnterNewScope();
+      HInstruction* result =
+          BuildFastLiteral(value_object, site_context);
+      site_context->ExitScope(current_site, value_object);
+      Add<HStoreKeyed>(object_elements, key_constant, result, nullptr, kind);
+    } else {
+      ElementsKind copy_kind =
+          kind == FAST_HOLEY_SMI_ELEMENTS ? FAST_HOLEY_ELEMENTS : kind;
+      HInstruction* value_instruction =
+          Add<HLoadKeyed>(boilerplate_elements, key_constant, nullptr, nullptr,
+                          copy_kind, ALLOW_RETURN_HOLE);
+      Add<HStoreKeyed>(object_elements, key_constant, value_instruction,
+                       nullptr, copy_kind);
+    }
+  }
+}
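+
+// Note: for FAST_HOLEY_SMI_ELEMENTS the loop above widens the copy to
+// FAST_HOLEY_ELEMENTS so that holes read out of the boilerplate can be
+// stored back without tripping smi representation checks.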
+
+
+void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  HInstruction* instr = BuildThisFunction();
+  return ast_context()->ReturnInstruction(instr, expr->id());
+}
+
+
+void HOptimizedGraphBuilder::VisitSuperPropertyReference(
+    SuperPropertyReference* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kSuperReference);
+}
+
+
+void HOptimizedGraphBuilder::VisitSuperCallReference(SuperCallReference* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kSuperReference);
+}
+
+
+void HOptimizedGraphBuilder::VisitDeclarations(
+    ZoneList<Declaration*>* declarations) {
+  DCHECK(globals_.is_empty());
+  AstVisitor::VisitDeclarations(declarations);
+  if (!globals_.is_empty()) {
+    Handle<FixedArray> array =
+        isolate()->factory()->NewFixedArray(globals_.length(), TENURED);
+    for (int i = 0; i < globals_.length(); ++i) array->set(i, *globals_.at(i));
+    int flags =
+        DeclareGlobalsEvalFlag::encode(current_info()->is_eval()) |
+        DeclareGlobalsNativeFlag::encode(current_info()->is_native()) |
+        DeclareGlobalsLanguageMode::encode(current_info()->language_mode());
+    Add<HDeclareGlobals>(array, flags);
+    globals_.Rewind(0);
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
+  Variable* variable = proxy->var();
+  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  switch (variable->location()) {
+    case VariableLocation::GLOBAL:
+    case VariableLocation::UNALLOCATED:
+      globals_.Add(variable->name(), zone());
+      globals_.Add(variable->binding_needs_init()
+                       ? isolate()->factory()->the_hole_value()
+                       : isolate()->factory()->undefined_value(), zone());
+      return;
+    case VariableLocation::PARAMETER:
+    case VariableLocation::LOCAL:
+      if (hole_init) {
+        HValue* value = graph()->GetConstantHole();
+        environment()->Bind(variable, value);
+      }
+      break;
+    case VariableLocation::CONTEXT:
+      if (hole_init) {
+        HValue* value = graph()->GetConstantHole();
+        HValue* context = environment()->context();
+        HStoreContextSlot* store = Add<HStoreContextSlot>(
+            context, variable->index(), HStoreContextSlot::kNoCheck, value);
+        if (store->HasObservableSideEffects()) {
+          Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
+        }
+      }
+      break;
+    case VariableLocation::LOOKUP:
+      return Bailout(kUnsupportedLookupSlotInDeclaration);
+  }
+}
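+
+// Note: the hole-initialization above is what implements the temporal dead
+// zone checks for let/const (and legacy const): the slot stays bound to the
+// hole value until the declaration's initializer assigns it.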
+
+
+void HOptimizedGraphBuilder::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case VariableLocation::GLOBAL:
+    case VariableLocation::UNALLOCATED: {
+      globals_.Add(variable->name(), zone());
+      Handle<SharedFunctionInfo> function = Compiler::GetSharedFunctionInfo(
+          declaration->fun(), current_info()->script(), top_info());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_.Add(function, zone());
+      return;
+    }
+    case VariableLocation::PARAMETER:
+    case VariableLocation::LOCAL: {
+      CHECK_ALIVE(VisitForValue(declaration->fun()));
+      HValue* value = Pop();
+      BindIfLive(variable, value);
+      break;
+    }
+    case VariableLocation::CONTEXT: {
+      CHECK_ALIVE(VisitForValue(declaration->fun()));
+      HValue* value = Pop();
+      HValue* context = environment()->context();
+      HStoreContextSlot* store = Add<HStoreContextSlot>(
+          context, variable->index(), HStoreContextSlot::kNoCheck, value);
+      if (store->HasObservableSideEffects()) {
+        Add<HSimulate>(proxy->id(), REMOVABLE_SIMULATE);
+      }
+      break;
+    }
+    case VariableLocation::LOOKUP:
+      return Bailout(kUnsupportedLookupSlotInDeclaration);
+  }
+}
+
+
+void HOptimizedGraphBuilder::VisitImportDeclaration(
+    ImportDeclaration* declaration) {
+  UNREACHABLE();
+}
+
+
+void HOptimizedGraphBuilder::VisitExportDeclaration(
+    ExportDeclaration* declaration) {
+  UNREACHABLE();
+}
+
+
+void HOptimizedGraphBuilder::VisitRewritableAssignmentExpression(
+    RewritableAssignmentExpression* node) {
+  CHECK_ALIVE(Visit(node->expression()));
+}
+
+
+// Generators for inline runtime functions.
+// Support for types.
+void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HIsSmiAndBranch* result = New<HIsSmiAndBranch>(value);
+  return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateIsJSReceiver(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HHasInstanceTypeAndBranch* result =
+      New<HHasInstanceTypeAndBranch>(value,
+                                     FIRST_JS_RECEIVER_TYPE,
+                                     LAST_JS_RECEIVER_TYPE);
+  return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HHasInstanceTypeAndBranch* result = New<HHasInstanceTypeAndBranch>(
+      value, FIRST_FUNCTION_TYPE, LAST_FUNCTION_TYPE);
+  return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateIsMinusZero(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HCompareMinusZeroAndBranch* result = New<HCompareMinusZeroAndBranch>(value);
+  return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HHasCachedArrayIndexAndBranch* result =
+      New<HHasCachedArrayIndexAndBranch>(value);
+  return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HHasInstanceTypeAndBranch* result =
+      New<HHasInstanceTypeAndBranch>(value, JS_ARRAY_TYPE);
+  return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateIsTypedArray(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HHasInstanceTypeAndBranch* result =
+      New<HHasInstanceTypeAndBranch>(value, JS_TYPED_ARRAY_TYPE);
+  return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HHasInstanceTypeAndBranch* result =
+      New<HHasInstanceTypeAndBranch>(value, JS_REGEXP_TYPE);
+  return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateToInteger(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* input = Pop();
+  if (input->type().IsSmi()) {
+    return ast_context()->ReturnValue(input);
+  } else {
+    IfBuilder if_inputissmi(this);
+    if_inputissmi.If<HIsSmiAndBranch>(input);
+    if_inputissmi.Then();
+    {
+      // Return the input value.
+      Push(input);
+      Add<HSimulate>(call->id(), FIXED_SIMULATE);
+    }
+    if_inputissmi.Else();
+    {
+      Add<HPushArguments>(input);
+      Push(Add<HCallRuntime>(Runtime::FunctionForId(Runtime::kToInteger), 1));
+      Add<HSimulate>(call->id(), FIXED_SIMULATE);
+    }
+    if_inputissmi.End();
+    return ast_context()->ReturnValue(Pop());
+  }
+}
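+
+// Note: both arms of the diamond above push exactly one value and then add a
+// simulate, so the environments agree at the join point; the joined value is
+// popped and returned.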
+
+
+void HOptimizedGraphBuilder::GenerateToObject(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HValue* result = BuildToObject(value);
+  return ast_context()->ReturnValue(result);
+}
+
+
+void HOptimizedGraphBuilder::GenerateToString(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  Callable callable = CodeFactory::ToString(isolate());
+  HValue* input = Pop();
+  if (input->type().IsString()) {
+    return ast_context()->ReturnValue(input);
+  } else {
+    HValue* stub = Add<HConstant>(callable.code());
+    HValue* values[] = {context(), input};
+    HInstruction* result =
+        New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+                                 Vector<HValue*>(values, arraysize(values)));
+    return ast_context()->ReturnInstruction(result, call->id());
+  }
+}
+
+
+void HOptimizedGraphBuilder::GenerateToLength(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  Callable callable = CodeFactory::ToLength(isolate());
+  HValue* input = Pop();
+  HValue* stub = Add<HConstant>(callable.code());
+  HValue* values[] = {context(), input};
+  HInstruction* result =
+      New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+                               Vector<HValue*>(values, arraysize(values)));
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateToNumber(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  Callable callable = CodeFactory::ToNumber(isolate());
+  HValue* input = Pop();
+  if (input->type().IsTaggedNumber()) {
+    return ast_context()->ReturnValue(input);
+  } else {
+    HValue* stub = Add<HConstant>(callable.code());
+    HValue* values[] = {context(), input};
+    HInstruction* result =
+        New<HCallWithDescriptor>(stub, 0, callable.descriptor(),
+                                 Vector<HValue*>(values, arraysize(values)));
+    return ast_context()->ReturnInstruction(result, call->id());
+  }
+}
+
+
+void HOptimizedGraphBuilder::GenerateIsJSProxy(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HIfContinuation continuation;
+  IfBuilder if_proxy(this);
+
+  HValue* smicheck = if_proxy.IfNot<HIsSmiAndBranch>(value);
+  if_proxy.And();
+  HValue* map = Add<HLoadNamedField>(value, smicheck, HObjectAccess::ForMap());
+  HValue* instance_type =
+      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
+  if_proxy.If<HCompareNumericAndBranch>(
+      instance_type, Add<HConstant>(JS_PROXY_TYPE), Token::EQ);
+
+  if_proxy.CaptureContinuation(&continuation);
+  return ast_context()->ReturnContinuation(&continuation, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateHasFastPackedElements(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* object = Pop();
+  HIfContinuation continuation(graph()->CreateBasicBlock(),
+                               graph()->CreateBasicBlock());
+  IfBuilder if_not_smi(this);
+  if_not_smi.IfNot<HIsSmiAndBranch>(object);
+  if_not_smi.Then();
+  {
+    NoObservableSideEffectsScope no_effects(this);
+
+    IfBuilder if_fast_packed(this);
+    HValue* elements_kind = BuildGetElementsKind(object);
+    if_fast_packed.If<HCompareNumericAndBranch>(
+        elements_kind, Add<HConstant>(FAST_SMI_ELEMENTS), Token::EQ);
+    if_fast_packed.Or();
+    if_fast_packed.If<HCompareNumericAndBranch>(
+        elements_kind, Add<HConstant>(FAST_ELEMENTS), Token::EQ);
+    if_fast_packed.Or();
+    if_fast_packed.If<HCompareNumericAndBranch>(
+        elements_kind, Add<HConstant>(FAST_DOUBLE_ELEMENTS), Token::EQ);
+    if_fast_packed.JoinContinuation(&continuation);
+  }
+  if_not_smi.JoinContinuation(&continuation);
+  return ast_context()->ReturnContinuation(&continuation, call->id());
+}
+
+
+// Support for arguments.length and arguments[?].
+void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 0);
+  HInstruction* result = NULL;
+  if (function_state()->outer() == NULL) {
+    HInstruction* elements = Add<HArgumentsElements>(false);
+    result = New<HArgumentsLength>(elements);
+  } else {
+    // Number of arguments without receiver.
+    int argument_count = environment()->
+        arguments_environment()->parameter_count() - 1;
+    result = New<HConstant>(argument_count);
+  }
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* index = Pop();
+  HInstruction* result = NULL;
+  if (function_state()->outer() == NULL) {
+    HInstruction* elements = Add<HArgumentsElements>(false);
+    HInstruction* length = Add<HArgumentsLength>(elements);
+    HInstruction* checked_index = Add<HBoundsCheck>(index, length);
+    result = New<HAccessArgumentsAt>(elements, length, checked_index);
+  } else {
+    EnsureArgumentsArePushedForAccess();
+
+    // Number of arguments without receiver.
+    HInstruction* elements = function_state()->arguments_elements();
+    int argument_count = environment()->
+        arguments_environment()->parameter_count() - 1;
+    HInstruction* length = Add<HConstant>(argument_count);
+    HInstruction* checked_key = Add<HBoundsCheck>(index, length);
+    result = New<HAccessArgumentsAt>(elements, length, checked_key);
+  }
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* object = Pop();
+
+  IfBuilder if_objectisvalue(this);
+  HValue* objectisvalue = if_objectisvalue.If<HHasInstanceTypeAndBranch>(
+      object, JS_VALUE_TYPE);
+  if_objectisvalue.Then();
+  {
+    // Return the actual value.
+    Push(Add<HLoadNamedField>(
+            object, objectisvalue,
+            HObjectAccess::ForObservableJSObjectOffset(
+                JSValue::kValueOffset)));
+    Add<HSimulate>(call->id(), FIXED_SIMULATE);
+  }
+  if_objectisvalue.Else();
+  {
+    // If the object is not a JSValue, return the object itself.
+    Push(object);
+    Add<HSimulate>(call->id(), FIXED_SIMULATE);
+  }
+  if_objectisvalue.End();
+  return ast_context()->ReturnValue(Pop());
+}
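+
+// Illustrative example (hypothetical JS, for exposition only):
+// %_ValueOf(new Number(42)) loads 42 out of JSValue::kValueOffset, while
+// %_ValueOf(42) returns the input unchanged.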
+
+
+void HOptimizedGraphBuilder::GenerateJSValueGetValue(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HInstruction* result = Add<HLoadNamedField>(
+      value, nullptr,
+      HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset));
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateIsDate(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HHasInstanceTypeAndBranch* result =
+      New<HHasInstanceTypeAndBranch>(value, JS_DATE_TYPE);
+  return ast_context()->ReturnControl(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
+    CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 3);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+  HValue* string = Pop();
+  HValue* value = Pop();
+  HValue* index = Pop();
+  Add<HSeqStringSetChar>(String::ONE_BYTE_ENCODING, string,
+                         index, value);
+  Add<HSimulate>(call->id(), FIXED_SIMULATE);
+  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
+void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
+    CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 3);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+  HValue* string = Pop();
+  HValue* value = Pop();
+  HValue* index = Pop();
+  Add<HSeqStringSetChar>(String::TWO_BYTE_ENCODING, string,
+                         index, value);
+  Add<HSimulate>(call->id(), FIXED_SIMULATE);
+  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
+void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 2);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* value = Pop();
+  HValue* object = Pop();
+
+  // Check if object is a JSValue.
+  IfBuilder if_objectisvalue(this);
+  if_objectisvalue.If<HHasInstanceTypeAndBranch>(object, JS_VALUE_TYPE);
+  if_objectisvalue.Then();
+  {
+    // Create in-object property store to kValueOffset.
+    Add<HStoreNamedField>(object,
+        HObjectAccess::ForObservableJSObjectOffset(JSValue::kValueOffset),
+        value);
+    if (!ast_context()->IsEffect()) {
+      Push(value);
+    }
+    Add<HSimulate>(call->id(), FIXED_SIMULATE);
+  }
+  if_objectisvalue.Else();
+  {
+    // Nothing to do in this case.
+    if (!ast_context()->IsEffect()) {
+      Push(value);
+    }
+    Add<HSimulate>(call->id(), FIXED_SIMULATE);
+  }
+  if_objectisvalue.End();
+  if (!ast_context()->IsEffect()) {
+    Drop(1);
+  }
+  return ast_context()->ReturnValue(value);
+}
+
+
+// Fast support for charCodeAt(n).
+void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 2);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* index = Pop();
+  HValue* string = Pop();
+  HInstruction* result = BuildStringCharCodeAt(string, index);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+// Fast support for String.fromCharCode(code).
+void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* char_code = Pop();
+  HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+// Fast support for string.charAt(n) and string[n].
+void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 2);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* index = Pop();
+  HValue* string = Pop();
+  HInstruction* char_code = BuildStringCharCodeAt(string, index);
+  AddInstruction(char_code);
+  HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+// Fast support for object equality testing.
+void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 2);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* right = Pop();
+  HValue* left = Pop();
+  HCompareObjectEqAndBranch* result =
+      New<HCompareObjectEqAndBranch>(left, right);
+  return ast_context()->ReturnControl(result, call->id());
+}
+
+
+// Fast support for SubString.
+void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
+  DCHECK_EQ(3, call->arguments()->length());
+  CHECK_ALIVE(VisitExpressions(call->arguments()));
+  PushArgumentsFromEnvironment(call->arguments()->length());
+  HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+// Support for direct calls from JavaScript to native RegExp code.
+void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
+  DCHECK_EQ(4, call->arguments()->length());
+  CHECK_ALIVE(VisitExpressions(call->arguments()));
+  PushArgumentsFromEnvironment(call->arguments()->length());
+  HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateRegExpFlags(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitExpressions(call->arguments()));
+  HValue* regexp = Pop();
+  HInstruction* result =
+      New<HLoadNamedField>(regexp, nullptr, HObjectAccess::ForJSRegExpFlags());
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateRegExpSource(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitExpressions(call->arguments()));
+  HValue* regexp = Pop();
+  HInstruction* result =
+      New<HLoadNamedField>(regexp, nullptr, HObjectAccess::ForJSRegExpSource());
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateDoubleLo(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::LOW);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateDoubleHi(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::HIGH);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateConstructDouble(CallRuntime* call) {
+  DCHECK_EQ(2, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* lo = Pop();
+  HValue* hi = Pop();
+  HInstruction* result = NewUncasted<HConstructDouble>(hi, lo);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
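+
+// Worked example: %_ConstructDouble(hi, lo) reassembles an IEEE-754 double
+// from its two 32-bit halves, e.g. hi = 0x3FF00000, lo = 0x00000000 yields
+// 1.0; it is the inverse of the %_DoubleHi/%_DoubleLo pair above.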
+
+
+// Construct a RegExp exec result with two in-object properties.
+void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
+  DCHECK_EQ(3, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+  HValue* input = Pop();
+  HValue* index = Pop();
+  HValue* length = Pop();
+  HValue* result = BuildRegExpConstructResult(length, index, input);
+  return ast_context()->ReturnValue(result);
+}
+
+
+// Fast support for number to string.
+void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* number = Pop();
+  HValue* result = BuildNumberToString(number, Type::Any(zone()));
+  return ast_context()->ReturnValue(result);
+}
+
+
+// Fast support for calls.
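+// For %_Call(target, receiver, ...args), everything except the target is
+// materialized as real stack arguments; the target is popped separately and
+// passed to the Call builtin trampoline together with an argc constant that
+// excludes both the target and the receiver.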
+void HOptimizedGraphBuilder::GenerateCall(CallRuntime* call) {
+  DCHECK_LE(2, call->arguments()->length());
+  CHECK_ALIVE(VisitExpressions(call->arguments()));
+  CallTrampolineDescriptor descriptor(isolate());
+  PushArgumentsFromEnvironment(call->arguments()->length() - 1);
+  HValue* trampoline = Add<HConstant>(isolate()->builtins()->Call());
+  HValue* target = Pop();
+  HValue* values[] = {context(), target,
+                      Add<HConstant>(call->arguments()->length() - 2)};
+  HInstruction* result = New<HCallWithDescriptor>(
+      trampoline, call->arguments()->length() - 1, descriptor,
+      Vector<HValue*>(values, arraysize(values)));
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+// Fast call to math functions.
+void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
+  DCHECK_EQ(2, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* right = Pop();
+  HValue* left = Pop();
+  HInstruction* result = NewUncasted<HPower>(left, right);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateMathClz32(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathClz32);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateMathFloor(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathFloor);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathLog);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathSqrt);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
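+// Raw FixedArray element access.  ALLOW_RETURN_HOLE means the load may yield
+// the hole value; callers of %_FixedArrayGet are expected to handle it.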
+void HOptimizedGraphBuilder::GenerateFixedArrayGet(CallRuntime* call) {
+  DCHECK_EQ(2, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* index = Pop();
+  HValue* object = Pop();
+  HInstruction* result = New<HLoadKeyed>(
+      object, index, nullptr, nullptr, FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateFixedArraySet(CallRuntime* call) {
+  DCHECK_EQ(3, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+  HValue* value = Pop();
+  HValue* index = Pop();
+  HValue* object = Pop();
+  NoObservableSideEffectsScope no_effects(this);
+  Add<HStoreKeyed>(object, index, value, nullptr, FAST_HOLEY_ELEMENTS);
+  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
+void HOptimizedGraphBuilder::GenerateTheHole(CallRuntime* call) {
+  DCHECK_EQ(0, call->arguments()->length());
+  return ast_context()->ReturnValue(graph()->GetConstantHole());
+}
+
+
+void HOptimizedGraphBuilder::GenerateCreateIterResultObject(CallRuntime* call) {
+  DCHECK_EQ(2, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* done = Pop();
+  HValue* value = Pop();
+  HValue* result = BuildCreateIterResultObject(value, done);
+  return ast_context()->ReturnValue(result);
+}
+
+
+void HOptimizedGraphBuilder::GenerateJSCollectionGetTable(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* receiver = Pop();
+  HInstruction* result = New<HLoadNamedField>(
+      receiver, nullptr, HObjectAccess::ForJSCollectionTable());
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateStringGetRawHashField(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* object = Pop();
+  HInstruction* result = New<HLoadNamedField>(
+      object, nullptr, HObjectAccess::ForStringHashField());
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+template <typename CollectionType>
+HValue* HOptimizedGraphBuilder::BuildAllocateOrderedHashTable() {
+  static const int kCapacity = CollectionType::kMinCapacity;
+  static const int kBucketCount = kCapacity / CollectionType::kLoadFactor;
+  static const int kFixedArrayLength = CollectionType::kHashTableStartIndex +
+                                       kBucketCount +
+                                       (kCapacity * CollectionType::kEntrySize);
+  static const int kSizeInBytes =
+      FixedArray::kHeaderSize + (kFixedArrayLength * kPointerSize);
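+  // Illustrative layout: the backing FixedArray holds the header fields
+  // (kHashTableStartIndex slots), one slot per bucket and kEntrySize slots
+  // per entry.  For example, assuming kMinCapacity == 4, kLoadFactor == 2,
+  // kHashTableStartIndex == 3 and kEntrySize == 1 (the OrderedHashSet case),
+  // kBucketCount is 2 and kFixedArrayLength is 3 + 2 + 4 * 1 = 9 slots.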
+
+  // Allocate the table and add the proper map.
+  HValue* table =
+      Add<HAllocate>(Add<HConstant>(kSizeInBytes), HType::HeapObject(),
+                     NOT_TENURED, FIXED_ARRAY_TYPE);
+  AddStoreMapConstant(table, isolate()->factory()->ordered_hash_table_map());
+
+  // Initialize the FixedArray...
+  HValue* length = Add<HConstant>(kFixedArrayLength);
+  Add<HStoreNamedField>(table, HObjectAccess::ForFixedArrayLength(), length);
+
+  // ...and the OrderedHashTable fields.
+  Add<HStoreNamedField>(
+      table,
+      HObjectAccess::ForOrderedHashTableNumberOfBuckets<CollectionType>(),
+      Add<HConstant>(kBucketCount));
+  Add<HStoreNamedField>(
+      table,
+      HObjectAccess::ForOrderedHashTableNumberOfElements<CollectionType>(),
+      graph()->GetConstant0());
+  Add<HStoreNamedField>(
+      table, HObjectAccess::ForOrderedHashTableNumberOfDeletedElements<
+                 CollectionType>(),
+      graph()->GetConstant0());
+
+  // Fill the buckets with kNotFound.
+  HValue* not_found = Add<HConstant>(CollectionType::kNotFound);
+  for (int i = 0; i < kBucketCount; ++i) {
+    Add<HStoreNamedField>(
+        table, HObjectAccess::ForOrderedHashTableBucket<CollectionType>(i),
+        not_found);
+  }
+
+  // Fill the data table with undefined.
+  HValue* undefined = graph()->GetConstantUndefined();
+  for (int i = 0; i < (kCapacity * CollectionType::kEntrySize); ++i) {
+    Add<HStoreNamedField>(table,
+                          HObjectAccess::ForOrderedHashTableDataTableIndex<
+                              CollectionType, kBucketCount>(i),
+                          undefined);
+  }
+
+  return table;
+}
+
+
+void HOptimizedGraphBuilder::GenerateSetInitialize(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+  HValue* table = BuildAllocateOrderedHashTable<OrderedHashSet>();
+  Add<HStoreNamedField>(receiver, HObjectAccess::ForJSCollectionTable(), table);
+  return ast_context()->ReturnValue(receiver);
+}
+
+
+void HOptimizedGraphBuilder::GenerateMapInitialize(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+  HValue* table = BuildAllocateOrderedHashTable<OrderedHashMap>();
+  Add<HStoreNamedField>(receiver, HObjectAccess::ForJSCollectionTable(), table);
+  return ast_context()->ReturnValue(receiver);
+}
+
+
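+// Clearing replaces the backing table with a fresh empty one.  The old table
+// is chained to the new one through its "next table" field and its number of
+// deleted elements is set to the cleared-table sentinel, so that live
+// iterators can detect the transition and rebase themselves onto the new
+// table.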
+template <typename CollectionType>
+void HOptimizedGraphBuilder::BuildOrderedHashTableClear(HValue* receiver) {
+  HValue* old_table = Add<HLoadNamedField>(
+      receiver, nullptr, HObjectAccess::ForJSCollectionTable());
+  HValue* new_table = BuildAllocateOrderedHashTable<CollectionType>();
+  Add<HStoreNamedField>(
+      old_table, HObjectAccess::ForOrderedHashTableNextTable<CollectionType>(),
+      new_table);
+  Add<HStoreNamedField>(
+      old_table, HObjectAccess::ForOrderedHashTableNumberOfDeletedElements<
+                     CollectionType>(),
+      Add<HConstant>(CollectionType::kClearedTableSentinel));
+  Add<HStoreNamedField>(receiver, HObjectAccess::ForJSCollectionTable(),
+                        new_table);
+}
+
+
+void HOptimizedGraphBuilder::GenerateSetClear(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+  BuildOrderedHashTableClear<OrderedHashSet>(receiver);
+  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
+void HOptimizedGraphBuilder::GenerateMapClear(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+  BuildOrderedHashTableClear<OrderedHashMap>(receiver);
+  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
+void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
+  DCHECK_EQ(1, call->arguments()->length());
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* value = Pop();
+  HGetCachedArrayIndex* result = New<HGetCachedArrayIndex>(value);
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateFastOneByteArrayJoin(CallRuntime* call) {
+  // Simply returning undefined here would be semantically correct and even
+  // avoid the bailout. Nevertheless, some ancient benchmarks like SunSpider's
+  // string-fasta would tank, because fullcode contains an optimized version.
+  // Obviously the fullcode => Crankshaft => bailout => fullcode dance is
+  // faster... *sigh*
+  return Bailout(kInlinedRuntimeFunctionFastOneByteArrayJoin);
+}
+
+
+void HOptimizedGraphBuilder::GenerateDebugBreakInOptimizedCode(
+    CallRuntime* call) {
+  Add<HDebugBreak>();
+  return ast_context()->ReturnValue(graph()->GetConstant0());
+}
+
+
+void HOptimizedGraphBuilder::GenerateDebugIsActive(CallRuntime* call) {
+  DCHECK_EQ(0, call->arguments()->length());
+  HValue* ref =
+      Add<HConstant>(ExternalReference::debug_is_active_address(isolate()));
+  HValue* value =
+      Add<HLoadNamedField>(ref, nullptr, HObjectAccess::ForExternalUInteger8());
+  return ast_context()->ReturnValue(value);
+}
+
+
+#undef CHECK_BAILOUT
+#undef CHECK_ALIVE
+
+
+HEnvironment::HEnvironment(HEnvironment* outer,
+                           Scope* scope,
+                           Handle<JSFunction> closure,
+                           Zone* zone)
+    : closure_(closure),
+      values_(0, zone),
+      frame_type_(JS_FUNCTION),
+      parameter_count_(0),
+      specials_count_(1),
+      local_count_(0),
+      outer_(outer),
+      entry_(NULL),
+      pop_count_(0),
+      push_count_(0),
+      ast_id_(BailoutId::None()),
+      zone_(zone) {
+  Scope* declaration_scope = scope->DeclarationScope();
+  Initialize(declaration_scope->num_parameters() + 1,
+             declaration_scope->num_stack_slots(), 0);
+}
+
+
+HEnvironment::HEnvironment(Zone* zone, int parameter_count)
+    : values_(0, zone),
+      frame_type_(STUB),
+      parameter_count_(parameter_count),
+      specials_count_(1),
+      local_count_(0),
+      outer_(NULL),
+      entry_(NULL),
+      pop_count_(0),
+      push_count_(0),
+      ast_id_(BailoutId::None()),
+      zone_(zone) {
+  Initialize(parameter_count, 0, 0);
+}
+
+
+HEnvironment::HEnvironment(const HEnvironment* other, Zone* zone)
+    : values_(0, zone),
+      frame_type_(JS_FUNCTION),
+      parameter_count_(0),
+      specials_count_(0),
+      local_count_(0),
+      outer_(NULL),
+      entry_(NULL),
+      pop_count_(0),
+      push_count_(0),
+      ast_id_(other->ast_id()),
+      zone_(zone) {
+  Initialize(other);
+}
+
+
+HEnvironment::HEnvironment(HEnvironment* outer,
+                           Handle<JSFunction> closure,
+                           FrameType frame_type,
+                           int arguments,
+                           Zone* zone)
+    : closure_(closure),
+      values_(arguments, zone),
+      frame_type_(frame_type),
+      parameter_count_(arguments),
+      specials_count_(0),
+      local_count_(0),
+      outer_(outer),
+      entry_(NULL),
+      pop_count_(0),
+      push_count_(0),
+      ast_id_(BailoutId::None()),
+      zone_(zone) {
+}
+
+
+void HEnvironment::Initialize(int parameter_count,
+                              int local_count,
+                              int stack_height) {
+  parameter_count_ = parameter_count;
+  local_count_ = local_count;
+
+  // Avoid reallocating the temporaries' backing store on the first Push.
+  int total = parameter_count + specials_count_ + local_count + stack_height;
+  values_.Initialize(total + 4, zone());
+  for (int i = 0; i < total; ++i) values_.Add(NULL, zone());
+}
+
+
+void HEnvironment::Initialize(const HEnvironment* other) {
+  closure_ = other->closure();
+  values_.AddAll(other->values_, zone());
+  assigned_variables_.Union(other->assigned_variables_, zone());
+  frame_type_ = other->frame_type_;
+  parameter_count_ = other->parameter_count_;
+  local_count_ = other->local_count_;
+  if (other->outer_ != NULL) outer_ = other->outer_->Copy();  // Deep copy.
+  entry_ = other->entry_;
+  pop_count_ = other->pop_count_;
+  push_count_ = other->push_count_;
+  specials_count_ = other->specials_count_;
+  ast_id_ = other->ast_id_;
+}
+
+
+void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
+  DCHECK(!block->IsLoopHeader());
+  DCHECK(values_.length() == other->values_.length());
+
+  int length = values_.length();
+  for (int i = 0; i < length; ++i) {
+    HValue* value = values_[i];
+    if (value != NULL && value->IsPhi() && value->block() == block) {
+      // There is already a phi for the i'th value.
+      HPhi* phi = HPhi::cast(value);
+      // Assert that the index is correct and no incoming edge was missed.
+      DCHECK(phi->merged_index() == i || !phi->HasMergedIndex());
+      DCHECK(phi->OperandCount() == block->predecessors()->length());
+      phi->AddInput(other->values_[i]);
+    } else if (values_[i] != other->values_[i]) {
+      // There is a fresh value on the incoming edge, a phi is needed.
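+      // Seed the phi with the old value once per existing predecessor edge,
+      // then append the value arriving on the new edge.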
+      DCHECK(values_[i] != NULL && other->values_[i] != NULL);
+      HPhi* phi = block->AddNewPhi(i);
+      HValue* old_value = values_[i];
+      for (int j = 0; j < block->predecessors()->length(); j++) {
+        phi->AddInput(old_value);
+      }
+      phi->AddInput(other->values_[i]);
+      this->values_[i] = phi;
+    }
+  }
+}
+
+
+void HEnvironment::Bind(int index, HValue* value) {
+  DCHECK(value != NULL);
+  assigned_variables_.Add(index, zone());
+  values_[index] = value;
+}
+
+
+bool HEnvironment::HasExpressionAt(int index) const {
+  return index >= parameter_count_ + specials_count_ + local_count_;
+}
+
+
+bool HEnvironment::ExpressionStackIsEmpty() const {
+  DCHECK(length() >= first_expression_index());
+  return length() == first_expression_index();
+}
+
+
+void HEnvironment::SetExpressionStackAt(int index_from_top, HValue* value) {
+  int count = index_from_top + 1;
+  int index = values_.length() - count;
+  DCHECK(HasExpressionAt(index));
+  // The push count must include at least the element in question or else
+  // the new value will not be included in this environment's history.
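+  // For example, with push_count_ == 0 and count == 2, two conceptual pops
+  // and two pushes are recorded: pop_count_ grows by 2 and push_count_
+  // becomes 2, so the write below is part of the recorded history.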
+  if (push_count_ < count) {
+    // This is the same effect as popping then re-pushing 'count' elements.
+    pop_count_ += (count - push_count_);
+    push_count_ = count;
+  }
+  values_[index] = value;
+}
+
+
+HValue* HEnvironment::RemoveExpressionStackAt(int index_from_top) {
+  int count = index_from_top + 1;
+  int index = values_.length() - count;
+  DCHECK(HasExpressionAt(index));
+  // Simulate popping 'count' elements and then
+  // pushing 'count - 1' elements back.
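+  // For example, with push_count_ == 1 and count == 2, pop_count_ grows by
+  // max(2 - 1, 0) == 1 and push_count_ becomes max(1 - 2, 0) + 1 == 1, which
+  // records the net effect of removing one element below the top.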
+  pop_count_ += Max(count - push_count_, 0);
+  push_count_ = Max(push_count_ - count, 0) + (count - 1);
+  return values_.Remove(index);
+}
+
+
+void HEnvironment::Drop(int count) {
+  for (int i = 0; i < count; ++i) {
+    Pop();
+  }
+}
+
+
+void HEnvironment::Print() const {
+  OFStream os(stdout);
+  os << *this << "\n";
+}
+
+
+HEnvironment* HEnvironment::Copy() const {
+  return new(zone()) HEnvironment(this, zone());
+}
+
+
+HEnvironment* HEnvironment::CopyWithoutHistory() const {
+  HEnvironment* result = Copy();
+  result->ClearHistory();
+  return result;
+}
+
+
+HEnvironment* HEnvironment::CopyAsLoopHeader(HBasicBlock* loop_header) const {
+  HEnvironment* new_env = Copy();
+  for (int i = 0; i < values_.length(); ++i) {
+    HPhi* phi = loop_header->AddNewPhi(i);
+    phi->AddInput(values_[i]);
+    new_env->values_[i] = phi;
+  }
+  new_env->ClearHistory();
+  return new_env;
+}
+
+
+HEnvironment* HEnvironment::CreateStubEnvironment(HEnvironment* outer,
+                                                  Handle<JSFunction> target,
+                                                  FrameType frame_type,
+                                                  int arguments) const {
+  HEnvironment* new_env =
+      new(zone()) HEnvironment(outer, target, frame_type,
+                               arguments + 1, zone());
+  for (int i = 0; i <= arguments; ++i) {  // Include receiver.
+    new_env->Push(ExpressionStackAt(arguments - i));
+  }
+  new_env->ClearHistory();
+  return new_env;
+}
+
+
+HEnvironment* HEnvironment::CopyForInlining(
+    Handle<JSFunction> target,
+    int arguments,
+    FunctionLiteral* function,
+    HConstant* undefined,
+    InliningKind inlining_kind) const {
+  DCHECK(frame_type() == JS_FUNCTION);
+
+  // Outer environment is a copy of this one without the arguments.
+  int arity = function->scope()->num_parameters();
+
+  HEnvironment* outer = Copy();
+  outer->Drop(arguments + 1);  // Including receiver.
+  outer->ClearHistory();
+
+  if (inlining_kind == CONSTRUCT_CALL_RETURN) {
+    // Create an artificial constructor stub environment.  The receiver
+    // should actually be the constructor function, but we pass the newly
+    // allocated object instead; DoComputeConstructStubFrame() relies on that.
+    outer = CreateStubEnvironment(outer, target, JS_CONSTRUCT, arguments);
+  } else if (inlining_kind == GETTER_CALL_RETURN) {
+    // We need an additional StackFrame::INTERNAL frame for restoring the
+    // correct context.
+    outer = CreateStubEnvironment(outer, target, JS_GETTER, arguments);
+  } else if (inlining_kind == SETTER_CALL_RETURN) {
+    // We need an additional StackFrame::INTERNAL frame for temporarily saving
+    // the argument of the setter; see StoreStubCompiler::CompileStoreViaSetter.
+    outer = CreateStubEnvironment(outer, target, JS_SETTER, arguments);
+  }
+
+  if (arity != arguments) {
+    // Create artificial arguments adaptation environment.
+    outer = CreateStubEnvironment(outer, target, ARGUMENTS_ADAPTOR, arguments);
+  }
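+  // The environment chain at this point is: caller environment -> optional
+  // construct/getter/setter stub frame -> optional arguments adaptor frame.
+  // The inlined function's own environment is created below and linked to it.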
+
+  HEnvironment* inner =
+      new(zone()) HEnvironment(outer, function->scope(), target, zone());
+  // Get the argument values from the original environment.
+  for (int i = 0; i <= arity; ++i) {  // Include receiver.
+    HValue* push = (i <= arguments) ?
+        ExpressionStackAt(arguments - i) : undefined;
+    inner->SetValueAt(i, push);
+  }
+  inner->SetValueAt(arity + 1, context());
+  for (int i = arity + 2; i < inner->length(); ++i) {
+    inner->SetValueAt(i, undefined);
+  }
+
+  inner->set_ast_id(BailoutId::FunctionEntry());
+  return inner;
+}
+
+
+std::ostream& operator<<(std::ostream& os, const HEnvironment& env) {
+  for (int i = 0; i < env.length(); i++) {
+    if (i == 0) os << "parameters\n";
+    if (i == env.parameter_count()) os << "specials\n";
+    if (i == env.parameter_count() + env.specials_count()) os << "locals\n";
+    if (i == env.parameter_count() + env.specials_count() + env.local_count()) {
+      os << "expressions\n";
+    }
+    HValue* val = env.values()->at(i);
+    os << i << ": ";
+    if (val != NULL) {
+      os << val;
+    } else {
+      os << "NULL";
+    }
+    os << "\n";
+  }
+  return os << "\n";
+}
+
+
+void HTracer::TraceCompilation(CompilationInfo* info) {
+  Tag tag(this, "compilation");
+  base::SmartArrayPointer<char> name = info->GetDebugName();
+  if (info->IsOptimizing()) {
+    PrintStringProperty("name", name.get());
+    PrintIndent();
+    trace_.Add("method \"%s:%d\"\n", name.get(), info->optimization_id());
+  } else {
+    PrintStringProperty("name", name.get());
+    PrintStringProperty("method", "stub");
+  }
+  PrintLongProperty("date",
+                    static_cast<int64_t>(base::OS::TimeCurrentMillis()));
+}
+
+
+void HTracer::TraceLithium(const char* name, LChunk* chunk) {
+  DCHECK(!chunk->isolate()->concurrent_recompilation_enabled());
+  AllowHandleDereference allow_deref;
+  AllowDeferredHandleDereference allow_deferred_deref;
+  Trace(name, chunk->graph(), chunk);
+}
+
+
+void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
+  DCHECK(!graph->isolate()->concurrent_recompilation_enabled());
+  AllowHandleDereference allow_deref;
+  AllowDeferredHandleDereference allow_deferred_deref;
+  Trace(name, graph, NULL);
+}
+
+
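+// Emits the textual control-flow-graph trace in the format consumed by the
+// C1Visualizer tool (originally written for HotSpot's C1 compiler traces).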
+void HTracer::Trace(const char* name, HGraph* graph, LChunk* chunk) {
+  Tag tag(this, "cfg");
+  PrintStringProperty("name", name);
+  const ZoneList<HBasicBlock*>* blocks = graph->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* current = blocks->at(i);
+    Tag block_tag(this, "block");
+    PrintBlockProperty("name", current->block_id());
+    PrintIntProperty("from_bci", -1);
+    PrintIntProperty("to_bci", -1);
+
+    if (!current->predecessors()->is_empty()) {
+      PrintIndent();
+      trace_.Add("predecessors");
+      for (int j = 0; j < current->predecessors()->length(); ++j) {
+        trace_.Add(" \"B%d\"", current->predecessors()->at(j)->block_id());
+      }
+      trace_.Add("\n");
+    } else {
+      PrintEmptyProperty("predecessors");
+    }
+
+    if (current->end()->SuccessorCount() == 0) {
+      PrintEmptyProperty("successors");
+    } else {
+      PrintIndent();
+      trace_.Add("successors");
+      for (HSuccessorIterator it(current->end()); !it.Done(); it.Advance()) {
+        trace_.Add(" \"B%d\"", it.Current()->block_id());
+      }
+      trace_.Add("\n");
+    }
+
+    PrintEmptyProperty("xhandlers");
+
+    {
+      PrintIndent();
+      trace_.Add("flags");
+      if (current->IsLoopSuccessorDominator()) {
+        trace_.Add(" \"dom-loop-succ\"");
+      }
+      if (current->IsUnreachable()) {
+        trace_.Add(" \"dead\"");
+      }
+      if (current->is_osr_entry()) {
+        trace_.Add(" \"osr\"");
+      }
+      trace_.Add("\n");
+    }
+
+    if (current->dominator() != NULL) {
+      PrintBlockProperty("dominator", current->dominator()->block_id());
+    }
+
+    PrintIntProperty("loop_depth", current->LoopNestingDepth());
+
+    if (chunk != NULL) {
+      int first_index = current->first_instruction_index();
+      int last_index = current->last_instruction_index();
+      PrintIntProperty(
+          "first_lir_id",
+          LifetimePosition::FromInstructionIndex(first_index).Value());
+      PrintIntProperty(
+          "last_lir_id",
+          LifetimePosition::FromInstructionIndex(last_index).Value());
+    }
+
+    {
+      Tag states_tag(this, "states");
+      Tag locals_tag(this, "locals");
+      int total = current->phis()->length();
+      PrintIntProperty("size", current->phis()->length());
+      PrintStringProperty("method", "None");
+      for (int j = 0; j < total; ++j) {
+        HPhi* phi = current->phis()->at(j);
+        PrintIndent();
+        std::ostringstream os;
+        os << phi->merged_index() << " " << NameOf(phi) << " " << *phi << "\n";
+        trace_.Add(os.str().c_str());
+      }
+    }
+
+    {
+      Tag HIR_tag(this, "HIR");
+      for (HInstructionIterator it(current); !it.Done(); it.Advance()) {
+        HInstruction* instruction = it.Current();
+        int uses = instruction->UseCount();
+        PrintIndent();
+        std::ostringstream os;
+        os << "0 " << uses << " " << NameOf(instruction) << " " << *instruction;
+        if (graph->info()->is_tracking_positions() &&
+            instruction->has_position() && instruction->position().raw() != 0) {
+          const SourcePosition pos = instruction->position();
+          os << " pos:";
+          if (pos.inlining_id() != 0) os << pos.inlining_id() << "_";
+          os << pos.position();
+        }
+        os << " <|@\n";
+        trace_.Add(os.str().c_str());
+      }
+    }
+
+    if (chunk != NULL) {
+      Tag LIR_tag(this, "LIR");
+      int first_index = current->first_instruction_index();
+      int last_index = current->last_instruction_index();
+      if (first_index != -1 && last_index != -1) {
+        const ZoneList<LInstruction*>* instructions = chunk->instructions();
+        for (int i = first_index; i <= last_index; ++i) {
+          LInstruction* linstr = instructions->at(i);
+          if (linstr != NULL) {
+            PrintIndent();
+            trace_.Add("%d ",
+                       LifetimePosition::FromInstructionIndex(i).Value());
+            linstr->PrintTo(&trace_);
+            std::ostringstream os;
+            os << " [hir:" << NameOf(linstr->hydrogen_value()) << "] <|@\n";
+            trace_.Add(os.str().c_str());
+          }
+        }
+      }
+    }
+  }
+}
+
+
+void HTracer::TraceLiveRanges(const char* name, LAllocator* allocator) {
+  Tag tag(this, "intervals");
+  PrintStringProperty("name", name);
+
+  const Vector<LiveRange*>* fixed_d = allocator->fixed_double_live_ranges();
+  for (int i = 0; i < fixed_d->length(); ++i) {
+    TraceLiveRange(fixed_d->at(i), "fixed", allocator->zone());
+  }
+
+  const Vector<LiveRange*>* fixed = allocator->fixed_live_ranges();
+  for (int i = 0; i < fixed->length(); ++i) {
+    TraceLiveRange(fixed->at(i), "fixed", allocator->zone());
+  }
+
+  const ZoneList<LiveRange*>* live_ranges = allocator->live_ranges();
+  for (int i = 0; i < live_ranges->length(); ++i) {
+    TraceLiveRange(live_ranges->at(i), "object", allocator->zone());
+  }
+}
+
+
+void HTracer::TraceLiveRange(LiveRange* range, const char* type,
+                             Zone* zone) {
+  if (range != NULL && !range->IsEmpty()) {
+    PrintIndent();
+    trace_.Add("%d %s", range->id(), type);
+    if (range->HasRegisterAssigned()) {
+      LOperand* op = range->CreateAssignedOperand(zone);
+      int assigned_reg = op->index();
+      if (op->IsDoubleRegister()) {
+        trace_.Add(" \"%s\"",
+                   DoubleRegister::from_code(assigned_reg).ToString());
+      } else {
+        DCHECK(op->IsRegister());
+        trace_.Add(" \"%s\"", Register::from_code(assigned_reg).ToString());
+      }
+    } else if (range->IsSpilled()) {
+      LOperand* op = range->TopLevel()->GetSpillOperand();
+      if (op->IsDoubleStackSlot()) {
+        trace_.Add(" \"double_stack:%d\"", op->index());
+      } else {
+        DCHECK(op->IsStackSlot());
+        trace_.Add(" \"stack:%d\"", op->index());
+      }
+    }
+    int parent_index = -1;
+    if (range->IsChild()) {
+      parent_index = range->parent()->id();
+    } else {
+      parent_index = range->id();
+    }
+    LOperand* op = range->FirstHint();
+    int hint_index = -1;
+    if (op != NULL && op->IsUnallocated()) {
+      hint_index = LUnallocated::cast(op)->virtual_register();
+    }
+    trace_.Add(" %d %d", parent_index, hint_index);
+    UseInterval* cur_interval = range->first_interval();
+    while (cur_interval != NULL && range->Covers(cur_interval->start())) {
+      trace_.Add(" [%d, %d[",
+                 cur_interval->start().Value(),
+                 cur_interval->end().Value());
+      cur_interval = cur_interval->next();
+    }
+
+    UsePosition* current_pos = range->first_pos();
+    while (current_pos != NULL) {
+      if (current_pos->RegisterIsBeneficial() || FLAG_trace_all_uses) {
+        trace_.Add(" %d M", current_pos->pos().Value());
+      }
+      current_pos = current_pos->next();
+    }
+
+    trace_.Add(" \"\"\n");
+  }
+}
+
+
+void HTracer::FlushToFile() {
+  AppendChars(filename_.start(), trace_.ToCString().get(), trace_.length(),
+              false);
+  trace_.Reset();
+}
+
+
+void HStatistics::Initialize(CompilationInfo* info) {
+  if (!info->has_shared_info()) return;
+  source_size_ += info->shared_info()->SourceSize();
+}
+
+
+void HStatistics::Print() {
+  PrintF(
+      "\n"
+      "----------------------------------------"
+      "----------------------------------------\n"
+      "--- Hydrogen timing results:\n"
+      "----------------------------------------"
+      "----------------------------------------\n");
+  base::TimeDelta sum;
+  for (int i = 0; i < times_.length(); ++i) {
+    sum += times_[i];
+  }
+
+  for (int i = 0; i < names_.length(); ++i) {
+    PrintF("%33s", names_[i]);
+    double ms = times_[i].InMillisecondsF();
+    double percent = times_[i].PercentOf(sum);
+    PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
+
+    size_t size = sizes_[i];
+    double size_percent = static_cast<double>(size) * 100 / total_size_;
+    PrintF(" %9zu bytes / %4.1f %%\n", size, size_percent);
+  }
+
+  PrintF(
+      "----------------------------------------"
+      "----------------------------------------\n");
+  base::TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
+  PrintF("%33s %8.3f ms / %4.1f %% \n", "Create graph",
+         create_graph_.InMillisecondsF(), create_graph_.PercentOf(total));
+  PrintF("%33s %8.3f ms / %4.1f %% \n", "Optimize graph",
+         optimize_graph_.InMillisecondsF(), optimize_graph_.PercentOf(total));
+  PrintF("%33s %8.3f ms / %4.1f %% \n", "Generate and install code",
+         generate_code_.InMillisecondsF(), generate_code_.PercentOf(total));
+  PrintF(
+      "----------------------------------------"
+      "----------------------------------------\n");
+  PrintF("%33s %8.3f ms           %9zu bytes\n", "Total",
+         total.InMillisecondsF(), total_size_);
+  PrintF("%33s     (%.1f times slower than full code gen)\n", "",
+         total.TimesOf(full_code_gen_));
+
+  double source_size_in_kb = static_cast<double>(source_size_) / 1024;
+  double normalized_time = source_size_in_kb > 0
+      ? total.InMillisecondsF() / source_size_in_kb
+      : 0;
+  double normalized_size_in_kb =
+      source_size_in_kb > 0
+          ? static_cast<double>(total_size_) / 1024 / source_size_in_kb
+          : 0;
+  PrintF("%33s %8.3f ms           %7.3f kB allocated\n",
+         "Average per kB source", normalized_time, normalized_size_in_kb);
+}
+
+
+void HStatistics::SaveTiming(const char* name, base::TimeDelta time,
+                             size_t size) {
+  total_size_ += size;
+  for (int i = 0; i < names_.length(); ++i) {
+    if (strcmp(names_[i], name) == 0) {
+      times_[i] += time;
+      sizes_[i] += size;
+      return;
+    }
+  }
+  names_.Add(name);
+  times_.Add(time);
+  sizes_.Add(size);
+}
+
+
+HPhase::~HPhase() {
+  if (ShouldProduceTraceOutput()) {
+    isolate()->GetHTracer()->TraceHydrogen(name(), graph_);
+  }
+
+#ifdef DEBUG
+  graph_->Verify(false);  // No full verify.
+#endif
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/hydrogen.h b/src/crankshaft/hydrogen.h
new file mode 100644
index 0000000..40a1834
--- /dev/null
+++ b/src/crankshaft/hydrogen.h
@@ -0,0 +1,3065 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_HYDROGEN_H_
+#define V8_CRANKSHAFT_HYDROGEN_H_
+
+#include "src/accessors.h"
+#include "src/allocation.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/bailout-reason.h"
+#include "src/compiler.h"
+#include "src/crankshaft/hydrogen-instructions.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class BitVector;
+class FunctionState;
+class HEnvironment;
+class HGraph;
+class HLoopInformation;
+class HOsrBuilder;
+class HTracer;
+class LAllocator;
+class LChunk;
+class LiveRange;
+
+
+class HBasicBlock final : public ZoneObject {
+ public:
+  explicit HBasicBlock(HGraph* graph);
+  ~HBasicBlock() { }
+
+  // Simple accessors.
+  int block_id() const { return block_id_; }
+  void set_block_id(int id) { block_id_ = id; }
+  HGraph* graph() const { return graph_; }
+  Isolate* isolate() const;
+  const ZoneList<HPhi*>* phis() const { return &phis_; }
+  HInstruction* first() const { return first_; }
+  HInstruction* last() const { return last_; }
+  void set_last(HInstruction* instr) { last_ = instr; }
+  HControlInstruction* end() const { return end_; }
+  HLoopInformation* loop_information() const { return loop_information_; }
+  HLoopInformation* current_loop() const {
+    return IsLoopHeader() ? loop_information()
+                          : (parent_loop_header() != NULL
+                            ? parent_loop_header()->loop_information() : NULL);
+  }
+  const ZoneList<HBasicBlock*>* predecessors() const { return &predecessors_; }
+  bool HasPredecessor() const { return predecessors_.length() > 0; }
+  const ZoneList<HBasicBlock*>* dominated_blocks() const {
+    return &dominated_blocks_;
+  }
+  const ZoneList<int>* deleted_phis() const {
+    return &deleted_phis_;
+  }
+  void RecordDeletedPhi(int merge_index) {
+    deleted_phis_.Add(merge_index, zone());
+  }
+  HBasicBlock* dominator() const { return dominator_; }
+  HEnvironment* last_environment() const { return last_environment_; }
+  int argument_count() const { return argument_count_; }
+  void set_argument_count(int count) { argument_count_ = count; }
+  int first_instruction_index() const { return first_instruction_index_; }
+  void set_first_instruction_index(int index) {
+    first_instruction_index_ = index;
+  }
+  int last_instruction_index() const { return last_instruction_index_; }
+  void set_last_instruction_index(int index) {
+    last_instruction_index_ = index;
+  }
+  bool is_osr_entry() { return is_osr_entry_; }
+  void set_osr_entry() { is_osr_entry_ = true; }
+
+  void AttachLoopInformation();
+  void DetachLoopInformation();
+  bool IsLoopHeader() const { return loop_information() != NULL; }
+  bool IsStartBlock() const { return block_id() == 0; }
+  void PostProcessLoopHeader(IterationStatement* stmt);
+
+  bool IsFinished() const { return end_ != NULL; }
+  void AddPhi(HPhi* phi);
+  void RemovePhi(HPhi* phi);
+  void AddInstruction(HInstruction* instr, SourcePosition position);
+  bool Dominates(HBasicBlock* other) const;
+  bool EqualToOrDominates(HBasicBlock* other) const;
+  int LoopNestingDepth() const;
+
+  void SetInitialEnvironment(HEnvironment* env);
+  void ClearEnvironment() {
+    DCHECK(IsFinished());
+    DCHECK(end()->SuccessorCount() == 0);
+    last_environment_ = NULL;
+  }
+  bool HasEnvironment() const { return last_environment_ != NULL; }
+  void UpdateEnvironment(HEnvironment* env);
+  HBasicBlock* parent_loop_header() const { return parent_loop_header_; }
+
+  void set_parent_loop_header(HBasicBlock* block) {
+    DCHECK(parent_loop_header_ == NULL);
+    parent_loop_header_ = block;
+  }
+
+  bool HasParentLoopHeader() const { return parent_loop_header_ != NULL; }
+
+  void SetJoinId(BailoutId ast_id);
+
+  int PredecessorIndexOf(HBasicBlock* predecessor) const;
+  HPhi* AddNewPhi(int merged_index);
+  HSimulate* AddNewSimulate(BailoutId ast_id, SourcePosition position,
+                            RemovableSimulate removable = FIXED_SIMULATE) {
+    HSimulate* instr = CreateSimulate(ast_id, removable);
+    AddInstruction(instr, position);
+    return instr;
+  }
+  void AssignCommonDominator(HBasicBlock* other);
+  void AssignLoopSuccessorDominators();
+
+  // If a target block is tagged as an inline function return, all
+  // predecessors should contain the inlined exit sequence:
+  //
+  // LeaveInlined
+  // Simulate (caller's environment)
+  // Goto (target block)
+  bool IsInlineReturnTarget() const { return is_inline_return_target_; }
+  void MarkAsInlineReturnTarget(HBasicBlock* inlined_entry_block) {
+    is_inline_return_target_ = true;
+    inlined_entry_block_ = inlined_entry_block;
+  }
+  HBasicBlock* inlined_entry_block() { return inlined_entry_block_; }
+
+  bool IsDeoptimizing() const {
+    return end() != NULL && end()->IsDeoptimize();
+  }
+
+  void MarkUnreachable();
+  bool IsUnreachable() const { return !is_reachable_; }
+  bool IsReachable() const { return is_reachable_; }
+
+  bool IsLoopSuccessorDominator() const {
+    return dominates_loop_successors_;
+  }
+  void MarkAsLoopSuccessorDominator() {
+    dominates_loop_successors_ = true;
+  }
+
+  bool IsOrdered() const { return is_ordered_; }
+  void MarkAsOrdered() { is_ordered_ = true; }
+
+  void MarkSuccEdgeUnreachable(int succ);
+
+  inline Zone* zone() const;
+
+#ifdef DEBUG
+  void Verify();
+#endif
+
+ protected:
+  friend class HGraphBuilder;
+
+  HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
+  void Finish(HControlInstruction* last, SourcePosition position);
+  void FinishExit(HControlInstruction* instruction, SourcePosition position);
+  void Goto(HBasicBlock* block, SourcePosition position,
+            FunctionState* state = NULL, bool add_simulate = true);
+  void GotoNoSimulate(HBasicBlock* block, SourcePosition position) {
+    Goto(block, position, NULL, false);
+  }
+
+  // Add the inlined function exit sequence: emit an HLeaveInlined
+  // instruction and update the bailout environment.
+  void AddLeaveInlined(HValue* return_value, FunctionState* state,
+                       SourcePosition position);
+
+ private:
+  void RegisterPredecessor(HBasicBlock* pred);
+  void AddDominatedBlock(HBasicBlock* block);
+
+  int block_id_;
+  HGraph* graph_;
+  ZoneList<HPhi*> phis_;
+  HInstruction* first_;
+  HInstruction* last_;
+  HControlInstruction* end_;
+  HLoopInformation* loop_information_;
+  ZoneList<HBasicBlock*> predecessors_;
+  HBasicBlock* dominator_;
+  ZoneList<HBasicBlock*> dominated_blocks_;
+  HEnvironment* last_environment_;
+  // Outgoing parameter count at block exit, set during lithium translation.
+  int argument_count_;
+  // Instruction indices into the lithium code stream.
+  int first_instruction_index_;
+  int last_instruction_index_;
+  ZoneList<int> deleted_phis_;
+  HBasicBlock* parent_loop_header_;
+  // For blocks marked as inline return target: the block with HEnterInlined.
+  HBasicBlock* inlined_entry_block_;
+  bool is_inline_return_target_ : 1;
+  bool is_reachable_ : 1;
+  bool dominates_loop_successors_ : 1;
+  bool is_osr_entry_ : 1;
+  bool is_ordered_ : 1;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const HBasicBlock& b);
+
+
+class HPredecessorIterator final BASE_EMBEDDED {
+ public:
+  explicit HPredecessorIterator(HBasicBlock* block)
+      : predecessor_list_(block->predecessors()), current_(0) { }
+
+  bool Done() { return current_ >= predecessor_list_->length(); }
+  HBasicBlock* Current() { return predecessor_list_->at(current_); }
+  void Advance() { current_++; }
+
+ private:
+  const ZoneList<HBasicBlock*>* predecessor_list_;
+  int current_;
+};
+
+
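+// An instruction iterator that caches the successor before exposing the
+// current instruction, so the current instruction may safely be removed from
+// the block while iterating.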
+class HInstructionIterator final BASE_EMBEDDED {
+ public:
+  explicit HInstructionIterator(HBasicBlock* block)
+      : instr_(block->first()) {
+    next_ = Done() ? NULL : instr_->next();
+  }
+
+  inline bool Done() const { return instr_ == NULL; }
+  inline HInstruction* Current() { return instr_; }
+  inline void Advance() {
+    instr_ = next_;
+    next_ = Done() ? NULL : instr_->next();
+  }
+
+ private:
+  HInstruction* instr_;
+  HInstruction* next_;
+};
+
+
+class HLoopInformation final : public ZoneObject {
+ public:
+  HLoopInformation(HBasicBlock* loop_header, Zone* zone)
+      : back_edges_(4, zone),
+        loop_header_(loop_header),
+        blocks_(8, zone),
+        stack_check_(NULL) {
+    blocks_.Add(loop_header, zone);
+  }
+  ~HLoopInformation() {}
+
+  const ZoneList<HBasicBlock*>* back_edges() const { return &back_edges_; }
+  const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
+  HBasicBlock* loop_header() const { return loop_header_; }
+  HBasicBlock* GetLastBackEdge() const;
+  void RegisterBackEdge(HBasicBlock* block);
+
+  HStackCheck* stack_check() const { return stack_check_; }
+  void set_stack_check(HStackCheck* stack_check) {
+    stack_check_ = stack_check;
+  }
+
+  bool IsNestedInThisLoop(HLoopInformation* other) {
+    while (other != NULL) {
+      if (other == this) {
+        return true;
+      }
+      other = other->parent_loop();
+    }
+    return false;
+  }
+  HLoopInformation* parent_loop() {
+    HBasicBlock* parent_header = loop_header()->parent_loop_header();
+    return parent_header != NULL ? parent_header->loop_information() : NULL;
+  }
+
+ private:
+  void AddBlock(HBasicBlock* block);
+
+  ZoneList<HBasicBlock*> back_edges_;
+  HBasicBlock* loop_header_;
+  ZoneList<HBasicBlock*> blocks_;
+  HStackCheck* stack_check_;
+};
+
+
+class BoundsCheckTable;
+class InductionVariableBlocksTable;
+class HGraph final : public ZoneObject {
+ public:
+  explicit HGraph(CompilationInfo* info);
+
+  Isolate* isolate() const { return isolate_; }
+  Zone* zone() const { return zone_; }
+  CompilationInfo* info() const { return info_; }
+
+  const ZoneList<HBasicBlock*>* blocks() const { return &blocks_; }
+  const ZoneList<HPhi*>* phi_list() const { return phi_list_; }
+  HBasicBlock* entry_block() const { return entry_block_; }
+  HEnvironment* start_environment() const { return start_environment_; }
+
+  void FinalizeUniqueness();
+  void OrderBlocks();
+  void AssignDominators();
+  void RestoreActualValues();
+
+  // Returns false if there are phi-uses of the arguments-object
+  // which are not supported by the optimizing compiler.
+  bool CheckArgumentsPhiUses();
+
+  // Returns false if there are phi-uses of an uninitialized const
+  // which are not supported by the optimizing compiler.
+  bool CheckConstPhiUses();
+
+  void CollectPhis();
+
+  HConstant* GetConstantUndefined();
+  HConstant* GetConstant0();
+  HConstant* GetConstant1();
+  HConstant* GetConstantMinus1();
+  HConstant* GetConstantTrue();
+  HConstant* GetConstantFalse();
+  HConstant* GetConstantBool(bool value);
+  HConstant* GetConstantHole();
+  HConstant* GetConstantNull();
+  HConstant* GetInvalidContext();
+
+  bool IsConstantUndefined(HConstant* constant);
+  bool IsConstant0(HConstant* constant);
+  bool IsConstant1(HConstant* constant);
+  bool IsConstantMinus1(HConstant* constant);
+  bool IsConstantTrue(HConstant* constant);
+  bool IsConstantFalse(HConstant* constant);
+  bool IsConstantHole(HConstant* constant);
+  bool IsConstantNull(HConstant* constant);
+  bool IsStandardConstant(HConstant* constant);
+
+  HBasicBlock* CreateBasicBlock();
+  HArgumentsObject* GetArgumentsObject() const {
+    return arguments_object_.get();
+  }
+
+  void SetArgumentsObject(HArgumentsObject* object) {
+    arguments_object_.set(object);
+  }
+
+  int GetMaximumValueID() const { return values_.length(); }
+  int GetNextBlockID() { return next_block_id_++; }
+  int GetNextValueID(HValue* value) {
+    DCHECK(!disallow_adding_new_values_);
+    values_.Add(value, zone());
+    return values_.length() - 1;
+  }
+  HValue* LookupValue(int id) const {
+    if (id >= 0 && id < values_.length()) return values_[id];
+    return NULL;
+  }
+  void DisallowAddingNewValues() {
+    disallow_adding_new_values_ = true;
+  }
+
+  bool Optimize(BailoutReason* bailout_reason);
+
+#ifdef DEBUG
+  void Verify(bool do_full_verify) const;
+#endif
+
+  bool has_osr() {
+    return osr_ != NULL;
+  }
+
+  void set_osr(HOsrBuilder* osr) {
+    osr_ = osr;
+  }
+
+  HOsrBuilder* osr() {
+    return osr_;
+  }
+
+  int update_type_change_checksum(int delta) {
+    type_change_checksum_ += delta;
+    return type_change_checksum_;
+  }
+
+  void update_maximum_environment_size(int environment_size) {
+    if (environment_size > maximum_environment_size_) {
+      maximum_environment_size_ = environment_size;
+    }
+  }
+  int maximum_environment_size() { return maximum_environment_size_; }
+
+  bool use_optimistic_licm() {
+    return use_optimistic_licm_;
+  }
+
+  void set_use_optimistic_licm(bool value) {
+    use_optimistic_licm_ = value;
+  }
+
+  void MarkRecursive() { is_recursive_ = true; }
+  bool is_recursive() const { return is_recursive_; }
+
+  void MarkDependsOnEmptyArrayProtoElements() {
+    // Add map dependency if not already added.
+    if (depends_on_empty_array_proto_elements_) return;
+    info()->dependencies()->AssumePropertyCell(
+        isolate()->factory()->array_protector());
+    depends_on_empty_array_proto_elements_ = true;
+  }
+
+  bool depends_on_empty_array_proto_elements() {
+    return depends_on_empty_array_proto_elements_;
+  }
+
+  bool has_uint32_instructions() {
+    DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
+    return uint32_instructions_ != NULL;
+  }
+
+  ZoneList<HInstruction*>* uint32_instructions() {
+    DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
+    return uint32_instructions_;
+  }
+
+  void RecordUint32Instruction(HInstruction* instr) {
+    DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
+    if (uint32_instructions_ == NULL) {
+      uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone());
+    }
+    uint32_instructions_->Add(instr, zone());
+  }
+
+  void IncrementInNoSideEffectsScope() { no_side_effects_scope_count_++; }
+  void DecrementInNoSideEffectsScope() { no_side_effects_scope_count_--; }
+  bool IsInsideNoSideEffectsScope() { return no_side_effects_scope_count_ > 0; }
+
+  // If we are tracking source positions, this function assigns a unique
+  // identifier to each inlining and dumps the function source if it was
+  // inlined for the first time during the current optimization.
+  int TraceInlinedFunction(Handle<SharedFunctionInfo> shared,
+                           SourcePosition position);
+
+  // Converts the given SourcePosition to an absolute offset from the start
+  // of the corresponding script.
+  int SourcePositionToScriptPosition(SourcePosition position);
+
+ private:
+  HConstant* ReinsertConstantIfNecessary(HConstant* constant);
+  HConstant* GetConstant(SetOncePointer<HConstant>* pointer,
+                         int32_t integer_value);
+
+  template<class Phase>
+  void Run() {
+    Phase phase(this);
+    phase.Run();
+  }
+
+  Isolate* isolate_;
+  int next_block_id_;
+  HBasicBlock* entry_block_;
+  HEnvironment* start_environment_;
+  ZoneList<HBasicBlock*> blocks_;
+  ZoneList<HValue*> values_;
+  ZoneList<HPhi*>* phi_list_;
+  ZoneList<HInstruction*>* uint32_instructions_;
+  SetOncePointer<HConstant> constant_undefined_;
+  SetOncePointer<HConstant> constant_0_;
+  SetOncePointer<HConstant> constant_1_;
+  SetOncePointer<HConstant> constant_minus1_;
+  SetOncePointer<HConstant> constant_true_;
+  SetOncePointer<HConstant> constant_false_;
+  SetOncePointer<HConstant> constant_the_hole_;
+  SetOncePointer<HConstant> constant_null_;
+  SetOncePointer<HConstant> constant_invalid_context_;
+  SetOncePointer<HArgumentsObject> arguments_object_;
+
+  HOsrBuilder* osr_;
+
+  CompilationInfo* info_;
+  Zone* zone_;
+
+  bool is_recursive_;
+  bool use_optimistic_licm_;
+  bool depends_on_empty_array_proto_elements_;
+  int type_change_checksum_;
+  int maximum_environment_size_;
+  int no_side_effects_scope_count_;
+  bool disallow_adding_new_values_;
+
+  DISALLOW_COPY_AND_ASSIGN(HGraph);
+};
+
+
+Zone* HBasicBlock::zone() const { return graph_->zone(); }
+
+
+// Type of stack frame an environment might refer to.
+enum FrameType {
+  JS_FUNCTION,
+  JS_CONSTRUCT,
+  JS_GETTER,
+  JS_SETTER,
+  ARGUMENTS_ADAPTOR,
+  STUB
+};
+
+
+class HEnvironment final : public ZoneObject {
+ public:
+  HEnvironment(HEnvironment* outer,
+               Scope* scope,
+               Handle<JSFunction> closure,
+               Zone* zone);
+
+  HEnvironment(Zone* zone, int parameter_count);
+
+  HEnvironment* arguments_environment() {
+    return outer()->frame_type() == ARGUMENTS_ADAPTOR ? outer() : this;
+  }
+
+  // Simple accessors.
+  Handle<JSFunction> closure() const { return closure_; }
+  const ZoneList<HValue*>* values() const { return &values_; }
+  const GrowableBitVector* assigned_variables() const {
+    return &assigned_variables_;
+  }
+  FrameType frame_type() const { return frame_type_; }
+  int parameter_count() const { return parameter_count_; }
+  int specials_count() const { return specials_count_; }
+  int local_count() const { return local_count_; }
+  HEnvironment* outer() const { return outer_; }
+  int pop_count() const { return pop_count_; }
+  int push_count() const { return push_count_; }
+
+  BailoutId ast_id() const { return ast_id_; }
+  void set_ast_id(BailoutId id) { ast_id_ = id; }
+
+  HEnterInlined* entry() const { return entry_; }
+  void set_entry(HEnterInlined* entry) { entry_ = entry; }
+
+  int length() const { return values_.length(); }
+
+  int first_expression_index() const {
+    return parameter_count() + specials_count() + local_count();
+  }
+
+  int first_local_index() const {
+    return parameter_count() + specials_count();
+  }
+
+  void Bind(Variable* variable, HValue* value) {
+    Bind(IndexFor(variable), value);
+  }
+
+  void Bind(int index, HValue* value);
+
+  void BindContext(HValue* value) {
+    Bind(parameter_count(), value);
+  }
+
+  HValue* Lookup(Variable* variable) const {
+    return Lookup(IndexFor(variable));
+  }
+
+  HValue* Lookup(int index) const {
+    HValue* result = values_[index];
+    DCHECK(result != NULL);
+    return result;
+  }
+
+  HValue* context() const {
+    // Return first special.
+    return Lookup(parameter_count());
+  }
+
+  void Push(HValue* value) {
+    DCHECK(value != NULL);
+    ++push_count_;
+    values_.Add(value, zone());
+  }
+
+  HValue* Pop() {
+    DCHECK(!ExpressionStackIsEmpty());
+    if (push_count_ > 0) {
+      --push_count_;
+    } else {
+      ++pop_count_;
+    }
+    return values_.RemoveLast();
+  }
+
+  void Drop(int count);
+
+  HValue* Top() const { return ExpressionStackAt(0); }
+
+  bool ExpressionStackIsEmpty() const;
+
+  HValue* ExpressionStackAt(int index_from_top) const {
+    int index = length() - index_from_top - 1;
+    DCHECK(HasExpressionAt(index));
+    return values_[index];
+  }
+
+  void SetExpressionStackAt(int index_from_top, HValue* value);
+  HValue* RemoveExpressionStackAt(int index_from_top);
+
+  void Print() const;
+
+  HEnvironment* Copy() const;
+  HEnvironment* CopyWithoutHistory() const;
+  HEnvironment* CopyAsLoopHeader(HBasicBlock* block) const;
+
+  // Create an "inlined version" of this environment, where the original
+  // environment is the outer environment but the top expression stack
+  // elements are moved to an inner environment as parameters.
+  HEnvironment* CopyForInlining(Handle<JSFunction> target,
+                                int arguments,
+                                FunctionLiteral* function,
+                                HConstant* undefined,
+                                InliningKind inlining_kind) const;
+
+  HEnvironment* DiscardInlined(bool drop_extra) {
+    HEnvironment* outer = outer_;
+    while (outer->frame_type() != JS_FUNCTION) outer = outer->outer_;
+    if (drop_extra) outer->Drop(1);
+    return outer;
+  }
+
+  void AddIncomingEdge(HBasicBlock* block, HEnvironment* other);
+
+  void ClearHistory() {
+    pop_count_ = 0;
+    push_count_ = 0;
+    assigned_variables_.Clear();
+  }
+
+  void SetValueAt(int index, HValue* value) {
+    DCHECK(index < length());
+    values_[index] = value;
+  }
+
+  // Map a variable to an environment index.  Parameter indices are shifted
+  // by 1 (receiver is parameter index -1 but environment index 0).
+  // Stack-allocated local indices are shifted by the number of parameters.
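+  // For example, with two declared parameters and one special (the context),
+  // the receiver (parameter index -1) maps to 0, the parameters map to 1 and
+  // 2, the context occupies index 3, and local 0 maps to 4.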
+  int IndexFor(Variable* variable) const {
+    DCHECK(variable->IsStackAllocated());
+    int shift = variable->IsParameter()
+        ? 1
+        : parameter_count_ + specials_count_;
+    return variable->index() + shift;
+  }
+
+  bool is_local_index(int i) const {
+    return i >= first_local_index() && i < first_expression_index();
+  }
+
+  bool is_parameter_index(int i) const {
+    return i >= 0 && i < parameter_count();
+  }
+
+  bool is_special_index(int i) const {
+    return i >= parameter_count() && i < parameter_count() + specials_count();
+  }
+
+  Zone* zone() const { return zone_; }
+
+ private:
+  HEnvironment(const HEnvironment* other, Zone* zone);
+
+  HEnvironment(HEnvironment* outer,
+               Handle<JSFunction> closure,
+               FrameType frame_type,
+               int arguments,
+               Zone* zone);
+
+  // Create an artificial stub environment (e.g. for argument adaptor or
+  // constructor stub).
+  HEnvironment* CreateStubEnvironment(HEnvironment* outer,
+                                      Handle<JSFunction> target,
+                                      FrameType frame_type,
+                                      int arguments) const;
+
+  // True if index is included in the expression stack part of the environment.
+  bool HasExpressionAt(int index) const;
+
+  void Initialize(int parameter_count, int local_count, int stack_height);
+  void Initialize(const HEnvironment* other);
+
+  Handle<JSFunction> closure_;
+  // Value array [parameters] [specials] [locals] [temporaries].
+  ZoneList<HValue*> values_;
+  GrowableBitVector assigned_variables_;
+  FrameType frame_type_;
+  int parameter_count_;
+  int specials_count_;
+  int local_count_;
+  HEnvironment* outer_;
+  HEnterInlined* entry_;
+  int pop_count_;
+  int push_count_;
+  BailoutId ast_id_;
+  Zone* zone_;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const HEnvironment& env);
+
+
+class HOptimizedGraphBuilder;
+
+enum ArgumentsAllowedFlag {
+  ARGUMENTS_NOT_ALLOWED,
+  ARGUMENTS_ALLOWED,
+  ARGUMENTS_FAKED
+};
+
+
+class HIfContinuation;
+
+// This class is not BASE_EMBEDDED because our inlining implementation uses
+// new and delete.
+class AstContext {
+ public:
+  bool IsEffect() const { return kind_ == Expression::kEffect; }
+  bool IsValue() const { return kind_ == Expression::kValue; }
+  bool IsTest() const { return kind_ == Expression::kTest; }
+
+  // 'Fill' this context with a hydrogen value.  The value is assumed to
+  // have already been inserted in the instruction stream (or not to need
+  // insertion, e.g., an HPhi).  Call this function in tail position in the
+  // Visit functions for expressions.
+  virtual void ReturnValue(HValue* value) = 0;
+
+  // Add a hydrogen instruction to the instruction stream (recording an
+  // environment simulation if necessary) and then fill this context with
+  // the instruction as value.
+  virtual void ReturnInstruction(HInstruction* instr, BailoutId ast_id) = 0;
+
+  // Finishes the current basic block and materializes a boolean for a
+  // value context, nothing for an effect context, and generates a branch
+  // for a test context.  Call this function in tail position in the Visit
+  // functions for expressions.
+  virtual void ReturnControl(HControlInstruction* instr, BailoutId ast_id) = 0;
+
+  // Finishes the current basic block and materializes a boolean for a
+  // value context, nothing for an effect context, and generates a branch
+  // for a test context.  Call this function in tail position in the Visit
+  // functions for expressions that use an IfBuilder.
+  virtual void ReturnContinuation(HIfContinuation* continuation,
+                                  BailoutId ast_id) = 0;
+
+  void set_typeof_mode(TypeofMode typeof_mode) { typeof_mode_ = typeof_mode; }
+  TypeofMode typeof_mode() { return typeof_mode_; }
+
+ protected:
+  AstContext(HOptimizedGraphBuilder* owner, Expression::Context kind);
+  virtual ~AstContext();
+
+  HOptimizedGraphBuilder* owner() const { return owner_; }
+
+  inline Zone* zone() const;
+
+  // We want to be able to assert, in a context-specific way, that the stack
+  // height makes sense when the context is filled.
+#ifdef DEBUG
+  int original_length_;
+#endif
+
+ private:
+  HOptimizedGraphBuilder* owner_;
+  Expression::Context kind_;
+  AstContext* outer_;
+  TypeofMode typeof_mode_;
+};
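+
+// Usage sketch (mirrors how the graph builder visits a subexpression for
+// its value; ValueContext is declared below):
+//
+//   { ValueContext for_value(this, ARGUMENTS_NOT_ALLOWED);
+//     Visit(expr);  // ends by calling ReturnValue()/ReturnInstruction()
+//   }               // ~AstContext restores the outer context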
+
+
+class EffectContext final : public AstContext {
+ public:
+  explicit EffectContext(HOptimizedGraphBuilder* owner)
+      : AstContext(owner, Expression::kEffect) {
+  }
+  ~EffectContext() override;
+
+  void ReturnValue(HValue* value) override;
+  void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
+  void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
+  void ReturnContinuation(HIfContinuation* continuation,
+                          BailoutId ast_id) override;
+};
+
+
+class ValueContext final : public AstContext {
+ public:
+  ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
+      : AstContext(owner, Expression::kValue), flag_(flag) {
+  }
+  ~ValueContext() override;
+
+  void ReturnValue(HValue* value) override;
+  void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
+  void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
+  void ReturnContinuation(HIfContinuation* continuation,
+                          BailoutId ast_id) override;
+
+  bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
+
+ private:
+  ArgumentsAllowedFlag flag_;
+};
+
+
+class TestContext final : public AstContext {
+ public:
+  TestContext(HOptimizedGraphBuilder* owner,
+              Expression* condition,
+              HBasicBlock* if_true,
+              HBasicBlock* if_false)
+      : AstContext(owner, Expression::kTest),
+        condition_(condition),
+        if_true_(if_true),
+        if_false_(if_false) {
+  }
+
+  void ReturnValue(HValue* value) override;
+  void ReturnInstruction(HInstruction* instr, BailoutId ast_id) override;
+  void ReturnControl(HControlInstruction* instr, BailoutId ast_id) override;
+  void ReturnContinuation(HIfContinuation* continuation,
+                          BailoutId ast_id) override;
+
+  static TestContext* cast(AstContext* context) {
+    DCHECK(context->IsTest());
+    return reinterpret_cast<TestContext*>(context);
+  }
+
+  Expression* condition() const { return condition_; }
+  HBasicBlock* if_true() const { return if_true_; }
+  HBasicBlock* if_false() const { return if_false_; }
+
+ private:
+  // Builds the shared core of the translation that unpacks a value into
+  // control flow.
+  void BuildBranch(HValue* value);
+
+  Expression* condition_;
+  HBasicBlock* if_true_;
+  HBasicBlock* if_false_;
+};
+
+
+class FunctionState final {
+ public:
+  FunctionState(HOptimizedGraphBuilder* owner,
+                CompilationInfo* info,
+                InliningKind inlining_kind,
+                int inlining_id);
+  ~FunctionState();
+
+  CompilationInfo* compilation_info() { return compilation_info_; }
+  AstContext* call_context() { return call_context_; }
+  InliningKind inlining_kind() const { return inlining_kind_; }
+  HBasicBlock* function_return() { return function_return_; }
+  TestContext* test_context() { return test_context_; }
+  void ClearInlinedTestContext() {
+    delete test_context_;
+    test_context_ = NULL;
+  }
+
+  FunctionState* outer() { return outer_; }
+
+  HEnterInlined* entry() { return entry_; }
+  void set_entry(HEnterInlined* entry) { entry_ = entry; }
+
+  HArgumentsObject* arguments_object() { return arguments_object_; }
+  void set_arguments_object(HArgumentsObject* arguments_object) {
+    arguments_object_ = arguments_object;
+  }
+
+  HArgumentsElements* arguments_elements() { return arguments_elements_; }
+  void set_arguments_elements(HArgumentsElements* arguments_elements) {
+    arguments_elements_ = arguments_elements;
+  }
+
+  bool arguments_pushed() { return arguments_elements() != NULL; }
+
+  int inlining_id() const { return inlining_id_; }
+
+ private:
+  HOptimizedGraphBuilder* owner_;
+
+  CompilationInfo* compilation_info_;
+
+  // During function inlining, the expression context of the call being
+  // inlined. NULL when not inlining.
+  AstContext* call_context_;
+
+  // The kind of call which is currently being inlined.
+  InliningKind inlining_kind_;
+
+  // When inlining in an effect or value context, this is the return block.
+  // It is NULL otherwise.  When inlining in a test context, there is a
+  // pair of return blocks in the context.  When not inlining, there is no
+  // local return point.
+  HBasicBlock* function_return_;
+
+  // When inlining a call in a test context, a context containing a pair of
+  // return blocks.  NULL in all other cases.
+  TestContext* test_context_;
+
+  // When inlining, the HEnterInlined instruction corresponding to the
+  // function entry.
+  HEnterInlined* entry_;
+
+  HArgumentsObject* arguments_object_;
+  HArgumentsElements* arguments_elements_;
+
+  int inlining_id_;
+  SourcePosition outer_source_position_;
+
+  FunctionState* outer_;
+};
+
+
+class HIfContinuation final {
+ public:
+  HIfContinuation()
+    : continuation_captured_(false),
+      true_branch_(NULL),
+      false_branch_(NULL) {}
+  HIfContinuation(HBasicBlock* true_branch,
+                  HBasicBlock* false_branch)
+      : continuation_captured_(true), true_branch_(true_branch),
+        false_branch_(false_branch) {}
+  ~HIfContinuation() { DCHECK(!continuation_captured_); }
+
+  void Capture(HBasicBlock* true_branch,
+               HBasicBlock* false_branch) {
+    DCHECK(!continuation_captured_);
+    true_branch_ = true_branch;
+    false_branch_ = false_branch;
+    continuation_captured_ = true;
+  }
+
+  void Continue(HBasicBlock** true_branch,
+                HBasicBlock** false_branch) {
+    DCHECK(continuation_captured_);
+    *true_branch = true_branch_;
+    *false_branch = false_branch_;
+    continuation_captured_ = false;
+  }
+
+  bool IsTrueReachable() { return true_branch_ != NULL; }
+  bool IsFalseReachable() { return false_branch_ != NULL; }
+  bool TrueAndFalseReachable() {
+    return IsTrueReachable() || IsFalseReachable();
+  }
+
+  HBasicBlock* true_branch() const { return true_branch_; }
+  HBasicBlock* false_branch() const { return false_branch_; }
+
+ private:
+  bool continuation_captured_;
+  HBasicBlock* true_branch_;
+  HBasicBlock* false_branch_;
+};
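+
+// Usage sketch: a continuation is captured exactly once and consumed
+// exactly once (the DCHECKs above enforce the pairing):
+//
+//   HIfContinuation cont;
+//   cont.Capture(true_block, false_block);
+//   ...
+//   HBasicBlock* if_true = NULL;
+//   HBasicBlock* if_false = NULL;
+//   cont.Continue(&if_true, &if_false);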
+
+
+class HAllocationMode final BASE_EMBEDDED {
+ public:
+  explicit HAllocationMode(Handle<AllocationSite> feedback_site)
+      : current_site_(NULL), feedback_site_(feedback_site),
+        pretenure_flag_(NOT_TENURED) {}
+  explicit HAllocationMode(HValue* current_site)
+      : current_site_(current_site), pretenure_flag_(NOT_TENURED) {}
+  explicit HAllocationMode(PretenureFlag pretenure_flag)
+      : current_site_(NULL), pretenure_flag_(pretenure_flag) {}
+  HAllocationMode()
+      : current_site_(NULL), pretenure_flag_(NOT_TENURED) {}
+
+  HValue* current_site() const { return current_site_; }
+  Handle<AllocationSite> feedback_site() const { return feedback_site_; }
+
+  bool CreateAllocationMementos() const WARN_UNUSED_RESULT {
+    return current_site() != NULL;
+  }
+
+  PretenureFlag GetPretenureMode() const WARN_UNUSED_RESULT {
+    if (!feedback_site().is_null()) return feedback_site()->GetPretenureMode();
+    return pretenure_flag_;
+  }
+
+ private:
+  HValue* current_site_;
+  Handle<AllocationSite> feedback_site_;
+  PretenureFlag pretenure_flag_;
+};
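+
+// Note: when a feedback AllocationSite is present, GetPretenureMode()
+// defers to the site; otherwise the explicitly given PretenureFlag
+// (NOT_TENURED by default, i.e. new-space allocation) is used.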
+
+
+class HGraphBuilder {
+ public:
+  explicit HGraphBuilder(CompilationInfo* info)
+      : info_(info),
+        graph_(NULL),
+        current_block_(NULL),
+        scope_(info->scope()),
+        position_(SourcePosition::Unknown()),
+        start_position_(0) {}
+  virtual ~HGraphBuilder() {}
+
+  Scope* scope() const { return scope_; }
+  void set_scope(Scope* scope) { scope_ = scope; }
+
+  HBasicBlock* current_block() const { return current_block_; }
+  void set_current_block(HBasicBlock* block) { current_block_ = block; }
+  HEnvironment* environment() const {
+    return current_block()->last_environment();
+  }
+  Zone* zone() const { return info_->zone(); }
+  HGraph* graph() const { return graph_; }
+  Isolate* isolate() const { return graph_->isolate(); }
+  CompilationInfo* top_info() { return info_; }
+
+  HGraph* CreateGraph();
+
+  // Bailout environment manipulation.
+  void Push(HValue* value) { environment()->Push(value); }
+  HValue* Pop() { return environment()->Pop(); }
+
+  virtual HValue* context() = 0;
+
+  // Adding instructions.
+  HInstruction* AddInstruction(HInstruction* instr);
+  void FinishCurrentBlock(HControlInstruction* last);
+  void FinishExitCurrentBlock(HControlInstruction* instruction);
+
+  void Goto(HBasicBlock* from,
+            HBasicBlock* target,
+            FunctionState* state = NULL,
+            bool add_simulate = true) {
+    from->Goto(target, source_position(), state, add_simulate);
+  }
+  void Goto(HBasicBlock* target,
+            FunctionState* state = NULL,
+            bool add_simulate = true) {
+    Goto(current_block(), target, state, add_simulate);
+  }
+  void GotoNoSimulate(HBasicBlock* from, HBasicBlock* target) {
+    Goto(from, target, NULL, false);
+  }
+  void GotoNoSimulate(HBasicBlock* target) {
+    Goto(target, NULL, false);
+  }
+  void AddLeaveInlined(HBasicBlock* block,
+                       HValue* return_value,
+                       FunctionState* state) {
+    block->AddLeaveInlined(return_value, state, source_position());
+  }
+  void AddLeaveInlined(HValue* return_value, FunctionState* state) {
+    return AddLeaveInlined(current_block(), return_value, state);
+  }
+
+  template <class I>
+  HInstruction* NewUncasted() {
+    return I::New(isolate(), zone(), context());
+  }
+
+  template <class I>
+  I* New() {
+    return I::New(isolate(), zone(), context());
+  }
+
+  template<class I>
+  HInstruction* AddUncasted() { return AddInstruction(NewUncasted<I>()); }
+
+  template<class I>
+  I* Add() { return AddInstructionTyped(New<I>()); }
+
+  template<class I, class P1>
+  HInstruction* NewUncasted(P1 p1) {
+    return I::New(isolate(), zone(), context(), p1);
+  }
+
+  template <class I, class P1>
+  I* New(P1 p1) {
+    return I::New(isolate(), zone(), context(), p1);
+  }
+
+  template<class I, class P1>
+  HInstruction* AddUncasted(P1 p1) {
+    HInstruction* result = AddInstruction(NewUncasted<I>(p1));
+    // Specializations must have their parameters properly cast
+    // to avoid landing here.
+    DCHECK(!result->IsReturn() && !result->IsSimulate() &&
+           !result->IsDeoptimize());
+    return result;
+  }
+
+  template<class I, class P1>
+  I* Add(P1 p1) {
+    I* result = AddInstructionTyped(New<I>(p1));
+    // Specializations must have their parameters properly cast
+    // to avoid landing here.
+    DCHECK(!result->IsReturn() && !result->IsSimulate() &&
+           !result->IsDeoptimize());
+    return result;
+  }
+
+  template<class I, class P1, class P2>
+  HInstruction* NewUncasted(P1 p1, P2 p2) {
+    return I::New(isolate(), zone(), context(), p1, p2);
+  }
+
+  template<class I, class P1, class P2>
+  I* New(P1 p1, P2 p2) {
+    return I::New(isolate(), zone(), context(), p1, p2);
+  }
+
+  template<class I, class P1, class P2>
+  HInstruction* AddUncasted(P1 p1, P2 p2) {
+    HInstruction* result = AddInstruction(NewUncasted<I>(p1, p2));
+    // Specializations must have their parameters properly cast
+    // to avoid landing here.
+    DCHECK(!result->IsSimulate());
+    return result;
+  }
+
+  template<class I, class P1, class P2>
+  I* Add(P1 p1, P2 p2) {
+    I* result = AddInstructionTyped(New<I>(p1, p2));
+    // Specializations must have their parameters properly cast
+    // to avoid landing here.
+    DCHECK(!result->IsSimulate());
+    return result;
+  }
+
+  template<class I, class P1, class P2, class P3>
+  HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3);
+  }
+
+  template<class I, class P1, class P2, class P3>
+  I* New(P1 p1, P2 p2, P3 p3) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3);
+  }
+
+  template<class I, class P1, class P2, class P3>
+  HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3) {
+    return AddInstruction(NewUncasted<I>(p1, p2, p3));
+  }
+
+  template<class I, class P1, class P2, class P3>
+  I* Add(P1 p1, P2 p2, P3 p3) {
+    return AddInstructionTyped(New<I>(p1, p2, p3));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4>
+  HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3, p4);
+  }
+
+  template<class I, class P1, class P2, class P3, class P4>
+  I* New(P1 p1, P2 p2, P3 p3, P4 p4) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3, p4);
+  }
+
+  template<class I, class P1, class P2, class P3, class P4>
+  HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4) {
+    return AddInstruction(NewUncasted<I>(p1, p2, p3, p4));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4>
+  I* Add(P1 p1, P2 p2, P3 p3, P4 p4) {
+    return AddInstructionTyped(New<I>(p1, p2, p3, p4));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4, class P5>
+  HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5);
+  }
+
+  template<class I, class P1, class P2, class P3, class P4, class P5>
+  I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5);
+  }
+
+  template<class I, class P1, class P2, class P3, class P4, class P5>
+  HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+    return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4, class P5>
+  I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) {
+    return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
+  HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6);
+  }
+
+  template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
+  I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6);
+  }
+
+  template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
+  HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
+    return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4, class P5, class P6>
+  I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6) {
+    return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4,
+           class P5, class P6, class P7>
+  HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7);
+  }
+
+  template<class I, class P1, class P2, class P3, class P4,
+           class P5, class P6, class P7>
+  I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7);
+  }
+
+  template<class I, class P1, class P2, class P3,
+           class P4, class P5, class P6, class P7>
+  HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
+    return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7));
+  }
+
+  template<class I, class P1, class P2, class P3,
+           class P4, class P5, class P6, class P7>
+  I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7) {
+    return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4,
+           class P5, class P6, class P7, class P8>
+  HInstruction* NewUncasted(P1 p1, P2 p2, P3 p3, P4 p4,
+                            P5 p5, P6 p6, P7 p7, P8 p8) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8);
+  }
+
+  template<class I, class P1, class P2, class P3, class P4,
+           class P5, class P6, class P7, class P8>
+  I* New(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
+    return I::New(isolate(), zone(), context(), p1, p2, p3, p4, p5, p6, p7, p8);
+  }
+
+  template<class I, class P1, class P2, class P3, class P4,
+           class P5, class P6, class P7, class P8>
+  HInstruction* AddUncasted(P1 p1, P2 p2, P3 p3, P4 p4,
+                            P5 p5, P6 p6, P7 p7, P8 p8) {
+    return AddInstruction(NewUncasted<I>(p1, p2, p3, p4, p5, p6, p7, p8));
+  }
+
+  template<class I, class P1, class P2, class P3, class P4,
+           class P5, class P6, class P7, class P8>
+  I* Add(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5, P6 p6, P7 p7, P8 p8) {
+    return AddInstructionTyped(New<I>(p1, p2, p3, p4, p5, p6, p7, p8));
+  }
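+
+  // The New/Add/AddUncasted families above are expanded by hand for each
+  // arity rather than using variadic templates. Typical call sites look
+  // like (cf. BuildDecodeField below):
+  //   HConstant* one = Add<HConstant>(1);
+  //   HValue* masked = AddUncasted<HBitwise>(Token::BIT_AND, word, mask);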
+
+  void AddSimulate(BailoutId id, RemovableSimulate removable = FIXED_SIMULATE);
+
+  // When initializing arrays, we'll unroll the loop if the number of elements
+  // is known at compile time and is <= kElementLoopUnrollThreshold.
+  static const int kElementLoopUnrollThreshold = 8;
+
+ protected:
+  virtual bool BuildGraph() = 0;
+
+  HBasicBlock* CreateBasicBlock(HEnvironment* env);
+  HBasicBlock* CreateLoopHeaderBlock();
+
+  template <class BitFieldClass>
+  HValue* BuildDecodeField(HValue* encoded_field) {
+    HValue* mask_value = Add<HConstant>(static_cast<int>(BitFieldClass::kMask));
+    HValue* masked_field =
+        AddUncasted<HBitwise>(Token::BIT_AND, encoded_field, mask_value);
+    return AddUncasted<HShr>(masked_field,
+        Add<HConstant>(static_cast<int>(BitFieldClass::kShift)));
+  }
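+
+  // Usage sketch: any BitField-like class exposing kMask and kShift works
+  // as BitFieldClass, e.g. (assuming Map::ElementsKindBits and a loaded
+  // |bit_field2| value):
+  //   HValue* kind = BuildDecodeField<Map::ElementsKindBits>(bit_field2);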
+
+  HValue* BuildGetElementsKind(HValue* object);
+
+  HValue* BuildCheckHeapObject(HValue* object);
+  HValue* BuildCheckString(HValue* string);
+  HValue* BuildWrapReceiver(HValue* object, HValue* function);
+
+  // Building common constructs
+  HValue* BuildCheckForCapacityGrow(HValue* object,
+                                    HValue* elements,
+                                    ElementsKind kind,
+                                    HValue* length,
+                                    HValue* key,
+                                    bool is_js_array,
+                                    PropertyAccessType access_type);
+
+  HValue* BuildCheckAndGrowElementsCapacity(HValue* object, HValue* elements,
+                                            ElementsKind kind, HValue* length,
+                                            HValue* capacity, HValue* key);
+
+  HValue* BuildCopyElementsOnWrite(HValue* object,
+                                   HValue* elements,
+                                   ElementsKind kind,
+                                   HValue* length);
+
+  void BuildTransitionElementsKind(HValue* object,
+                                   HValue* map,
+                                   ElementsKind from_kind,
+                                   ElementsKind to_kind,
+                                   bool is_jsarray);
+
+  HValue* BuildNumberToString(HValue* object, Type* type);
+  HValue* BuildToObject(HValue* receiver);
+
+  void BuildJSObjectCheck(HValue* receiver,
+                          int bit_field_mask);
+
+  // Checks a key value used in a keyed element access context. If the key
+  // is an index, i.e. a smi or a number in a unique string with a cached
+  // numeric value, the "true" branch of the continuation is joined.
+  // Otherwise, if the key is a name or a unique string, the "false"
+  // branch of the continuation is joined. Otherwise, a deoptimization is
+  // triggered. In both paths of the continuation, the key is pushed onto
+  // the top of the environment.
+  void BuildKeyedIndexCheck(HValue* key,
+                            HIfContinuation* join_continuation);
+
+  // Checks whether the properties of an object are stored in dictionary
+  // mode, in which case the "true" branch of the continuation is taken;
+  // otherwise the "false" branch is.
+  void BuildTestForDictionaryProperties(HValue* object,
+                                        HIfContinuation* continuation);
+
+  void BuildNonGlobalObjectCheck(HValue* receiver);
+
+  HValue* BuildKeyedLookupCacheHash(HValue* object,
+                                    HValue* key);
+
+  HValue* BuildUncheckedDictionaryElementLoad(HValue* receiver,
+                                              HValue* elements, HValue* key,
+                                              HValue* hash,
+                                              LanguageMode language_mode);
+
+  // ES6 section 7.4.7 CreateIterResultObject ( value, done )
+  HValue* BuildCreateIterResultObject(HValue* value, HValue* done);
+
+  HValue* BuildRegExpConstructResult(HValue* length,
+                                     HValue* index,
+                                     HValue* input);
+
+  // Allocates a new object according to the given allocation properties.
+  HAllocate* BuildAllocate(HValue* object_size,
+                           HType type,
+                           InstanceType instance_type,
+                           HAllocationMode allocation_mode);
+  // Computes the sum of two string lengths, taking care of overflow handling.
+  HValue* BuildAddStringLengths(HValue* left_length, HValue* right_length);
+  // Creates a cons string using the two input strings.
+  HValue* BuildCreateConsString(HValue* length,
+                                HValue* left,
+                                HValue* right,
+                                HAllocationMode allocation_mode);
+  // Copies characters from one sequential string to another.
+  void BuildCopySeqStringChars(HValue* src,
+                               HValue* src_offset,
+                               String::Encoding src_encoding,
+                               HValue* dst,
+                               HValue* dst_offset,
+                               String::Encoding dst_encoding,
+                               HValue* length);
+
+  // Aligns an object size to the object alignment boundary.
+  HValue* BuildObjectSizeAlignment(HValue* unaligned_size, int header_size);
+
+  // Both operands are non-empty strings.
+  HValue* BuildUncheckedStringAdd(HValue* left,
+                                  HValue* right,
+                                  HAllocationMode allocation_mode);
+  // Adds two strings using the given allocation mode, validating type
+  // feedback.
+  HValue* BuildStringAdd(HValue* left,
+                         HValue* right,
+                         HAllocationMode allocation_mode);
+
+  HInstruction* BuildUncheckedMonomorphicElementAccess(
+      HValue* checked_object,
+      HValue* key,
+      HValue* val,
+      bool is_js_array,
+      ElementsKind elements_kind,
+      PropertyAccessType access_type,
+      LoadKeyedHoleMode load_mode,
+      KeyedAccessStoreMode store_mode);
+
+  HInstruction* AddElementAccess(
+      HValue* elements, HValue* checked_key, HValue* val, HValue* dependency,
+      HValue* backing_store_owner, ElementsKind elements_kind,
+      PropertyAccessType access_type,
+      LoadKeyedHoleMode load_mode = NEVER_RETURN_HOLE);
+
+  HInstruction* AddLoadStringInstanceType(HValue* string);
+  HInstruction* AddLoadStringLength(HValue* string);
+  HInstruction* BuildLoadStringLength(HValue* string);
+  HStoreNamedField* AddStoreMapConstant(HValue* object, Handle<Map> map) {
+    return Add<HStoreNamedField>(object, HObjectAccess::ForMap(),
+                                 Add<HConstant>(map));
+  }
+  HLoadNamedField* AddLoadMap(HValue* object,
+                              HValue* dependency = NULL);
+  HLoadNamedField* AddLoadElements(HValue* object,
+                                   HValue* dependency = NULL);
+
+  bool MatchRotateRight(HValue* left,
+                        HValue* right,
+                        HValue** operand,
+                        HValue** shift_amount);
+
+  HValue* BuildBinaryOperation(Token::Value op, HValue* left, HValue* right,
+                               Type* left_type, Type* right_type,
+                               Type* result_type, Maybe<int> fixed_right_arg,
+                               HAllocationMode allocation_mode,
+                               Strength strength,
+                               BailoutId opt_id = BailoutId::None());
+
+  HLoadNamedField* AddLoadFixedArrayLength(HValue* object,
+                                           HValue* dependency = NULL);
+
+  HLoadNamedField* AddLoadArrayLength(HValue* object,
+                                      ElementsKind kind,
+                                      HValue* dependency = NULL);
+
+  HValue* AddLoadJSBuiltin(int context_index);
+
+  HValue* EnforceNumberType(HValue* number, Type* expected);
+  HValue* TruncateToNumber(HValue* value, Type** expected);
+
+  void FinishExitWithHardDeoptimization(Deoptimizer::DeoptReason reason);
+
+  void AddIncrementCounter(StatsCounter* counter);
+
+  class IfBuilder final {
+   public:
+    // If using this constructor, Initialize() must be called explicitly!
+    IfBuilder();
+
+    explicit IfBuilder(HGraphBuilder* builder);
+    IfBuilder(HGraphBuilder* builder,
+              HIfContinuation* continuation);
+
+    ~IfBuilder() {
+      if (!finished_) End();
+    }
+
+    void Initialize(HGraphBuilder* builder);
+
+    template<class Condition>
+    Condition* If(HValue* p) {
+      Condition* compare = builder()->New<Condition>(p);
+      AddCompare(compare);
+      return compare;
+    }
+
+    template<class Condition, class P2>
+    Condition* If(HValue* p1, P2 p2) {
+      Condition* compare = builder()->New<Condition>(p1, p2);
+      AddCompare(compare);
+      return compare;
+    }
+
+    template<class Condition, class P2, class P3>
+    Condition* If(HValue* p1, P2 p2, P3 p3) {
+      Condition* compare = builder()->New<Condition>(p1, p2, p3);
+      AddCompare(compare);
+      return compare;
+    }
+
+    template<class Condition>
+    Condition* IfNot(HValue* p) {
+      Condition* compare = If<Condition>(p);
+      compare->Not();
+      return compare;
+    }
+
+    template<class Condition, class P2>
+    Condition* IfNot(HValue* p1, P2 p2) {
+      Condition* compare = If<Condition>(p1, p2);
+      compare->Not();
+      return compare;
+    }
+
+    template<class Condition, class P2, class P3>
+    Condition* IfNot(HValue* p1, P2 p2, P3 p3) {
+      Condition* compare = If<Condition>(p1, p2, p3);
+      compare->Not();
+      return compare;
+    }
+
+    template<class Condition>
+    Condition* OrIf(HValue* p) {
+      Or();
+      return If<Condition>(p);
+    }
+
+    template<class Condition, class P2>
+    Condition* OrIf(HValue* p1, P2 p2) {
+      Or();
+      return If<Condition>(p1, p2);
+    }
+
+    template<class Condition, class P2, class P3>
+    Condition* OrIf(HValue* p1, P2 p2, P3 p3) {
+      Or();
+      return If<Condition>(p1, p2, p3);
+    }
+
+    template<class Condition>
+    Condition* AndIf(HValue* p) {
+      And();
+      return If<Condition>(p);
+    }
+
+    template<class Condition, class P2>
+    Condition* AndIf(HValue* p1, P2 p2) {
+      And();
+      return If<Condition>(p1, p2);
+    }
+
+    template<class Condition, class P2, class P3>
+    Condition* AndIf(HValue* p1, P2 p2, P3 p3) {
+      And();
+      return If<Condition>(p1, p2, p3);
+    }
+
+    void Or();
+    void And();
+
+    // Captures the current state of this IfBuilder in the specified
+    // continuation and ends this IfBuilder.
+    void CaptureContinuation(HIfContinuation* continuation);
+
+    // Joins the specified continuation from this IfBuilder and ends this
+    // IfBuilder. This appends a Goto instruction from the true branch of
+    // this IfBuilder to the true branch of the continuation unless the
+    // true branch of this IfBuilder is already finished, and vice versa
+    // for the false branch.
+    //
+    // The basic idea is as follows: you have several nested IfBuilders
+    // that you want to join based on two possible outcomes (e.g. success
+    // and failure). You can do this easily with this method, for example:
+    //
+    //   HIfContinuation cont(graph()->CreateBasicBlock(),
+    //                        graph()->CreateBasicBlock());
+    //   ...
+    //     IfBuilder if_whatever(this);
+    //     if_whatever.If<Condition>(arg);
+    //     if_whatever.Then();
+    //     ...
+    //     if_whatever.Else();
+    //     ...
+    //     if_whatever.JoinContinuation(&cont);
+    //   ...
+    //     IfBuilder if_something(this);
+    //     if_something.If<Condition>(arg1, arg2);
+    //     if_something.Then();
+    //     ...
+    //     if_something.Else();
+    //     ...
+    //     if_something.JoinContinuation(&cont);
+    //   ...
+    //   IfBuilder if_finally(this, &cont);
+    //   if_finally.Then();
+    //   // continues after then code of if_whatever or if_something.
+    //   ...
+    //   if_finally.Else();
+    //   // continues after else code of if_whatever or if_something.
+    //   ...
+    //   if_finally.End();
+    void JoinContinuation(HIfContinuation* continuation);
+
+    void Then();
+    void Else();
+    void End();
+    void EndUnreachable();
+
+    void Deopt(Deoptimizer::DeoptReason reason);
+    void ThenDeopt(Deoptimizer::DeoptReason reason) {
+      Then();
+      Deopt(reason);
+    }
+    void ElseDeopt(Deoptimizer::DeoptReason reason) {
+      Else();
+      Deopt(reason);
+    }
+
+    void Return(HValue* value);
+
+   private:
+    void InitializeDontCreateBlocks(HGraphBuilder* builder);
+
+    HControlInstruction* AddCompare(HControlInstruction* compare);
+
+    HGraphBuilder* builder() const {
+      DCHECK(builder_ != NULL);  // Have you called "Initialize"?
+      return builder_;
+    }
+
+    void AddMergeAtJoinBlock(bool deopt);
+
+    void Finish();
+    void Finish(HBasicBlock** then_continuation,
+                HBasicBlock** else_continuation);
+
+    class MergeAtJoinBlock : public ZoneObject {
+     public:
+      MergeAtJoinBlock(HBasicBlock* block,
+                       bool deopt,
+                       MergeAtJoinBlock* next)
+        : block_(block),
+          deopt_(deopt),
+          next_(next) {}
+      HBasicBlock* block_;
+      bool deopt_;
+      MergeAtJoinBlock* next_;
+    };
+
+    HGraphBuilder* builder_;
+    bool finished_ : 1;
+    bool did_then_ : 1;
+    bool did_else_ : 1;
+    bool did_else_if_ : 1;
+    bool did_and_ : 1;
+    bool did_or_ : 1;
+    bool captured_ : 1;
+    bool needs_compare_ : 1;
+    bool pending_merge_block_ : 1;
+    HBasicBlock* first_true_block_;
+    HBasicBlock* first_false_block_;
+    HBasicBlock* split_edge_merge_block_;
+    MergeAtJoinBlock* merge_at_join_blocks_;
+    int normal_merge_at_join_block_count_;
+    int deopt_merge_at_join_block_count_;
+  };
+
+  class LoopBuilder final {
+   public:
+    enum Direction {
+      kPreIncrement,
+      kPostIncrement,
+      kPreDecrement,
+      kPostDecrement,
+      kWhileTrue
+    };
+
+    explicit LoopBuilder(HGraphBuilder* builder);  // while (true) {...}
+    LoopBuilder(HGraphBuilder* builder,
+                HValue* context,
+                Direction direction);
+    LoopBuilder(HGraphBuilder* builder,
+                HValue* context,
+                Direction direction,
+                HValue* increment_amount);
+
+    ~LoopBuilder() {
+      DCHECK(finished_);
+    }
+
+    HValue* BeginBody(
+        HValue* initial,
+        HValue* terminating,
+        Token::Value token);
+
+    void BeginBody(int drop_count);
+
+    void Break();
+
+    void EndBody();
+
+   private:
+    void Initialize(HGraphBuilder* builder, HValue* context,
+                    Direction direction, HValue* increment_amount);
+    Zone* zone() { return builder_->zone(); }
+
+    HGraphBuilder* builder_;
+    HValue* context_;
+    HValue* increment_amount_;
+    HInstruction* increment_;
+    HPhi* phi_;
+    HBasicBlock* header_block_;
+    HBasicBlock* body_block_;
+    HBasicBlock* exit_block_;
+    HBasicBlock* exit_trampoline_block_;
+    Direction direction_;
+    bool finished_;
+  };
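+
+  // Usage sketch (a post-increment counting loop; |length| is a
+  // hypothetical HValue bound):
+  //   LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
+  //   HValue* index = builder.BeginBody(graph()->GetConstant0(), length,
+  //                                     Token::LT);
+  //   ...  // loop body, may use |index|
+  //   builder.EndBody();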
+
+  HValue* BuildNewElementsCapacity(HValue* old_capacity);
+
+  class JSArrayBuilder final {
+   public:
+    JSArrayBuilder(HGraphBuilder* builder,
+                   ElementsKind kind,
+                   HValue* allocation_site_payload,
+                   HValue* constructor_function,
+                   AllocationSiteOverrideMode override_mode);
+
+    JSArrayBuilder(HGraphBuilder* builder,
+                   ElementsKind kind,
+                   HValue* constructor_function = NULL);
+
+    enum FillMode {
+      DONT_FILL_WITH_HOLE,
+      FILL_WITH_HOLE
+    };
+
+    ElementsKind kind() { return kind_; }
+    HAllocate* elements_location() { return elements_location_; }
+
+    HAllocate* AllocateEmptyArray();
+    HAllocate* AllocateArray(HValue* capacity,
+                             HValue* length_field,
+                             FillMode fill_mode = FILL_WITH_HOLE);
+    // Use these allocators when the capacity may be unknown at compile
+    // time but its upper limit is known. For a constant |capacity| the
+    // value of |capacity_upper_bound| is ignored and the actual
+    // |capacity| value is used as the upper bound.
+    HAllocate* AllocateArray(HValue* capacity,
+                             int capacity_upper_bound,
+                             HValue* length_field,
+                             FillMode fill_mode = FILL_WITH_HOLE);
+    HAllocate* AllocateArray(HValue* capacity,
+                             HConstant* capacity_upper_bound,
+                             HValue* length_field,
+                             FillMode fill_mode = FILL_WITH_HOLE);
+    HValue* GetElementsLocation() { return elements_location_; }
+    HValue* EmitMapCode();
+
+   private:
+    Zone* zone() const { return builder_->zone(); }
+    int elements_size() const {
+      return IsFastDoubleElementsKind(kind_) ? kDoubleSize : kPointerSize;
+    }
+    HGraphBuilder* builder() { return builder_; }
+    HGraph* graph() { return builder_->graph(); }
+    int initial_capacity() {
+      STATIC_ASSERT(JSArray::kPreallocatedArrayElements > 0);
+      return JSArray::kPreallocatedArrayElements;
+    }
+
+    HValue* EmitInternalMapCode();
+
+    HGraphBuilder* builder_;
+    ElementsKind kind_;
+    AllocationSiteMode mode_;
+    HValue* allocation_site_payload_;
+    HValue* constructor_function_;
+    HAllocate* elements_location_;
+  };
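+
+  // Usage sketch (|capacity| and |length| are hypothetical HValues):
+  //   JSArrayBuilder array_builder(this, kind, constructor_function);
+  //   HAllocate* new_array = array_builder.AllocateArray(capacity, length);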
+
+  HValue* BuildAllocateArrayFromLength(JSArrayBuilder* array_builder,
+                                       HValue* length_argument);
+  HValue* BuildCalculateElementsSize(ElementsKind kind,
+                                     HValue* capacity);
+  HAllocate* AllocateJSArrayObject(AllocationSiteMode mode);
+  HConstant* EstablishElementsAllocationSize(ElementsKind kind, int capacity);
+
+  HAllocate* BuildAllocateElements(ElementsKind kind, HValue* size_in_bytes);
+
+  void BuildInitializeElementsHeader(HValue* elements,
+                                     ElementsKind kind,
+                                     HValue* capacity);
+
+  // Builds allocation and header initialization code for the respective
+  // successor of FixedArrayBase.
+  HValue* BuildAllocateAndInitializeArray(ElementsKind kind, HValue* capacity);
+
+  // |array| must have been allocated with enough room for
+  // 1) the JSArray and 2) an AllocationMemento if mode requires it.
+  // If the |elements| value provided is NULL then the array elements storage
+  // is initialized with the empty array.
+  void BuildJSArrayHeader(HValue* array,
+                          HValue* array_map,
+                          HValue* elements,
+                          AllocationSiteMode mode,
+                          ElementsKind elements_kind,
+                          HValue* allocation_site_payload,
+                          HValue* length_field);
+
+  HValue* BuildGrowElementsCapacity(HValue* object,
+                                    HValue* elements,
+                                    ElementsKind kind,
+                                    ElementsKind new_kind,
+                                    HValue* length,
+                                    HValue* new_capacity);
+
+  void BuildFillElementsWithValue(HValue* elements,
+                                  ElementsKind elements_kind,
+                                  HValue* from,
+                                  HValue* to,
+                                  HValue* value);
+
+  void BuildFillElementsWithHole(HValue* elements,
+                                 ElementsKind elements_kind,
+                                 HValue* from,
+                                 HValue* to);
+
+  void BuildCopyProperties(HValue* from_properties, HValue* to_properties,
+                           HValue* length, HValue* capacity);
+
+  void BuildCopyElements(HValue* from_elements,
+                         ElementsKind from_elements_kind,
+                         HValue* to_elements,
+                         ElementsKind to_elements_kind,
+                         HValue* length,
+                         HValue* capacity);
+
+  HValue* BuildCloneShallowArrayCow(HValue* boilerplate,
+                                    HValue* allocation_site,
+                                    AllocationSiteMode mode,
+                                    ElementsKind kind);
+
+  HValue* BuildCloneShallowArrayEmpty(HValue* boilerplate,
+                                      HValue* allocation_site,
+                                      AllocationSiteMode mode);
+
+  HValue* BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
+                                         HValue* allocation_site,
+                                         AllocationSiteMode mode,
+                                         ElementsKind kind);
+
+  HValue* BuildElementIndexHash(HValue* index);
+
+  enum MapEmbedding { kEmbedMapsDirectly, kEmbedMapsViaWeakCells };
+
+  void BuildCompareNil(HValue* value, Type* type, HIfContinuation* continuation,
+                       MapEmbedding map_embedding = kEmbedMapsDirectly);
+
+  void BuildCreateAllocationMemento(HValue* previous_object,
+                                    HValue* previous_object_size,
+                                    HValue* payload);
+
+  HInstruction* BuildConstantMapCheck(Handle<JSObject> constant);
+  HInstruction* BuildCheckPrototypeMaps(Handle<JSObject> prototype,
+                                        Handle<JSObject> holder);
+
+  HInstruction* BuildGetNativeContext(HValue* closure);
+  HInstruction* BuildGetNativeContext();
+  HInstruction* BuildGetScriptContext(int context_index);
+  // Builds a loop version if |depth| is specified or unrolls the loop to
+  // |depth_value| iterations otherwise.
+  HValue* BuildGetParentContext(HValue* depth, int depth_value);
+
+  HInstruction* BuildGetArrayFunction();
+  HValue* BuildArrayBufferViewFieldAccessor(HValue* object,
+                                            HValue* checked_object,
+                                            FieldIndex index);
+
+ protected:
+  void SetSourcePosition(int position) {
+    if (position != RelocInfo::kNoPosition) {
+      position_.set_position(position - start_position_);
+    }
+    // Otherwise position remains unknown.
+  }
+
+  void EnterInlinedSource(int start_position, int id) {
+    if (top_info()->is_tracking_positions()) {
+      start_position_ = start_position;
+      position_.set_inlining_id(id);
+    }
+  }
+
+  // Converts the given absolute offset from the start of the script to
+  // a SourcePosition, assuming that this position corresponds to the
+  // same function as the current position_.
+  SourcePosition ScriptPositionToSourcePosition(int position) {
+    if (position == RelocInfo::kNoPosition) {
+      return SourcePosition::Unknown();
+    }
+    SourcePosition pos = position_;
+    pos.set_position(position - start_position_);
+    return pos;
+  }
+
+  SourcePosition source_position() { return position_; }
+  void set_source_position(SourcePosition position) { position_ = position; }
+
+  HValue* BuildAllocateEmptyArrayBuffer(HValue* byte_length);
+  template <typename ViewClass>
+  void BuildArrayBufferViewInitialization(HValue* obj,
+                                          HValue* buffer,
+                                          HValue* byte_offset,
+                                          HValue* byte_length);
+
+ private:
+  HGraphBuilder();
+
+  template <class I>
+  I* AddInstructionTyped(I* instr) {
+    return I::cast(AddInstruction(instr));
+  }
+
+  CompilationInfo* info_;
+  HGraph* graph_;
+  HBasicBlock* current_block_;
+  Scope* scope_;
+  SourcePosition position_;
+  int start_position_;
+};
+
+
+template <>
+inline HDeoptimize* HGraphBuilder::Add<HDeoptimize>(
+    Deoptimizer::DeoptReason reason, Deoptimizer::BailoutType type) {
+  if (type == Deoptimizer::SOFT) {
+    isolate()->counters()->soft_deopts_requested()->Increment();
+    if (FLAG_always_opt) return NULL;
+  }
+  if (current_block()->IsDeoptimizing()) return NULL;
+  HBasicBlock* after_deopt_block = CreateBasicBlock(
+      current_block()->last_environment());
+  HDeoptimize* instr = New<HDeoptimize>(reason, type, after_deopt_block);
+  if (type == Deoptimizer::SOFT) {
+    isolate()->counters()->soft_deopts_inserted()->Increment();
+  }
+  FinishCurrentBlock(instr);
+  set_current_block(after_deopt_block);
+  return instr;
+}
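+
+// Note the SOFT handling above: requested soft deopts are counted and,
+// under --always-opt, suppressed entirely (NULL is returned and no
+// instruction is emitted).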
+
+
+template <>
+inline HInstruction* HGraphBuilder::AddUncasted<HDeoptimize>(
+    Deoptimizer::DeoptReason reason, Deoptimizer::BailoutType type) {
+  return Add<HDeoptimize>(reason, type);
+}
+
+
+template<>
+inline HSimulate* HGraphBuilder::Add<HSimulate>(
+    BailoutId id,
+    RemovableSimulate removable) {
+  HSimulate* instr = current_block()->CreateSimulate(id, removable);
+  AddInstruction(instr);
+  return instr;
+}
+
+
+template<>
+inline HSimulate* HGraphBuilder::Add<HSimulate>(
+    BailoutId id) {
+  return Add<HSimulate>(id, FIXED_SIMULATE);
+}
+
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HSimulate>(BailoutId id) {
+  return Add<HSimulate>(id, FIXED_SIMULATE);
+}
+
+
+template<>
+inline HReturn* HGraphBuilder::Add<HReturn>(HValue* value) {
+  int num_parameters = graph()->info()->num_parameters();
+  HValue* params = AddUncasted<HConstant>(num_parameters);
+  HReturn* return_instruction = New<HReturn>(value, params);
+  FinishExitCurrentBlock(return_instruction);
+  return return_instruction;
+}
+
+
+template<>
+inline HReturn* HGraphBuilder::Add<HReturn>(HConstant* value) {
+  return Add<HReturn>(static_cast<HValue*>(value));
+}
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HValue* value) {
+  return Add<HReturn>(value);
+}
+
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HReturn>(HConstant* value) {
+  return Add<HReturn>(value);
+}
+
+
+template<>
+inline HCallRuntime* HGraphBuilder::Add<HCallRuntime>(
+    const Runtime::Function* c_function,
+    int argument_count) {
+  HCallRuntime* instr = New<HCallRuntime>(c_function, argument_count);
+  if (graph()->info()->IsStub()) {
+    // When compiling code stubs, we don't want to save all double registers
+    // upon entry to the stub, but instead have the call runtime instruction
+    // save the double registers only on-demand (in the fallback case).
+    instr->set_save_doubles(kSaveFPRegs);
+  }
+  AddInstruction(instr);
+  return instr;
+}
+
+
+template<>
+inline HInstruction* HGraphBuilder::AddUncasted<HCallRuntime>(
+    Handle<String> name,
+    const Runtime::Function* c_function,
+    int argument_count) {
+  return Add<HCallRuntime>(c_function, argument_count);
+}
+
+
+template <>
+inline HParameter* HGraphBuilder::New<HParameter>(unsigned index) {
+  return HParameter::New(isolate(), zone(), nullptr, index);
+}
+
+
+template <>
+inline HParameter* HGraphBuilder::New<HParameter>(
+    unsigned index, HParameter::ParameterKind kind) {
+  return HParameter::New(isolate(), zone(), nullptr, index, kind);
+}
+
+
+template <>
+inline HParameter* HGraphBuilder::New<HParameter>(
+    unsigned index, HParameter::ParameterKind kind, Representation r) {
+  return HParameter::New(isolate(), zone(), nullptr, index, kind, r);
+}
+
+
+template <>
+inline HPrologue* HGraphBuilder::New<HPrologue>() {
+  return HPrologue::New(zone());
+}
+
+
+template <>
+inline HContext* HGraphBuilder::New<HContext>() {
+  return HContext::New(zone());
+}
+
+
+class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
+ public:
+  // A class encapsulating (lazily-allocated) break and continue blocks for
+  // a breakable statement.  Separated from BreakAndContinueScope so that it
+  // can have a separate lifetime.
+  class BreakAndContinueInfo final BASE_EMBEDDED {
+   public:
+    explicit BreakAndContinueInfo(BreakableStatement* target,
+                                  Scope* scope,
+                                  int drop_extra = 0)
+        : target_(target),
+          break_block_(NULL),
+          continue_block_(NULL),
+          scope_(scope),
+          drop_extra_(drop_extra) {
+    }
+
+    BreakableStatement* target() { return target_; }
+    HBasicBlock* break_block() { return break_block_; }
+    void set_break_block(HBasicBlock* block) { break_block_ = block; }
+    HBasicBlock* continue_block() { return continue_block_; }
+    void set_continue_block(HBasicBlock* block) { continue_block_ = block; }
+    Scope* scope() { return scope_; }
+    int drop_extra() { return drop_extra_; }
+
+   private:
+    BreakableStatement* target_;
+    HBasicBlock* break_block_;
+    HBasicBlock* continue_block_;
+    Scope* scope_;
+    int drop_extra_;
+  };
+
+  // A helper class to maintain a stack of current BreakAndContinueInfo
+  // structures mirroring BreakableStatement nesting.
+  class BreakAndContinueScope final BASE_EMBEDDED {
+   public:
+    BreakAndContinueScope(BreakAndContinueInfo* info,
+                          HOptimizedGraphBuilder* owner)
+        : info_(info), owner_(owner), next_(owner->break_scope()) {
+      owner->set_break_scope(this);
+    }
+
+    ~BreakAndContinueScope() { owner_->set_break_scope(next_); }
+
+    BreakAndContinueInfo* info() { return info_; }
+    HOptimizedGraphBuilder* owner() { return owner_; }
+    BreakAndContinueScope* next() { return next_; }
+
+    // Search the break stack for a break or continue target.
+    enum BreakType { BREAK, CONTINUE };
+    HBasicBlock* Get(BreakableStatement* stmt, BreakType type,
+                     Scope** scope, int* drop_extra);
+
+   private:
+    BreakAndContinueInfo* info_;
+    HOptimizedGraphBuilder* owner_;
+    BreakAndContinueScope* next_;
+  };
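+
+  // Usage sketch (how a loop visitor typically nests a scope):
+  //   BreakAndContinueInfo break_info(stmt, scope());
+  //   { BreakAndContinueScope push(&break_info, this);
+  //     ...  // visit the body; break/continue targets resolve via Get()
+  //   }  // the destructor restores the previous break scope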
+
+  explicit HOptimizedGraphBuilder(CompilationInfo* info);
+
+  bool BuildGraph() override;
+
+  // Simple accessors.
+  BreakAndContinueScope* break_scope() const { return break_scope_; }
+  void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
+
+  HValue* context() override { return environment()->context(); }
+
+  HOsrBuilder* osr() const { return osr_; }
+
+  void Bailout(BailoutReason reason);
+
+  HBasicBlock* CreateJoin(HBasicBlock* first,
+                          HBasicBlock* second,
+                          BailoutId join_id);
+
+  FunctionState* function_state() const { return function_state_; }
+
+  void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+
+  void* operator new(size_t size, Zone* zone) { return zone->New(size); }
+  void operator delete(void* pointer, Zone* zone) { }
+  void operator delete(void* pointer) { }
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ protected:
+  // Forward declarations for inner scope classes.
+  class SubgraphScope;
+
+  static const int kMaxCallPolymorphism = 4;
+  static const int kMaxLoadPolymorphism = 4;
+  static const int kMaxStorePolymorphism = 4;
+
+  // Even in the 'unlimited' case we have to have some limit in order not to
+  // overflow the stack.
+  static const int kUnlimitedMaxInlinedSourceSize = 100000;
+  static const int kUnlimitedMaxInlinedNodes = 10000;
+  static const int kUnlimitedMaxInlinedNodesCumulative = 10000;
+
+  // Maximum depth and total number of elements and properties for literal
+  // graphs to be considered for fast deep-copying.
+  static const int kMaxFastLiteralDepth = 3;
+  static const int kMaxFastLiteralProperties = 8;
+
+  // Simple accessors.
+  void set_function_state(FunctionState* state) { function_state_ = state; }
+
+  AstContext* ast_context() const { return ast_context_; }
+  void set_ast_context(AstContext* context) { ast_context_ = context; }
+
+  // Accessors forwarded to the function state.
+  CompilationInfo* current_info() const {
+    return function_state()->compilation_info();
+  }
+  AstContext* call_context() const {
+    return function_state()->call_context();
+  }
+  HBasicBlock* function_return() const {
+    return function_state()->function_return();
+  }
+  TestContext* inlined_test_context() const {
+    return function_state()->test_context();
+  }
+  Handle<SharedFunctionInfo> current_shared_info() const {
+    return current_info()->shared_info();
+  }
+  TypeFeedbackVector* current_feedback_vector() const {
+    return current_shared_info()->feedback_vector();
+  }
+  void ClearInlinedTestContext() {
+    function_state()->ClearInlinedTestContext();
+  }
+  LanguageMode function_language_mode() {
+    return function_state()->compilation_info()->language_mode();
+  }
+
+#define FOR_EACH_HYDROGEN_INTRINSIC(F) \
+  F(IsSmi)                             \
+  F(IsArray)                           \
+  F(IsTypedArray)                      \
+  F(IsRegExp)                          \
+  F(IsJSProxy)                         \
+  F(Call)                              \
+  F(ArgumentsLength)                   \
+  F(Arguments)                         \
+  F(ValueOf)                           \
+  F(SetValueOf)                        \
+  F(IsDate)                            \
+  F(StringCharFromCode)                \
+  F(StringCharAt)                      \
+  F(OneByteSeqStringSetChar)           \
+  F(TwoByteSeqStringSetChar)           \
+  F(ObjectEquals)                      \
+  F(ToInteger)                         \
+  F(ToObject)                          \
+  F(ToString)                          \
+  F(ToLength)                          \
+  F(ToNumber)                          \
+  F(IsFunction)                        \
+  F(IsJSReceiver)                      \
+  F(MathPow)                           \
+  F(IsMinusZero)                       \
+  F(HasCachedArrayIndex)               \
+  F(GetCachedArrayIndex)               \
+  F(FastOneByteArrayJoin)              \
+  F(DebugBreakInOptimizedCode)         \
+  F(StringCharCodeAt)                  \
+  F(SubString)                         \
+  F(RegExpExec)                        \
+  F(RegExpConstructResult)             \
+  F(RegExpFlags)                       \
+  F(RegExpSource)                      \
+  F(NumberToString)                    \
+  F(DebugIsActive)                     \
+  /* Typed Arrays */                   \
+  F(TypedArrayInitialize)              \
+  F(DataViewInitialize)                \
+  F(MaxSmi)                            \
+  F(TypedArrayMaxSizeInHeap)           \
+  F(ArrayBufferViewGetByteLength)      \
+  F(ArrayBufferViewGetByteOffset)      \
+  F(TypedArrayGetLength)               \
+  /* ArrayBuffer */                    \
+  F(ArrayBufferGetByteLength)          \
+  /* Maths */                          \
+  F(ConstructDouble)                   \
+  F(DoubleHi)                          \
+  F(DoubleLo)                          \
+  F(MathClz32)                         \
+  F(MathFloor)                         \
+  F(MathSqrt)                          \
+  F(MathLogRT)                         \
+  /* ES6 Collections */                \
+  F(MapClear)                          \
+  F(MapInitialize)                     \
+  F(SetClear)                          \
+  F(SetInitialize)                     \
+  F(FixedArrayGet)                     \
+  F(FixedArraySet)                     \
+  F(JSCollectionGetTable)              \
+  F(StringGetRawHashField)             \
+  F(TheHole)                           \
+  /* ES6 Iterators */                  \
+  F(CreateIterResultObject)            \
+  /* Arrays */                         \
+  F(HasFastPackedElements)             \
+  /* JSValue */                        \
+  F(JSValueGetValue)
+
+#define GENERATOR_DECLARATION(Name) void Generate##Name(CallRuntime* call);
+  FOR_EACH_HYDROGEN_INTRINSIC(GENERATOR_DECLARATION)
+#undef GENERATOR_DECLARATION
+
+  void VisitDelete(UnaryOperation* expr);
+  void VisitVoid(UnaryOperation* expr);
+  void VisitTypeof(UnaryOperation* expr);
+  void VisitNot(UnaryOperation* expr);
+
+  void VisitComma(BinaryOperation* expr);
+  void VisitLogicalExpression(BinaryOperation* expr);
+  void VisitArithmeticExpression(BinaryOperation* expr);
+
+  void VisitLoopBody(IterationStatement* stmt,
+                     HBasicBlock* loop_entry);
+
+  void BuildForInBody(ForInStatement* stmt, Variable* each_var,
+                      HValue* enumerable);
+
+  // Create a back edge in the flow graph.  body_exit is the predecessor
+  // block and loop_entry is the successor block.  loop_successor is the
+  // block where control flow exits the loop normally (e.g., via failure of
+  // the condition) and break_block is the block where control flow breaks
+  // from the loop.  All blocks except loop_entry can be NULL.  The return
+  // value is the new successor block which is the join of loop_successor
+  // and break_block, or NULL.
+  HBasicBlock* CreateLoop(IterationStatement* statement,
+                          HBasicBlock* loop_entry,
+                          HBasicBlock* body_exit,
+                          HBasicBlock* loop_successor,
+                          HBasicBlock* break_block);
+
+  // Builds a loop entry.
+  HBasicBlock* BuildLoopEntry();
+
+  // Builds a loop entry, respecting OSR requirements.
+  HBasicBlock* BuildLoopEntry(IterationStatement* statement);
+
+  HBasicBlock* JoinContinue(IterationStatement* statement,
+                            HBasicBlock* exit_block,
+                            HBasicBlock* continue_block);
+
+  HValue* Top() const { return environment()->Top(); }
+  void Drop(int n) { environment()->Drop(n); }
+  void Bind(Variable* var, HValue* value) { environment()->Bind(var, value); }
+  bool IsEligibleForEnvironmentLivenessAnalysis(Variable* var,
+                                                int index,
+                                                HValue* value,
+                                                HEnvironment* env) {
+    if (!FLAG_analyze_environment_liveness) return false;
+    // |this| and |arguments| are always live; zapping parameters isn't
+    // safe because function.arguments can inspect them at any time.
+    return !var->is_this() &&
+           !var->is_arguments() &&
+           !value->IsArgumentsObject() &&
+           env->is_local_index(index);
+  }
+  void BindIfLive(Variable* var, HValue* value) {
+    HEnvironment* env = environment();
+    int index = env->IndexFor(var);
+    env->Bind(index, value);
+    if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
+      HEnvironmentMarker* bind =
+          Add<HEnvironmentMarker>(HEnvironmentMarker::BIND, index);
+      USE(bind);
+#ifdef DEBUG
+      bind->set_closure(env->closure());
+#endif
+    }
+  }
+  HValue* LookupAndMakeLive(Variable* var) {
+    HEnvironment* env = environment();
+    int index = env->IndexFor(var);
+    HValue* value = env->Lookup(index);
+    if (IsEligibleForEnvironmentLivenessAnalysis(var, index, value, env)) {
+      HEnvironmentMarker* lookup =
+          Add<HEnvironmentMarker>(HEnvironmentMarker::LOOKUP, index);
+      USE(lookup);
+#ifdef DEBUG
+      lookup->set_closure(env->closure());
+#endif
+    }
+    return value;
+  }
+
+  // The value of the arguments object is allowed in some but not most value
+  // contexts.  (It's allowed in all effect contexts and disallowed in all
+  // test contexts.)
+  void VisitForValue(Expression* expr,
+                     ArgumentsAllowedFlag flag = ARGUMENTS_NOT_ALLOWED);
+  void VisitForTypeOf(Expression* expr);
+  void VisitForEffect(Expression* expr);
+  void VisitForControl(Expression* expr,
+                       HBasicBlock* true_block,
+                       HBasicBlock* false_block);
+
+  // Visit a list of expressions from left to right, each in a value context.
+  void VisitExpressions(ZoneList<Expression*>* exprs) override;
+  void VisitExpressions(ZoneList<Expression*>* exprs,
+                        ArgumentsAllowedFlag flag);
+
+  // Remove the arguments from the bailout environment and emit instructions
+  // to push them as outgoing parameters.
+  template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
+  void PushArgumentsFromEnvironment(int count);
+
+  void SetUpScope(Scope* scope);
+  void VisitStatements(ZoneList<Statement*>* statements) override;
+
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+  // Helpers for flow graph construction.
+  enum GlobalPropertyAccess {
+    kUseCell,
+    kUseGeneric
+  };
+  GlobalPropertyAccess LookupGlobalProperty(Variable* var, LookupIterator* it,
+                                            PropertyAccessType access_type);
+
+  void EnsureArgumentsArePushedForAccess();
+  bool TryArgumentsAccess(Property* expr);
+
+  // Shared code for .call and .apply optimizations.
+  void HandleIndirectCall(Call* expr, HValue* function, int arguments_count);
+  // Try to optimize indirect calls such as fun.apply(receiver, arguments)
+  // or fun.call(...).
+  bool TryIndirectCall(Call* expr);
+  void BuildFunctionApply(Call* expr);
+  void BuildFunctionCall(Call* expr);
+
+  bool TryHandleArrayCall(Call* expr, HValue* function);
+  bool TryHandleArrayCallNew(CallNew* expr, HValue* function);
+  void BuildArrayCall(Expression* expr, int arguments_count, HValue* function,
+                      Handle<AllocationSite> cell);
+
+  enum ArrayIndexOfMode { kFirstIndexOf, kLastIndexOf };
+  HValue* BuildArrayIndexOf(HValue* receiver,
+                            HValue* search_element,
+                            ElementsKind kind,
+                            ArrayIndexOfMode mode);
+
+  HValue* ImplicitReceiverFor(HValue* function,
+                              Handle<JSFunction> target);
+
+  int InliningAstSize(Handle<JSFunction> target);
+  bool TryInline(Handle<JSFunction> target, int arguments_count,
+                 HValue* implicit_return_value, BailoutId ast_id,
+                 BailoutId return_id, InliningKind inlining_kind);
+
+  bool TryInlineCall(Call* expr);
+  bool TryInlineConstruct(CallNew* expr, HValue* implicit_return_value);
+  bool TryInlineGetter(Handle<JSFunction> getter,
+                       Handle<Map> receiver_map,
+                       BailoutId ast_id,
+                       BailoutId return_id);
+  bool TryInlineSetter(Handle<JSFunction> setter,
+                       Handle<Map> receiver_map,
+                       BailoutId id,
+                       BailoutId assignment_id,
+                       HValue* implicit_return_value);
+  bool TryInlineIndirectCall(Handle<JSFunction> function, Call* expr,
+                             int arguments_count);
+  bool TryInlineBuiltinMethodCall(Call* expr, Handle<JSFunction> function,
+                                  Handle<Map> receiver_map,
+                                  int args_count_no_receiver);
+  bool TryInlineBuiltinFunctionCall(Call* expr);
+  enum ApiCallType {
+    kCallApiFunction,
+    kCallApiMethod,
+    kCallApiGetter,
+    kCallApiSetter
+  };
+  bool TryInlineApiMethodCall(Call* expr,
+                              HValue* receiver,
+                              SmallMapList* receiver_types);
+  bool TryInlineApiFunctionCall(Call* expr, HValue* receiver);
+  bool TryInlineApiGetter(Handle<JSFunction> function,
+                          Handle<Map> receiver_map,
+                          BailoutId ast_id);
+  bool TryInlineApiSetter(Handle<JSFunction> function,
+                          Handle<Map> receiver_map,
+                          BailoutId ast_id);
+  bool TryInlineApiCall(Handle<JSFunction> function,
+                        HValue* receiver,
+                        SmallMapList* receiver_maps,
+                        int argc,
+                        BailoutId ast_id,
+                        ApiCallType call_type);
+  static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
+  static bool CanInlineArrayResizeOperation(Handle<Map> receiver_map);
+
+  // If --trace-inlining, print a line of the inlining trace.  Inlining
+  // succeeded if the reason string is NULL and failed if there is a
+  // non-NULL reason string.
+  void TraceInline(Handle<JSFunction> target,
+                   Handle<JSFunction> caller,
+                   const char* failure_reason);
+
+  void HandleGlobalVariableAssignment(Variable* var, HValue* value,
+                                      FeedbackVectorSlot slot,
+                                      BailoutId ast_id);
+
+  void HandlePropertyAssignment(Assignment* expr);
+  void HandleCompoundAssignment(Assignment* expr);
+  void HandlePolymorphicNamedFieldAccess(
+      PropertyAccessType access_type, Expression* expr, FeedbackVectorSlot slot,
+      BailoutId ast_id, BailoutId return_id, HValue* object, HValue* value,
+      SmallMapList* types, Handle<Name> name);
+
+  HValue* BuildAllocateExternalElements(
+      ExternalArrayType array_type,
+      bool is_zero_byte_offset,
+      HValue* buffer, HValue* byte_offset, HValue* length);
+  HValue* BuildAllocateFixedTypedArray(ExternalArrayType array_type,
+                                       size_t element_size,
+                                       ElementsKind fixed_elements_kind,
+                                       HValue* byte_length, HValue* length,
+                                       bool initialize);
+
+  // TODO(adamk): Move all OrderedHashTable functions to their own class.
+  HValue* BuildOrderedHashTableHashToBucket(HValue* hash, HValue* num_buckets);
+  template <typename CollectionType>
+  HValue* BuildOrderedHashTableHashToEntry(HValue* table, HValue* hash,
+                                           HValue* num_buckets);
+  template <typename CollectionType>
+  HValue* BuildOrderedHashTableEntryToIndex(HValue* entry, HValue* num_buckets);
+  template <typename CollectionType>
+  HValue* BuildOrderedHashTableFindEntry(HValue* table, HValue* key,
+                                         HValue* hash);
+  template <typename CollectionType>
+  HValue* BuildOrderedHashTableAddEntry(HValue* table, HValue* key,
+                                        HValue* hash,
+                                        HIfContinuation* join_continuation);
+  template <typename CollectionType>
+  HValue* BuildAllocateOrderedHashTable();
+  template <typename CollectionType>
+  void BuildOrderedHashTableClear(HValue* receiver);
+  template <typename CollectionType>
+  void BuildJSCollectionDelete(CallRuntime* call,
+                               const Runtime::Function* c_function);
+  template <typename CollectionType>
+  void BuildJSCollectionHas(CallRuntime* call,
+                            const Runtime::Function* c_function);
+  HValue* BuildStringHashLoadIfIsStringAndHashComputed(
+      HValue* object, HIfContinuation* continuation);
+
+  Handle<JSFunction> array_function() {
+    return handle(isolate()->native_context()->array_function());
+  }
+
+  bool IsCallArrayInlineable(int argument_count, Handle<AllocationSite> site);
+  void BuildInlinedCallArray(Expression* expression, int argument_count,
+                             Handle<AllocationSite> site);
+
+  void BuildInitializeInobjectProperties(HValue* receiver,
+                                         Handle<Map> initial_map);
+
+  class PropertyAccessInfo {
+   public:
+    PropertyAccessInfo(HOptimizedGraphBuilder* builder,
+                       PropertyAccessType access_type, Handle<Map> map,
+                       Handle<Name> name)
+        : builder_(builder),
+          access_type_(access_type),
+          map_(map),
+          name_(name),
+          field_type_(HType::Tagged()),
+          access_(HObjectAccess::ForMap()),
+          lookup_type_(NOT_FOUND),
+          details_(NONE, DATA, Representation::None()) {}
+
+    // Checks whether this PropertyAccessInfo can be handled as a monomorphic
+    // named load. It additionally fills in the fields necessary to generate
+    // the lookup code.
+    bool CanAccessMonomorphic();
+
+    // Checks whether all types behave uniformly when loading name. If all
+    // maps behave the same, a single monomorphic load instruction can be
+    // emitted, guarded by a single map-checks instruction that checks whether
+    // the receiver is an instance of any of the types.
+    // This method skips the first type in types, assuming that this
+    // PropertyAccessInfo is built for types->first().
+    bool CanAccessAsMonomorphic(SmallMapList* types);
+
+    bool NeedsWrappingFor(Handle<JSFunction> target) const;
+
+    Handle<Map> map();
+    Handle<Name> name() const { return name_; }
+
+    bool IsJSObjectFieldAccessor() {
+      int offset;  // unused
+      return Accessors::IsJSObjectFieldAccessor(map_, name_, &offset);
+    }
+
+    bool GetJSObjectFieldAccess(HObjectAccess* access) {
+      int offset;
+      if (Accessors::IsJSObjectFieldAccessor(map_, name_, &offset)) {
+        if (IsStringType()) {
+          DCHECK(Name::Equals(isolate()->factory()->length_string(), name_));
+          *access = HObjectAccess::ForStringLength();
+        } else if (IsArrayType()) {
+          DCHECK(Name::Equals(isolate()->factory()->length_string(), name_));
+          *access = HObjectAccess::ForArrayLength(map_->elements_kind());
+        } else {
+          *access = HObjectAccess::ForMapAndOffset(map_, offset);
+        }
+        return true;
+      }
+      return false;
+    }
+
+    bool IsJSArrayBufferViewFieldAccessor() {
+      int offset;  // unused
+      return Accessors::IsJSArrayBufferViewFieldAccessor(map_, name_, &offset);
+    }
+
+    bool GetJSArrayBufferViewFieldAccess(HObjectAccess* access) {
+      int offset;
+      if (Accessors::IsJSArrayBufferViewFieldAccessor(map_, name_, &offset)) {
+        *access = HObjectAccess::ForMapAndOffset(map_, offset);
+        return true;
+      }
+      return false;
+    }
+
+    bool has_holder() { return !holder_.is_null(); }
+    bool IsLoad() const { return access_type_ == LOAD; }
+
+    Isolate* isolate() const { return builder_->isolate(); }
+    Handle<JSObject> holder() { return holder_; }
+    Handle<JSFunction> accessor() { return accessor_; }
+    Handle<Object> constant() { return constant_; }
+    Handle<Map> transition() { return transition_; }
+    SmallMapList* field_maps() { return &field_maps_; }
+    HType field_type() const { return field_type_; }
+    HObjectAccess access() { return access_; }
+
+    bool IsFound() const { return lookup_type_ != NOT_FOUND; }
+    bool IsProperty() const { return IsFound() && !IsTransition(); }
+    bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
+    bool IsData() const {
+      return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == DATA;
+    }
+    bool IsDataConstant() const {
+      return lookup_type_ == DESCRIPTOR_TYPE &&
+             details_.type() == DATA_CONSTANT;
+    }
+    bool IsAccessorConstant() const {
+      return !IsTransition() && details_.type() == ACCESSOR_CONSTANT;
+    }
+    bool IsConfigurable() const { return details_.IsConfigurable(); }
+    bool IsReadOnly() const { return details_.IsReadOnly(); }
+
+    bool IsStringType() { return map_->instance_type() < FIRST_NONSTRING_TYPE; }
+    bool IsNumberType() { return map_->instance_type() == HEAP_NUMBER_TYPE; }
+    bool IsValueWrapped() { return IsStringType() || IsNumberType(); }
+    bool IsArrayType() { return map_->instance_type() == JS_ARRAY_TYPE; }
+
+   private:
+    Handle<Object> GetConstantFromMap(Handle<Map> map) const {
+      DCHECK_EQ(DESCRIPTOR_TYPE, lookup_type_);
+      DCHECK(number_ < map->NumberOfOwnDescriptors());
+      return handle(map->instance_descriptors()->GetValue(number_), isolate());
+    }
+    Handle<Object> GetAccessorsFromMap(Handle<Map> map) const {
+      return GetConstantFromMap(map);
+    }
+    Handle<HeapType> GetFieldTypeFromMap(Handle<Map> map) const {
+      DCHECK(IsFound());
+      DCHECK(number_ < map->NumberOfOwnDescriptors());
+      return handle(map->instance_descriptors()->GetFieldType(number_),
+                    isolate());
+    }
+    Handle<Map> GetFieldOwnerFromMap(Handle<Map> map) const {
+      DCHECK(IsFound());
+      DCHECK(number_ < map->NumberOfOwnDescriptors());
+      return handle(map->FindFieldOwner(number_));
+    }
+    int GetLocalFieldIndexFromMap(Handle<Map> map) const {
+      DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
+             lookup_type_ == TRANSITION_TYPE);
+      DCHECK(number_ < map->NumberOfOwnDescriptors());
+      int field_index = map->instance_descriptors()->GetFieldIndex(number_);
+      return field_index - map->GetInObjectProperties();
+    }
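+    // Worked example (illustrative numbers): with 4 in-object properties and
+    // descriptor field index 1, the result is 1 - 4 == -3. By assumption
+    // (consistent with the subtraction above), a negative local index denotes
+    // an in-object field and a non-negative one a slot in the out-of-object
+    // properties backing store.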
+
+    void LookupDescriptor(Map* map, Name* name) {
+      DescriptorArray* descriptors = map->instance_descriptors();
+      int number = descriptors->SearchWithCache(name, map);
+      if (number == DescriptorArray::kNotFound) return NotFound();
+      lookup_type_ = DESCRIPTOR_TYPE;
+      details_ = descriptors->GetDetails(number);
+      number_ = number;
+    }
+    void LookupTransition(Map* map, Name* name, PropertyAttributes attributes) {
+      Map* target =
+          TransitionArray::SearchTransition(map, kData, name, attributes);
+      if (target == NULL) return NotFound();
+      lookup_type_ = TRANSITION_TYPE;
+      transition_ = handle(target);
+      number_ = transition_->LastAdded();
+      details_ = transition_->instance_descriptors()->GetDetails(number_);
+    }
+    void NotFound() {
+      lookup_type_ = NOT_FOUND;
+      details_ = PropertyDetails::Empty();
+    }
+    Representation representation() const {
+      DCHECK(IsFound());
+      return details_.representation();
+    }
+    bool IsTransitionToData() const {
+      return IsTransition() && details_.type() == DATA;
+    }
+
+    Zone* zone() { return builder_->zone(); }
+    CompilationInfo* top_info() { return builder_->top_info(); }
+    CompilationInfo* current_info() { return builder_->current_info(); }
+
+    bool LoadResult(Handle<Map> map);
+    bool LoadFieldMaps(Handle<Map> map);
+    bool LookupDescriptor();
+    bool LookupInPrototypes();
+    bool IsIntegerIndexedExotic();
+    bool IsCompatible(PropertyAccessInfo* other);
+
+    void GeneralizeRepresentation(Representation r) {
+      access_ = access_.WithRepresentation(
+          access_.representation().generalize(r));
+    }
+
+    HOptimizedGraphBuilder* builder_;
+    PropertyAccessType access_type_;
+    Handle<Map> map_;
+    Handle<Name> name_;
+    Handle<JSObject> holder_;
+    Handle<JSFunction> accessor_;
+    Handle<JSObject> api_holder_;
+    Handle<Object> constant_;
+    SmallMapList field_maps_;
+    HType field_type_;
+    HObjectAccess access_;
+
+    enum { NOT_FOUND, DESCRIPTOR_TYPE, TRANSITION_TYPE } lookup_type_;
+    Handle<Map> transition_;
+    int number_;
+    PropertyDetails details_;
+  };
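+
+  // Hedged usage sketch (names as declared above): a polymorphic load
+  // typically probes the first receiver map and then asks whether the
+  // remaining maps are compatible, e.g.
+  //   PropertyAccessInfo info(this, LOAD, maps->first(), name);
+  //   if (info.CanAccessAsMonomorphic(maps)) { /* emit one guarded load */ }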
+
+  HValue* BuildMonomorphicAccess(PropertyAccessInfo* info, HValue* object,
+                                 HValue* checked_object, HValue* value,
+                                 BailoutId ast_id, BailoutId return_id,
+                                 bool can_inline_accessor = true);
+
+  HValue* BuildNamedAccess(PropertyAccessType access, BailoutId ast_id,
+                           BailoutId return_id, Expression* expr,
+                           FeedbackVectorSlot slot, HValue* object,
+                           Handle<Name> name, HValue* value,
+                           bool is_uninitialized = false);
+
+  void HandlePolymorphicCallNamed(Call* expr,
+                                  HValue* receiver,
+                                  SmallMapList* types,
+                                  Handle<String> name);
+  void HandleLiteralCompareTypeof(CompareOperation* expr,
+                                  Expression* sub_expr,
+                                  Handle<String> check);
+  void HandleLiteralCompareNil(CompareOperation* expr,
+                               Expression* sub_expr,
+                               NilValue nil);
+
+  enum PushBeforeSimulateBehavior {
+    PUSH_BEFORE_SIMULATE,
+    NO_PUSH_BEFORE_SIMULATE
+  };
+
+  HControlInstruction* BuildCompareInstruction(
+      Token::Value op, HValue* left, HValue* right, Type* left_type,
+      Type* right_type, Type* combined_type, SourcePosition left_position,
+      SourcePosition right_position, PushBeforeSimulateBehavior push_sim_result,
+      BailoutId bailout_id);
+
+  HInstruction* BuildStringCharCodeAt(HValue* string,
+                                      HValue* index);
+
+  HValue* BuildBinaryOperation(
+      BinaryOperation* expr,
+      HValue* left,
+      HValue* right,
+      PushBeforeSimulateBehavior push_sim_result);
+  HInstruction* BuildIncrement(bool returns_original_input,
+                               CountOperation* expr);
+  HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
+                                  Expression* expr, FeedbackVectorSlot slot,
+                                  HValue* object, HValue* key, HValue* value);
+
+  HInstruction* TryBuildConsolidatedElementLoad(HValue* object,
+                                                HValue* key,
+                                                HValue* val,
+                                                SmallMapList* maps);
+
+  LoadKeyedHoleMode BuildKeyedHoleMode(Handle<Map> map);
+
+  HInstruction* BuildMonomorphicElementAccess(HValue* object,
+                                              HValue* key,
+                                              HValue* val,
+                                              HValue* dependency,
+                                              Handle<Map> map,
+                                              PropertyAccessType access_type,
+                                              KeyedAccessStoreMode store_mode);
+
+  HValue* HandlePolymorphicElementAccess(
+      Expression* expr, FeedbackVectorSlot slot, HValue* object, HValue* key,
+      HValue* val, SmallMapList* maps, PropertyAccessType access_type,
+      KeyedAccessStoreMode store_mode, bool* has_side_effects);
+
+  HValue* HandleKeyedElementAccess(HValue* obj, HValue* key, HValue* val,
+                                   Expression* expr, FeedbackVectorSlot slot,
+                                   BailoutId ast_id, BailoutId return_id,
+                                   PropertyAccessType access_type,
+                                   bool* has_side_effects);
+
+  HInstruction* BuildNamedGeneric(PropertyAccessType access, Expression* expr,
+                                  FeedbackVectorSlot slot, HValue* object,
+                                  Handle<Name> name, HValue* value,
+                                  bool is_uninitialized = false);
+
+  HCheckMaps* AddCheckMap(HValue* object, Handle<Map> map);
+
+  void BuildLoad(Property* property,
+                 BailoutId ast_id);
+  void PushLoad(Property* property,
+                HValue* object,
+                HValue* key);
+
+  void BuildStoreForEffect(Expression* expression, Property* prop,
+                           FeedbackVectorSlot slot, BailoutId ast_id,
+                           BailoutId return_id, HValue* object, HValue* key,
+                           HValue* value);
+
+  void BuildStore(Expression* expression, Property* prop,
+                  FeedbackVectorSlot slot, BailoutId ast_id,
+                  BailoutId return_id, bool is_uninitialized = false);
+
+  HInstruction* BuildLoadNamedField(PropertyAccessInfo* info,
+                                    HValue* checked_object);
+  HInstruction* BuildStoreNamedField(PropertyAccessInfo* info,
+                                     HValue* checked_object,
+                                     HValue* value);
+
+  HValue* BuildContextChainWalk(Variable* var);
+
+  HValue* AddThisFunction();
+  HInstruction* BuildThisFunction();
+
+  HInstruction* BuildFastLiteral(Handle<JSObject> boilerplate_object,
+                                 AllocationSiteUsageContext* site_context);
+
+  void BuildEmitObjectHeader(Handle<JSObject> boilerplate_object,
+                             HInstruction* object);
+
+  void BuildEmitInObjectProperties(Handle<JSObject> boilerplate_object,
+                                   HInstruction* object,
+                                   AllocationSiteUsageContext* site_context,
+                                   PretenureFlag pretenure_flag);
+
+  void BuildEmitElements(Handle<JSObject> boilerplate_object,
+                         Handle<FixedArrayBase> elements,
+                         HValue* object_elements,
+                         AllocationSiteUsageContext* site_context);
+
+  void BuildEmitFixedDoubleArray(Handle<FixedArrayBase> elements,
+                                 ElementsKind kind,
+                                 HValue* object_elements);
+
+  void BuildEmitFixedArray(Handle<FixedArrayBase> elements,
+                           ElementsKind kind,
+                           HValue* object_elements,
+                           AllocationSiteUsageContext* site_context);
+
+  void AddCheckPrototypeMaps(Handle<JSObject> holder,
+                             Handle<Map> receiver_map);
+
+  HInstruction* NewPlainFunctionCall(HValue* fun, int argument_count);
+
+  HInstruction* NewArgumentAdaptorCall(HValue* fun, HValue* context,
+                                       int argument_count,
+                                       HValue* expected_param_count);
+
+  HInstruction* BuildCallConstantFunction(Handle<JSFunction> target,
+                                          int argument_count);
+
+  bool CanBeFunctionApplyArguments(Call* expr);
+
+  // The translation state of the currently-being-translated function.
+  FunctionState* function_state_;
+
+  // The base of the function state stack.
+  FunctionState initial_function_state_;
+
+  // Expression context of the currently visited subexpression. NULL when
+  // visiting statements.
+  AstContext* ast_context_;
+
+  // A stack of breakable statements entered.
+  BreakAndContinueScope* break_scope_;
+
+  int inlined_count_;
+  ZoneList<Handle<Object> > globals_;
+
+  bool inline_bailout_;
+
+  HOsrBuilder* osr_;
+
+  friend class FunctionState;  // Pushes and pops the state stack.
+  friend class AstContext;  // Pushes and pops the AST context stack.
+  friend class KeyedLoadFastElementStub;
+  friend class HOsrBuilder;
+
+  DISALLOW_COPY_AND_ASSIGN(HOptimizedGraphBuilder);
+};
+
+
+Zone* AstContext::zone() const { return owner_->zone(); }
+
+
+class HStatistics final : public Malloced {
+ public:
+  HStatistics()
+      : times_(5),
+        names_(5),
+        sizes_(5),
+        total_size_(0),
+        source_size_(0) { }
+
+  void Initialize(CompilationInfo* info);
+  void Print();
+  void SaveTiming(const char* name, base::TimeDelta time, size_t size);
+
+  void IncrementFullCodeGen(base::TimeDelta full_code_gen) {
+    full_code_gen_ += full_code_gen;
+  }
+
+  void IncrementCreateGraph(base::TimeDelta delta) { create_graph_ += delta; }
+
+  void IncrementOptimizeGraph(base::TimeDelta delta) {
+    optimize_graph_ += delta;
+  }
+
+  void IncrementGenerateCode(base::TimeDelta delta) { generate_code_ += delta; }
+
+  void IncrementSubtotals(base::TimeDelta create_graph,
+                          base::TimeDelta optimize_graph,
+                          base::TimeDelta generate_code) {
+    IncrementCreateGraph(create_graph);
+    IncrementOptimizeGraph(optimize_graph);
+    IncrementGenerateCode(generate_code);
+  }
+
+ private:
+  List<base::TimeDelta> times_;
+  List<const char*> names_;
+  List<size_t> sizes_;
+  base::TimeDelta create_graph_;
+  base::TimeDelta optimize_graph_;
+  base::TimeDelta generate_code_;
+  size_t total_size_;
+  base::TimeDelta full_code_gen_;
+  double source_size_;
+};
+
+
+class HPhase : public CompilationPhase {
+ public:
+  HPhase(const char* name, HGraph* graph)
+      : CompilationPhase(name, graph->info()),
+        graph_(graph) { }
+  ~HPhase();
+
+ protected:
+  HGraph* graph() const { return graph_; }
+
+ private:
+  HGraph* graph_;
+
+  DISALLOW_COPY_AND_ASSIGN(HPhase);
+};
+
+
+class HTracer final : public Malloced {
+ public:
+  explicit HTracer(int isolate_id)
+      : trace_(&string_allocator_), indent_(0) {
+    if (FLAG_trace_hydrogen_file == NULL) {
+      SNPrintF(filename_,
+               "hydrogen-%d-%d.cfg",
+               base::OS::GetCurrentProcessId(),
+               isolate_id);
+    } else {
+      StrNCpy(filename_, FLAG_trace_hydrogen_file, filename_.length());
+    }
+    WriteChars(filename_.start(), "", 0, false);
+  }
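+
+  // Usage sketch (flag spellings assumed from the FLAG_ symbols above): with
+  // --trace-hydrogen and no --trace-hydrogen-file, a process with pid 4242
+  // and isolate id 1 traces into "hydrogen-4242-1.cfg"; the .cfg output is
+  // in the format consumed by the c1visualizer tool.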
+
+  void TraceCompilation(CompilationInfo* info);
+  void TraceHydrogen(const char* name, HGraph* graph);
+  void TraceLithium(const char* name, LChunk* chunk);
+  void TraceLiveRanges(const char* name, LAllocator* allocator);
+
+ private:
+  class Tag final BASE_EMBEDDED {
+   public:
+    Tag(HTracer* tracer, const char* name) {
+      name_ = name;
+      tracer_ = tracer;
+      tracer->PrintIndent();
+      tracer->trace_.Add("begin_%s\n", name);
+      tracer->indent_++;
+    }
+
+    ~Tag() {
+      tracer_->indent_--;
+      tracer_->PrintIndent();
+      tracer_->trace_.Add("end_%s\n", name_);
+      DCHECK(tracer_->indent_ >= 0);
+      tracer_->FlushToFile();
+    }
+
+   private:
+    HTracer* tracer_;
+    const char* name_;
+  };
+
+  void TraceLiveRange(LiveRange* range, const char* type, Zone* zone);
+  void Trace(const char* name, HGraph* graph, LChunk* chunk);
+  void FlushToFile();
+
+  void PrintEmptyProperty(const char* name) {
+    PrintIndent();
+    trace_.Add("%s\n", name);
+  }
+
+  void PrintStringProperty(const char* name, const char* value) {
+    PrintIndent();
+    trace_.Add("%s \"%s\"\n", name, value);
+  }
+
+  void PrintLongProperty(const char* name, int64_t value) {
+    PrintIndent();
+    trace_.Add("%s %d000\n", name, static_cast<int>(value / 1000));
+  }
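+
+  // Example of the format above: value == 1234567 prints "name 1234000";
+  // the integer division rounds the value down to a multiple of 1000 before
+  // the literal "000" is re-appended.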
+
+  void PrintBlockProperty(const char* name, int block_id) {
+    PrintIndent();
+    trace_.Add("%s \"B%d\"\n", name, block_id);
+  }
+
+  void PrintIntProperty(const char* name, int value) {
+    PrintIndent();
+    trace_.Add("%s %d\n", name, value);
+  }
+
+  void PrintIndent() {
+    for (int i = 0; i < indent_; i++) {
+      trace_.Add("  ");
+    }
+  }
+
+  EmbeddedVector<char, 64> filename_;
+  HeapStringAllocator string_allocator_;
+  StringStream trace_;
+  int indent_;
+};
+
+
+class NoObservableSideEffectsScope final {
+ public:
+  explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
+      builder_(builder) {
+    builder_->graph()->IncrementInNoSideEffectsScope();
+  }
+  ~NoObservableSideEffectsScope() {
+    builder_->graph()->DecrementInNoSideEffectsScope();
+  }
+
+ private:
+  HGraphBuilder* builder_;
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_HYDROGEN_H_
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.cc b/src/crankshaft/ia32/lithium-codegen-ia32.cc
new file mode 100644
index 0000000..4ec33ab
--- /dev/null
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.cc
@@ -0,0 +1,5477 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/deoptimizer.h"
+#include "src/ia32/frames-ia32.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+// When invoking builtins, we need to record the safepoint in the middle of
+// the invoke instruction sequence generated by the macro assembler.
+class SafepointGenerator final : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deopt_mode_(mode) {}
+  virtual ~SafepointGenerator() {}
+
+  void BeforeCall(int call_size) const override {}
+
+  void AfterCall() const override {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  DCHECK(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+  support_aligned_spilled_doubles_ = info()->IsOptimizing();
+
+  dynamic_frame_alignment_ = info()->IsOptimizing() &&
+      ((chunk()->num_double_slots() > 2 &&
+        !chunk()->graph()->is_recursive()) ||
+       !info()->osr_ast_id().IsNone());
+
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateJumpTable() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  DCHECK(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+  }
+}
+
+
+#ifdef _MSC_VER
+void LCodeGen::MakeSureStackPagesMapped(int offset) {
+  const int kPageSize = 4 * KB;
+  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
+    __ mov(Operand(esp, offset), eax);
+  }
+}
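+// Illustrative trace of the loop above (offset == 12 KB): stores land at
+// esp + 8 KB and esp + 4 KB, one per 4 KB page. The rationale is an
+// assumption from the _MSC_VER guard: touching each page in order lets
+// Windows' stack guard page commit the region before the large frame
+// allocation below writes into it.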
+#endif
+
+
+void LCodeGen::SaveCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ movsd(MemOperand(esp, count * kDoubleSize),
+             XMMRegister::from_code(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ movsd(XMMRegister::from_code(save_iterator.Current()),
+             MemOperand(esp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  DCHECK(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+      __ int3();
+    }
+#endif
+
+    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+      // Move state of dynamic frame alignment into edx.
+      __ Move(edx, Immediate(kNoAlignmentPadding));
+
+      Label do_not_pad, align_loop;
+      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+      // Align esp + 4 to a multiple of 2 * kPointerSize.
+      __ test(esp, Immediate(kPointerSize));
+      __ j(not_zero, &do_not_pad, Label::kNear);
+      __ push(Immediate(0));
+      __ mov(ebx, esp);
+      __ mov(edx, Immediate(kAlignmentPaddingPushed));
+      // Copy arguments, receiver, and return address.
+      __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+      __ bind(&align_loop);
+      __ mov(eax, Operand(ebx, 1 * kPointerSize));
+      __ mov(Operand(ebx, 0), eax);
+      __ add(Operand(ebx), Immediate(kPointerSize));
+      __ dec(ecx);
+      __ j(not_zero, &align_loop, Label::kNear);
+      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+      __ bind(&do_not_pad);
+    }
+  }
+
+  info()->set_prologue_offset(masm_->pc_offset());
+  if (NeedsEagerFrame()) {
+    DCHECK(!frame_is_built_);
+    frame_is_built_ = true;
+    if (info()->IsStub()) {
+      __ StubPrologue();
+    } else {
+      __ Prologue(info()->GeneratePreagedPrologue());
+    }
+  }
+
+  if (info()->IsOptimizing() &&
+      dynamic_frame_alignment_ &&
+      FLAG_debug_code) {
+    __ test(esp, Immediate(kPointerSize));
+    __ Assert(zero, kFrameIsExpectedToBeAligned);
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  DCHECK(slots != 0 || !info()->IsOptimizing());
+  if (slots > 0) {
+    if (slots == 1) {
+      if (dynamic_frame_alignment_) {
+        __ push(edx);
+      } else {
+        __ push(Immediate(kNoAlignmentPadding));
+      }
+    } else {
+      if (FLAG_debug_code) {
+        __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+        MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+        __ push(eax);
+        __ mov(Operand(eax), Immediate(slots));
+        Label loop;
+        __ bind(&loop);
+        __ mov(MemOperand(esp, eax, times_4, 0),
+               Immediate(kSlotsZapValue));
+        __ dec(eax);
+        __ j(not_zero, &loop);
+        __ pop(eax);
+      } else {
+        __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+        MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+      }
+
+      if (support_aligned_spilled_doubles_) {
+        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
+        // Store dynamic frame alignment state in the first local.
+        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
+        if (dynamic_frame_alignment_) {
+          __ mov(Operand(ebp, offset), edx);
+        } else {
+          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
+        }
+      }
+    }
+
+    if (info()->saves_caller_doubles()) SaveCallerDoubles();
+  }
+  return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+  Comment(";;; Prologue begin");
+
+  // Possibly allocate a local context.
+  if (info_->num_heap_slots() > 0) {
+    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
+    // Argument to NewContext is the function, which is still in edi.
+    int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+    if (info()->scope()->is_script_scope()) {
+      __ push(edi);
+      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ CallRuntime(Runtime::kNewScriptContext);
+      deopt_mode = Safepoint::kLazyDeopt;
+    } else if (slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ push(edi);
+      __ CallRuntime(Runtime::kNewFunctionContext);
+    }
+    RecordSafepoint(deopt_mode);
+
+    // Context is returned in eax.  It replaces the context passed to us.
+    // It's saved in the stack and kept live in esi.
+    __ mov(esi, eax);
+    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
+
+    // Copy parameters into context if necessary.
+    int num_parameters = scope()->num_parameters();
+    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ mov(eax, Operand(ebp, parameter_offset));
+        // Store it in the context.
+        int context_offset = Context::SlotOffset(var->index());
+        __ mov(Operand(esi, context_offset), eax);
+        // Update the write barrier. This clobbers eax and ebx.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(esi,
+                                    context_offset,
+                                    eax,
+                                    ebx,
+                                    kDontSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  Comment(";;; Prologue end");
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or if there
+  // are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Move state of dynamic frame alignment into edx.
+  __ Move(edx, Immediate(kNoAlignmentPadding));
+
+  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+    Label do_not_pad, align_loop;
+    // Align ebp + 4 to a multiple of 2 * kPointerSize.
+    __ test(ebp, Immediate(kPointerSize));
+    __ j(zero, &do_not_pad, Label::kNear);
+    __ push(Immediate(0));
+    __ mov(ebx, esp);
+    __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+    // Move all parts of the frame over one word. The frame consists of:
+    // unoptimized frame slots, alignment state, context, frame pointer, return
+    // address, receiver, and the arguments.
+    __ mov(ecx, Immediate(scope()->num_parameters() +
+           5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+    __ bind(&align_loop);
+    __ mov(eax, Operand(ebx, 1 * kPointerSize));
+    __ mov(Operand(ebx, 0), eax);
+    __ add(Operand(ebx), Immediate(kPointerSize));
+    __ dec(ecx);
+    __ j(not_zero, &align_loop, Label::kNear);
+    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+    __ sub(Operand(ebp), Immediate(kPointerSize));
+    __ bind(&do_not_pad);
+  }
+
+  // Save the first local, which is overwritten by the alignment state.
+  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
+  __ push(alignment_loc);
+
+  // Set the dynamic frame alignment state.
+  __ mov(alignment_loc, edx);
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
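+  // Worked example (illustrative counts, not from this patch): with
+  // GetStackSlotCount() == 10 and UnoptimizedFrameSlots() == 4 we get
+  // slots == 6; the push of alignment_loc above already materialized one of
+  // those slots, so esp only drops by the remaining (slots - 1) words.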
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  DCHECK(slots >= 1);
+  __ sub(esp, Immediate((slots - 1) * kPointerSize));
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (instr->IsCall()) {
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  }
+  if (!instr->IsLazyBailout() && !instr->IsGap()) {
+    safepoints_.BumpLastLazySafepointIndex();
+  }
+}
+
+
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }
+
+
+bool LCodeGen::GenerateJumpTable() {
+  if (!jump_table_.length()) return !is_aborted();
+
+  Label needs_frame;
+  Comment(";;; -------------------- Jump table --------------------");
+
+  for (int i = 0; i < jump_table_.length(); i++) {
+    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+    __ bind(&table_entry->label);
+    Address entry = table_entry->address;
+    DeoptComment(table_entry->deopt_info);
+    if (table_entry->needs_frame) {
+      DCHECK(!info()->saves_caller_doubles());
+      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
+      __ call(&needs_frame);
+    } else {
+      if (info()->saves_caller_doubles()) RestoreCallerDoubles();
+      __ call(entry, RelocInfo::RUNTIME_ENTRY);
+    }
+    info()->LogDeoptCallPosition(masm()->pc_offset(),
+                                 table_entry->deopt_info.inlining_id);
+  }
+  if (needs_frame.is_linked()) {
+    __ bind(&needs_frame);
+    /* stack layout
+       4: entry address
+       3: return address  <-- esp
+       2: garbage
+       1: garbage
+       0: garbage
+    */
+    __ sub(esp, Immediate(kPointerSize));    // Reserve space for stub marker.
+    __ push(MemOperand(esp, kPointerSize));  // Copy return address.
+    __ push(MemOperand(esp, 3 * kPointerSize));  // Copy entry address.
+
+    /* stack layout
+       4: entry address
+       3: return address
+       2: garbage
+       1: return address
+       0: entry address  <-- esp
+    */
+    __ mov(MemOperand(esp, 4 * kPointerSize), ebp);  // Save ebp.
+    // Copy context.
+    __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
+    __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+    // Fill ebp with the right stack frame address.
+    __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+    // This variant of deopt can only be used with stubs. Since we don't
+    // have a function pointer to install in the stack frame that we're
+    // building, install a special marker there instead.
+    DCHECK(info()->IsStub());
+    __ mov(MemOperand(esp, 2 * kPointerSize),
+           Immediate(Smi::FromInt(StackFrame::STUB)));
+
+    /* stack layout
+       4: old ebp
+       3: context pointer
+       2: stub marker
+       1: return address
+       0: entry address  <-- esp
+    */
+    __ ret(0);  // Call the continuation without clobbering registers.
+  }
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  DCHECK(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(
+          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
+        frame_is_built_ = true;
+        // Build the frame in such a way that esi isn't trashed.
+        __ push(ebp);  // Caller's frame pointer.
+        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+        __ lea(ebp, Operand(esp, 2 * kPointerSize));
+        Comment(";;; Deferred code");
+      }
+      code->Generate();
+      if (NeedsDeferredFrame()) {
+        __ bind(code->done());
+        Comment(";;; Destroy frame");
+        DCHECK(frame_is_built_);
+        frame_is_built_ = false;
+        __ mov(esp, ebp);
+        __ pop(ebp);
+      }
+      __ jmp(code->exit());
+    }
+  }
+
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  DCHECK(is_done());
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    // For lazy deoptimization we need space to patch a call after every call.
+    // Ensure there is always space for such patching, even if the code ends
+    // in a call.
+    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+    while (masm()->pc_offset() < target_offset) {
+      masm()->nop();
+    }
+  }
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int code) const {
+  return Register::from_code(code);
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(int code) const {
+  return XMMRegister::from_code(code);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  DCHECK(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  DCHECK(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+                                   const Representation& r) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  if (r.IsExternal()) {
+    return reinterpret_cast<int32_t>(
+        constant->ExternalReferenceValue().address());
+  }
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  DCHECK(r.IsSmiOrTagged());
+  return reinterpret_cast<int32_t>(Smi::FromInt(value));
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+
+ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasExternalReferenceValue());
+  return constant->ExternalReferenceValue();
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
+  return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) const {
+  if (op->IsRegister()) return Operand(ToRegister(op));
+  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return Operand(ebp, StackSlotOffset(op->index()));
+  } else {
+    // No eager frame was built, so retrieve the parameter relative to the
+    // stack pointer.
+    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+
+Operand LCodeGen::HighOperand(LOperand* op) {
+  DCHECK(op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+  } else {
+    // No eager frame was built, so retrieve the parameter relative to the
+    // stack pointer.
+    return Operand(
+        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+
+  WriteTranslation(environment->outer(), translation);
+  WriteTranslationFrame(environment, translation);
+
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    AddToTranslation(
+        environment, translation, value, environment->HasTaggedValueAt(i),
+        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+  }
+}
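+
+// Sketch of the recursion above: for a function b inlined into a, the
+// environment chain is b -> outer a, and WriteTranslation(environment->outer(),
+// ...) runs first, so the translation records a's frame before b's;
+// AddToTranslation below then appends one Store* command per environment
+// value.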
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation,
+                                LOperand* op,
+                                bool is_tagged,
+                                bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment,
+                       translation,
+                       value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer,
+                       dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    if (is_tagged) {
+      translation->StoreStackSlot(index);
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(index);
+    } else {
+      translation->StoreInt32StackSlot(index);
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    translation->StoreDoubleStackSlot(index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    XMMRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  DCHECK(instr != NULL);
+  __ call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+  // Signal that we don't inline smi code before these stubs in the
+  // optimizing code generator.
+  if (code->kind() == Code::BINARY_OP_IC ||
+      code->kind() == Code::COMPARE_IC) {
+    __ nop();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* fun,
+                           int argc,
+                           LInstruction* instr,
+                           SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+  DCHECK(instr->HasPointerMap());
+
+  __ CallRuntime(fun, argc, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+  DCHECK(info()->is_calling());
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    if (!ToRegister(context).is(esi)) {
+      __ mov(esi, ToRegister(context));
+    }
+  } else if (context->IsStackSlot()) {
+    __ mov(esi, ToOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);
+
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+
+  DCHECK(info()->is_calling());
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(
+    LEnvironment* environment, Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, zone());
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            Deoptimizer::BailoutType bailout_type) {
+  LEnvironment* environment = instr->environment();
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  DCHECK(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+    return;
+  }
+
+  if (DeoptEveryNTimes()) {
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+    Label no_deopt;
+    __ pushfd();
+    __ push(eax);
+    __ mov(eax, Operand::StaticVariable(count));
+    __ sub(eax, Immediate(1));
+    __ j(not_zero, &no_deopt, Label::kNear);
+    if (FLAG_trap_on_deopt) __ int3();
+    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
+    __ mov(Operand::StaticVariable(count), eax);
+    __ pop(eax);
+    __ popfd();
+    DCHECK(frame_is_built_);
+    __ call(entry, RelocInfo::RUNTIME_ENTRY);
+    __ bind(&no_deopt);
+    __ mov(Operand::StaticVariable(count), eax);
+    __ pop(eax);
+    __ popfd();
+  }
+
+  if (info()->ShouldTrapOnDeopt()) {
+    Label done;
+    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+    __ int3();
+    __ bind(&done);
+  }
+
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
+  DCHECK(info()->IsStub() || frame_is_built_);
+  if (cc == no_condition && frame_is_built_) {
+    DeoptComment(deopt_info);
+    __ call(entry, RelocInfo::RUNTIME_ENTRY);
+    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
+  } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
+                                            !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump table entry if this is the case.
+    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+        jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
+    }
+    if (cc == no_condition) {
+      __ jmp(&jump_table_.last().label);
+    } else {
+      __ j(cc, &jump_table_.last().label);
+    }
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason) {
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
+  DCHECK(kind == expected_safepoint_kind_);
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode mode) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
+          label->block_id(),
+          LabelType(label));
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->result()).is(eax));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
+  HMod* hmod = instr->hydrogen();
+  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
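+  // E.g. for divisor == -8 the mask is 7; a dividend of -13 takes the
+  // negative path below: neg gives 13, the and leaves 5, and the second
+  // neg produces -5, matching -13 % -8 == -5 in JS.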
+  Label dividend_is_not_negative, done;
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ test(dividend, dividend);
+    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+    // Note that this is correct even for kMinInt operands.
+    __ neg(dividend);
+    __ and_(dividend, mask);
+    __ neg(dividend);
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+    }
+    __ jmp(&done, Label::kNear);
+  }
+
+  __ bind(&dividend_is_not_negative);
+  __ and_(dividend, mask);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  __ TruncatingDiv(dividend, Abs(divisor));
+  __ imul(edx, edx, Abs(divisor));
+  __ mov(eax, dividend);
+  __ sub(eax, edx);
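+  // At this point eax == dividend - (dividend / |divisor|) * |divisor|.
+  // E.g. dividend == 7, divisor == -3: TruncatingDiv leaves 2 in edx,
+  // so eax == 7 - 2 * 3 == 1, and indeed 7 % -3 == 1.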
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ j(not_zero, &remainder_not_zero, Label::kNear);
+    __ cmp(dividend, Immediate(0));
+    DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  HMod* hmod = instr->hydrogen();
+
+  Register left_reg = ToRegister(instr->left());
+  DCHECK(left_reg.is(eax));
+  Register right_reg = ToRegister(instr->right());
+  DCHECK(!right_reg.is(eax));
+  DCHECK(!right_reg.is(edx));
+  Register result_reg = ToRegister(instr->result());
+  DCHECK(result_reg.is(edx));
+
+  Label done;
+  // Check for x % 0; idiv would signal a divide error. We have to
+  // deopt in this case because we can't return a NaN.
+  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(right_reg, Operand(right_reg));
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for kMinInt % -1; idiv would signal a divide error because the
+  // quotient 2^31 is unrepresentable. We have to deopt if we care about
+  // -0, because we can't return that.
+  if (hmod->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    __ cmp(left_reg, kMinInt);
+    __ j(not_equal, &no_overflow_possible, Label::kNear);
+    __ cmp(right_reg, -1);
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
+    } else {
+      __ j(not_equal, &no_overflow_possible, Label::kNear);
+      __ Move(result_reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  // Sign extend dividend in eax into edx:eax.
+  __ cdq();
+
+  // If we care about -0, test if the dividend is <0 and the result is 0.
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label positive_left;
+    __ test(left_reg, Operand(left_reg));
+    __ j(not_sign, &positive_left, Label::kNear);
+    __ idiv(right_reg);
+    __ test(result_reg, Operand(result_reg));
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+    __ jmp(&done, Label::kNear);
+    __ bind(&positive_left);
+  }
+  __ idiv(right_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ test(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    __ cmp(dividend, kMinInt);
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+  }
+  // Deoptimize if remainder will not be 0.
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+      divisor != 1 && divisor != -1) {
+    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+    __ test(dividend, Immediate(mask));
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+  }
+  __ Move(result, dividend);
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (shift > 0) {
+    // The arithmetic shift is always OK; the 'if' is an optimization only.
+    if (shift > 1) __ sar(result, 31);
+    __ shr(result, 32 - shift);
+    __ add(result, dividend);
+    __ sar(result, shift);
+  }
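+  // E.g. divisor == 4 (shift == 2), dividend == -7: the sar(31) gives -1,
+  // the shr by 30 leaves 3 (divisor - 1), the add gives -4, and the final
+  // sar yields -1, truncating toward zero where a plain sar would give
+  // floor(-7 / 4) == -2. Negative divisors are negated below.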
+  if (divisor < 0) __ neg(result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(edx));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ test(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+
+  __ TruncatingDiv(dividend, Abs(divisor));
+  if (divisor < 0) __ neg(edx);
+
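+  // If some use does not truncate, verify the quotient by re-multiplying:
+  // e.g. dividend == 7, divisor == 3 gives edx == 2, and 2 * 3 - 7 != 0,
+  // so the lost fractional part triggers a deopt.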
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    __ mov(eax, edx);
+    __ imul(eax, eax, divisor);
+    __ sub(eax, dividend);
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  Register remainder = ToRegister(instr->temp());
+  DCHECK(dividend.is(eax));
+  DCHECK(remainder.is(edx));
+  DCHECK(ToRegister(instr->result()).is(eax));
+  DCHECK(!divisor.is(eax));
+  DCHECK(!divisor.is(edx));
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(divisor, divisor);
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ test(dividend, dividend);
+    __ j(not_zero, &dividend_not_zero, Label::kNear);
+    __ test(divisor, divisor);
+    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label dividend_not_min_int;
+    __ cmp(dividend, kMinInt);
+    __ j(not_zero, &dividend_not_min_int, Label::kNear);
+    __ cmp(divisor, -1);
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+    __ bind(&dividend_not_min_int);
+  }
+
+  // Sign extend to edx (= remainder).
+  __ cdq();
+  __ idiv(divisor);
+
+  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    // Deoptimize if remainder is not 0.
+    __ test(remainder, remainder);
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
+  if (divisor == 1) return;
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 1) {
+    __ sar(dividend, shift);
+    return;
+  }
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+  __ neg(dividend);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
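+  // E.g. dividend == 5, divisor == -4: the neg gives -5, and the sar by 2
+  // further below yields -2 == floor(5 / -4), the flooring result.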
+
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+    }
+    return;
+  }
+
+  // If the negation could not overflow, simply shifting is OK.
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ sar(dividend, shift);
+    return;
+  }
+
+  Label not_kmin_int, done;
+  __ j(no_overflow, &not_kmin_int, Label::kNear);
+  __ mov(dividend, Immediate(kMinInt / divisor));
+  __ jmp(&done, Label::kNear);
+  __ bind(&not_kmin_int);
+  __ sar(dividend, shift);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(edx));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ test(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(dividend, Abs(divisor));
+    if (divisor < 0) __ neg(edx);
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
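+  // E.g. dividend == -7, divisor == 3 takes the adjustment path below:
+  // the lea produces -6, TruncatingDiv gives -2, and the dec yields
+  // -3 == floor(-7 / 3).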
+  Register temp = ToRegister(instr->temp3());
+  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
+  Label needs_adjustment, done;
+  __ cmp(dividend, Immediate(0));
+  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+  __ TruncatingDiv(dividend, Abs(divisor));
+  if (divisor < 0) __ neg(edx);
+  __ jmp(&done, Label::kNear);
+  __ bind(&needs_adjustment);
+  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(temp, Abs(divisor));
+  if (divisor < 0) __ neg(edx);
+  __ dec(edx);
+  __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  Register remainder = ToRegister(instr->temp());
+  Register result = ToRegister(instr->result());
+  DCHECK(dividend.is(eax));
+  DCHECK(remainder.is(edx));
+  DCHECK(result.is(eax));
+  DCHECK(!divisor.is(eax));
+  DCHECK(!divisor.is(edx));
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(divisor, divisor);
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ test(dividend, dividend);
+    __ j(not_zero, &dividend_not_zero, Label::kNear);
+    __ test(divisor, divisor);
+    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label dividend_not_min_int;
+    __ cmp(dividend, kMinInt);
+    __ j(not_zero, &dividend_not_min_int, Label::kNear);
+    __ cmp(divisor, -1);
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+    __ bind(&dividend_not_min_int);
+  }
+
+  // Sign extend to edx (= remainder).
+  __ cdq();
+  __ idiv(divisor);
+
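+  // Fix up the truncated quotient when the remainder is non-zero and its
+  // sign differs from the divisor's: the xor makes the remainder register
+  // negative exactly in that case, sar(31) turns it into -1, and the add
+  // adjusts the quotient. E.g. -7 idiv 3 leaves eax == -2, edx == -1, and
+  // the fixup produces -3 == floor(-7 / 3).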
+  Label done;
+  __ test(remainder, remainder);
+  __ j(zero, &done, Label::kNear);
+  __ xor_(remainder, divisor);
+  __ sar(remainder, 31);
+  __ add(result, remainder);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register left = ToRegister(instr->left());
+  LOperand* right = instr->right();
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ mov(ToRegister(instr->temp()), left);
+  }
+
+  if (right->IsConstantOperand()) {
+    // Try strength reductions on the multiplication.
+    // All replacement instructions are at most as long as the imul
+    // and have better latency.
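+    // E.g. a multiply by 5 becomes lea left, [left + left*4] and a
+    // multiply by 16 becomes shl left, 4. lea leaves the flags untouched
+    // and shl's overflow flag does not reflect the full multiplication,
+    // hence the !kCanOverflow requirement on those replacements below.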
+    int constant = ToInteger32(LConstantOperand::cast(right));
+    if (constant == -1) {
+      __ neg(left);
+    } else if (constant == 0) {
+      __ xor_(left, Operand(left));
+    } else if (constant == 2) {
+      __ add(left, Operand(left));
+    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+      // If we know that the multiplication can't overflow, it's safe to
+      // use instructions that don't set the overflow flag for the
+      // multiplication.
+      switch (constant) {
+        case 1:
+          // Do nothing.
+          break;
+        case 3:
+          __ lea(left, Operand(left, left, times_2, 0));
+          break;
+        case 4:
+          __ shl(left, 2);
+          break;
+        case 5:
+          __ lea(left, Operand(left, left, times_4, 0));
+          break;
+        case 8:
+          __ shl(left, 3);
+          break;
+        case 9:
+          __ lea(left, Operand(left, left, times_8, 0));
+          break;
+        case 16:
+          __ shl(left, 4);
+          break;
+        default:
+          __ imul(left, left, constant);
+          break;
+      }
+    } else {
+      __ imul(left, left, constant);
+    }
+  } else {
+    if (instr->hydrogen()->representation().IsSmi()) {
+      __ SmiUntag(left);
+    }
+    __ imul(left, ToOperand(right));
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bail out if the result is supposed to be negative zero.
+    Label done;
+    __ test(left, Operand(left));
+    __ j(not_zero, &done, Label::kNear);
+    if (right->IsConstantOperand()) {
+      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
+        DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
+        __ cmp(ToRegister(instr->temp()), Immediate(0));
+        DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+      }
+    } else {
+      // Test the non-zero operand for negative sign.
+      __ or_(ToRegister(instr->temp()), ToOperand(right));
+      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
+
+  if (right->IsConstantOperand()) {
+    int32_t right_operand =
+        ToRepresentation(LConstantOperand::cast(right),
+                         instr->hydrogen()->representation());
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ and_(ToRegister(left), right_operand);
+        break;
+      case Token::BIT_OR:
+        __ or_(ToRegister(left), right_operand);
+        break;
+      case Token::BIT_XOR:
+        if (right_operand == int32_t(~0)) {
+          __ not_(ToRegister(left));
+        } else {
+          __ xor_(ToRegister(left), right_operand);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ and_(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_OR:
+        __ or_(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_XOR:
+        __ xor_(ToRegister(left), ToOperand(right));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
+  if (right->IsRegister()) {
+    DCHECK(ToRegister(right).is(ecx));
+
+    switch (instr->op()) {
+      case Token::ROR:
+        __ ror_cl(ToRegister(left));
+        break;
+      case Token::SAR:
+        __ sar_cl(ToRegister(left));
+        break;
+      case Token::SHR:
+        __ shr_cl(ToRegister(left));
+        if (instr->can_deopt()) {
+          __ test(ToRegister(left), ToRegister(left));
+          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case Token::SHL:
+        __ shl_cl(ToRegister(left));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    int value = ToInteger32(LConstantOperand::cast(right));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
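+    // x86 shift instructions use only the low five bits of the count, so
+    // the immediate is masked to the 0..31 range up front.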
+    switch (instr->op()) {
+      case Token::ROR:
+        if (shift_count == 0 && instr->can_deopt()) {
+          __ test(ToRegister(left), ToRegister(left));
+          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+        } else {
+          __ ror(ToRegister(left), shift_count);
+        }
+        break;
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sar(ToRegister(left), shift_count);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ shr(ToRegister(left), shift_count);
+        } else if (instr->can_deopt()) {
+          __ test(ToRegister(left), ToRegister(left));
+          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          if (instr->hydrogen_value()->representation().IsSmi() &&
+              instr->can_deopt()) {
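+            // Perform the last shift via SmiTag (an add of the register
+            // to itself on ia32) so the overflow flag reflects the final
+            // bit shifted out and can be used to deopt.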
+            if (shift_count != 1) {
+              __ shl(ToRegister(left), shift_count - 1);
+            }
+            __ SmiTag(ToRegister(left));
+            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+          } else {
+            __ shl(ToRegister(left), shift_count);
+          }
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+
+  if (right->IsConstantOperand()) {
+    __ sub(ToOperand(left),
+           ToImmediate(right, instr->hydrogen()->representation()));
+  } else {
+    __ sub(ToRegister(left), ToOperand(right));
+  }
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  uint64_t const bits = instr->bits();
+  uint32_t const lower = static_cast<uint32_t>(bits);
+  uint32_t const upper = static_cast<uint32_t>(bits >> 32);
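+  // E.g. the double 1.0 has the bit pattern 0x3FF0000000000000, i.e.
+  // lower == 0 and upper == 0x3FF00000, so only the upper word needs to
+  // be inserted below.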
+  DCHECK(instr->result()->IsDoubleRegister());
+
+  XMMRegister result = ToDoubleRegister(instr->result());
+  if (bits == 0u) {
+    __ xorps(result, result);
+  } else {
+    Register temp = ToRegister(instr->temp());
+    if (CpuFeatures::IsSupported(SSE4_1)) {
+      CpuFeatureScope scope2(masm(), SSE4_1);
+      if (lower != 0) {
+        __ Move(temp, Immediate(lower));
+        __ movd(result, Operand(temp));
+        __ Move(temp, Immediate(upper));
+        __ pinsrd(result, Operand(temp), 1);
+      } else {
+        __ xorps(result, result);
+        __ Move(temp, Immediate(upper));
+        __ pinsrd(result, Operand(temp), 1);
+      }
+    } else {
+      __ Move(temp, Immediate(upper));
+      __ movd(result, Operand(temp));
+      __ psllq(result, 32);
+      if (lower != 0u) {
+        XMMRegister xmm_scratch = double_scratch0();
+        __ Move(temp, Immediate(lower));
+        __ movd(xmm_scratch, Operand(temp));
+        __ orps(result, xmm_scratch);
+      }
+    }
+  }
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Register reg = ToRegister(instr->result());
+  Handle<Object> object = instr->value(isolate());
+  AllowDeferredHandleDereference smi_check;
+  __ LoadObject(reg, object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->value());
+  __ EnumLength(result, map);
+}
+
+
+Operand LCodeGen::BuildSeqStringOperand(Register string,
+                                        LOperand* index,
+                                        String::Encoding encoding) {
+  if (index->IsConstantOperand()) {
+    int offset = ToRepresentation(LConstantOperand::cast(index),
+                                  Representation::Integer32());
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset *= kUC16Size;
+    }
+    STATIC_ASSERT(kCharSize == 1);
+    return FieldOperand(string, SeqString::kHeaderSize + offset);
+  }
+  return FieldOperand(
+      string, ToRegister(index),
+      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
+      SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register result = ToRegister(instr->result());
+  Register string = ToRegister(instr->string());
+
+  if (FLAG_debug_code) {
+    __ push(string);
+    __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
+    __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
+
+    __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+                             ? one_byte_seq_type : two_byte_seq_type));
+    __ Check(equal, kUnexpectedStringType);
+    __ pop(string);
+  }
+
+  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ movzx_b(result, operand);
+  } else {
+    __ movzx_w(result, operand);
+  }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+
+  if (FLAG_debug_code) {
+    Register value = ToRegister(instr->value());
+    Register index = ToRegister(instr->index());
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    int encoding_mask =
+        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+        ? one_byte_seq_type : two_byte_seq_type;
+    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+  }
+
+  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (instr->value()->IsConstantOperand()) {
+    int value = ToRepresentation(LConstantOperand::cast(instr->value()),
+                                 Representation::Integer32());
+    DCHECK_LE(0, value);
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      DCHECK_LE(value, String::kMaxOneByteCharCode);
+      __ mov_b(operand, static_cast<int8_t>(value));
+    } else {
+      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
+      __ mov_w(operand, static_cast<int16_t>(value));
+    }
+  } else {
+    Register value = ToRegister(instr->value());
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      __ mov_b(operand, value);
+    } else {
+      __ mov_w(operand, value);
+    }
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+
+  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
+    if (right->IsConstantOperand()) {
+      int32_t offset = ToRepresentation(LConstantOperand::cast(right),
+                                        instr->hydrogen()->representation());
+      __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
+    } else {
+      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
+      __ lea(ToRegister(instr->result()), address);
+    }
+  } else {
+    if (right->IsConstantOperand()) {
+      __ add(ToOperand(left),
+             ToImmediate(right, instr->hydrogen()->representation()));
+    } else {
+      __ add(ToRegister(left), ToOperand(right));
+    }
+    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+    }
+  }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+  HMathMinMax::Operation operation = instr->hydrogen()->operation();
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Label return_left;
+    Condition condition = (operation == HMathMinMax::kMathMin)
+        ? less_equal
+        : greater_equal;
+    if (right->IsConstantOperand()) {
+      Operand left_op = ToOperand(left);
+      Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
+                                        instr->hydrogen()->representation());
+      __ cmp(left_op, immediate);
+      __ j(condition, &return_left, Label::kNear);
+      __ mov(left_op, immediate);
+    } else {
+      Register left_reg = ToRegister(left);
+      Operand right_op = ToOperand(right);
+      __ cmp(left_reg, right_op);
+      __ j(condition, &return_left, Label::kNear);
+      __ mov(left_reg, right_op);
+    }
+    __ bind(&return_left);
+  } else {
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    Label check_nan_left, check_zero, return_left, return_right;
+    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
+    XMMRegister left_reg = ToDoubleRegister(left);
+    XMMRegister right_reg = ToDoubleRegister(right);
+    __ ucomisd(left_reg, right_reg);
+    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
+    __ j(equal, &check_zero, Label::kNear);  // left == right.
+    __ j(condition, &return_left, Label::kNear);
+    __ jmp(&return_right, Label::kNear);
+
+    __ bind(&check_zero);
+    XMMRegister xmm_scratch = double_scratch0();
+    __ xorps(xmm_scratch, xmm_scratch);
+    __ ucomisd(left_reg, xmm_scratch);
+    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
+    // At this point, both left and right are either 0 or -0.
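+    // orpd keeps a sign bit from either operand, so min(+0, -0) is -0;
+    // addsd of +0 and -0 is +0, so max(+0, -0) is +0, as required.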
+    if (operation == HMathMinMax::kMathMin) {
+      __ orpd(left_reg, right_reg);
+    } else {
+      // Since we operate on +0 and/or -0, addsd and andsd have the same effect.
+      __ addsd(left_reg, right_reg);
+    }
+    __ jmp(&return_left, Label::kNear);
+
+    __ bind(&check_nan_left);
+    __ ucomisd(left_reg, left_reg);  // NaN check.
+    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
+    __ bind(&return_right);
+    __ movaps(left_reg, right_reg);
+
+    __ bind(&return_left);
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  XMMRegister left = ToDoubleRegister(instr->left());
+  XMMRegister right = ToDoubleRegister(instr->right());
+  XMMRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      if (CpuFeatures::IsSupported(AVX)) {
+        CpuFeatureScope scope(masm(), AVX);
+        __ vaddsd(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ addsd(left, right);
+      }
+      break;
+    case Token::SUB:
+      if (CpuFeatures::IsSupported(AVX)) {
+        CpuFeatureScope scope(masm(), AVX);
+        __ vsubsd(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ subsd(left, right);
+      }
+      break;
+    case Token::MUL:
+      if (CpuFeatures::IsSupported(AVX)) {
+        CpuFeatureScope scope(masm(), AVX);
+        __ vmulsd(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ mulsd(left, right);
+      }
+      break;
+    case Token::DIV:
+      if (CpuFeatures::IsSupported(AVX)) {
+        CpuFeatureScope scope(masm(), AVX);
+        __ vdivsd(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ divsd(left, right);
+      }
+      // Don't delete this mov. It may improve performance on some CPUs
+      // when there is a (v)mulsd depending on the result.
+      __ movaps(result, result);
+      break;
+    case Token::MOD: {
+      // Pass two doubles as arguments on the stack.
+      __ PrepareCallCFunction(4, eax);
+      __ movsd(Operand(esp, 0 * kDoubleSize), left);
+      __ movsd(Operand(esp, 1 * kDoubleSize), right);
+      __ CallCFunction(
+          ExternalReference::mod_two_doubles_operation(isolate()),
+          4);
+
+      // Return value is in st(0) on ia32.
+      // Store it into the result register.
+      __ sub(Operand(esp), Immediate(kDoubleSize));
+      __ fstp_d(Operand(esp, 0));
+      __ movsd(result, Operand(esp, 0));
+      __ add(Operand(esp), Immediate(kDoubleSize));
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(edx));
+  DCHECK(ToRegister(instr->right()).is(eax));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+
+  if (right_block == left_block || cc == no_condition) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+  } else if (right_block == next_block) {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+  } else {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+    __ jmp(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
+  int true_block = instr->TrueDestination(chunk_);
+  if (cc == no_condition) {
+    __ jmp(chunk_->GetAssemblyLabel(true_block));
+  } else {
+    __ j(cc, chunk_->GetAssemblyLabel(true_block));
+  }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
+  int false_block = instr->FalseDestination(chunk_);
+  if (cc == no_condition) {
+    __ jmp(chunk_->GetAssemblyLabel(false_block));
+  } else {
+    __ j(cc, chunk_->GetAssemblyLabel(false_block));
+  }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsSmiOrInteger32()) {
+    Register reg = ToRegister(instr->value());
+    __ test(reg, Operand(reg));
+    EmitBranch(instr, not_zero);
+  } else if (r.IsDouble()) {
+    DCHECK(!info()->IsStub());
+    XMMRegister reg = ToDoubleRegister(instr->value());
+    XMMRegister xmm_scratch = double_scratch0();
+    __ xorps(xmm_scratch, xmm_scratch);
+    __ ucomisd(reg, xmm_scratch);
+    EmitBranch(instr, not_equal);
+  } else {
+    DCHECK(r.IsTagged());
+    Register reg = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      DCHECK(!info()->IsStub());
+      __ cmp(reg, factory()->true_value());
+      EmitBranch(instr, equal);
+    } else if (type.IsSmi()) {
+      DCHECK(!info()->IsStub());
+      __ test(reg, Operand(reg));
+      EmitBranch(instr, not_equal);
+    } else if (type.IsJSArray()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, no_condition);
+    } else if (type.IsHeapNumber()) {
+      DCHECK(!info()->IsStub());
+      XMMRegister xmm_scratch = double_scratch0();
+      __ xorps(xmm_scratch, xmm_scratch);
+      __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+      EmitBranch(instr, not_equal);
+    } else if (type.IsString()) {
+      DCHECK(!info()->IsStub());
+      __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+      EmitBranch(instr, not_equal);
+    } else {
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ cmp(reg, factory()->undefined_value());
+        __ j(equal, instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // true -> true.
+        __ cmp(reg, factory()->true_value());
+        __ j(equal, instr->TrueLabel(chunk_));
+        // false -> false.
+        __ cmp(reg, factory()->false_value());
+        __ j(equal, instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ cmp(reg, factory()->null_value());
+        __ j(equal, instr->FalseLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ test(reg, Operand(reg));
+        __ j(equal, instr->FalseLabel(chunk_));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ test(reg, Immediate(kSmiTagMask));
+        DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+      }
+
+      Register map = no_reg;  // Keep the compiler happy.
+      if (expected.NeedsMap()) {
+        map = ToRegister(instr->temp());
+        DCHECK(!map.is(reg));
+        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+                    1 << Map::kIsUndetectable);
+          __ j(not_zero, instr->FalseLabel(chunk_));
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
+        __ j(above_equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+        __ j(above_equal, &not_string, Label::kNear);
+        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+        __ j(not_zero, instr->TrueLabel(chunk_));
+        __ jmp(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CmpInstanceType(map, SYMBOL_TYPE);
+        __ j(equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+        // SIMD value -> true.
+        __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
+        __ j(equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        Label not_heap_number;
+        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+               factory()->heap_number_map());
+        __ j(not_equal, &not_heap_number, Label::kNear);
+        XMMRegister xmm_scratch = double_scratch0();
+        __ xorps(xmm_scratch, xmm_scratch);
+        __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+        __ j(zero, instr->FalseLabel(chunk_));
+        __ jmp(instr->TrueLabel(chunk_));
+        __ bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
+      }
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  if (!IsNextEmittedBlock(block)) {
+    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = no_condition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = equal;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = not_equal;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? below : less;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? above : greater;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? below_equal : less_equal;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? above_equal : greater_equal;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  bool is_unsigned =
+      instr->is_double() ||
+      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+  Condition cc = TokenToCondition(instr->op(), is_unsigned);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+      // Don't base the result on EFLAGS when a NaN is involved: ucomisd
+      // sets the parity flag for unordered operands, so jump to the false
+      // block instead.
+      __ j(parity_even, instr->FalseLabel(chunk_));
+    } else {
+      if (right->IsConstantOperand()) {
+        __ cmp(ToOperand(left),
+               ToImmediate(right, instr->hydrogen()->representation()));
+      } else if (left->IsConstantOperand()) {
+        __ cmp(ToOperand(right),
+               ToImmediate(left, instr->hydrogen()->representation()));
+        // We commuted the operands, so commute the condition.
+        cc = CommuteCondition(cc);
+      } else {
+        __ cmp(ToRegister(left), ToOperand(right));
+      }
+    }
+    EmitBranch(instr, cc);
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+
+  if (instr->right()->IsConstantOperand()) {
+    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
+    __ CmpObject(left, right);
+  } else {
+    Operand right = ToOperand(instr->right());
+    __ cmp(left, right);
+  }
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+  if (instr->hydrogen()->representation().IsTagged()) {
+    Register input_reg = ToRegister(instr->object());
+    __ cmp(input_reg, factory()->the_hole_value());
+    EmitBranch(instr, equal);
+    return;
+  }
+
+  XMMRegister input_reg = ToDoubleRegister(instr->object());
+  __ ucomisd(input_reg, input_reg);
+  EmitFalseBranch(instr, parity_odd);
+
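+  // The hole is a NaN with a distinguished upper word; the parity check
+  // above filters out ordered (non-NaN) values. Spill the double and
+  // compare just its upper 32 bits, which end up at esp - 4 once the
+  // stack pointer has been restored.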
+  __ sub(esp, Immediate(kDoubleSize));
+  __ movsd(MemOperand(esp, 0), input_reg);
+
+  __ add(esp, Immediate(kDoubleSize));
+  int offset = sizeof(kHoleNanUpper32);
+  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+  Representation rep = instr->hydrogen()->value()->representation();
+  DCHECK(!rep.IsInteger32());
+  Register scratch = ToRegister(instr->temp());
+
+  if (rep.IsDouble()) {
+    XMMRegister value = ToDoubleRegister(instr->value());
+    XMMRegister xmm_scratch = double_scratch0();
+    __ xorps(xmm_scratch, xmm_scratch);
+    __ ucomisd(xmm_scratch, value);
+    EmitFalseBranch(instr, not_equal);
+    __ movmskpd(scratch, value);
+    __ test(scratch, Immediate(1));
+    EmitBranch(instr, not_zero);
+  } else {
+    Register value = ToRegister(instr->value());
+    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
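+    // -0.0 has the bit pattern 0x8000000000000000. The cmp of the upper
+    // word against 1 overflows only when that word is 0x80000000 (INT_MIN
+    // minus one wraps), so no_overflow rejects everything but a -0 upper
+    // word; the mantissa word is then checked for zero below.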
+    __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
+           Immediate(0x1));
+    EmitFalseBranch(instr, no_overflow);
+    __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
+           Immediate(0x00000000));
+    EmitBranch(instr, equal);
+  }
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+
+  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+  return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->type().IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
+  Condition true_cond = EmitIsString(
+      reg, temp, instr->FalseLabel(chunk_), check_needed);
+
+  EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Operand input = ToOperand(instr->value());
+
+  __ test(input, Immediate(kSmiTagMask));
+  EmitBranch(instr, zero);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
+            1 << Map::kIsUndetectable);
+  EmitBranch(instr, not_zero);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(edx));
+  DCHECK(ToRegister(instr->right()).is(eax));
+
+  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+  __ test(eax, eax);
+
+  EmitBranch(instr, ComputeCompareCondition(instr->op()));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  DCHECK(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return equal;
+  if (to == LAST_TYPE) return above_equal;
+  if (from == FIRST_TYPE) return below_equal;
+  UNREACHABLE();
+  return equal;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+
+  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
+  EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ AssertString(input);
+
+  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+
+  __ test(FieldOperand(input, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  EmitBranch(instr, equal);
+}
+
+
+// Branches to a label or falls through with the answer in the z flag.  Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
+  __ JumpIfSmi(input, is_false);
+
+  __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+    __ j(equal, is_true);
+  } else {
+    __ j(equal, is_false);
+  }
+
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+  // Check if the constructor in the map is a function.
+  __ GetMapConstructor(temp, temp, temp2);
+  // Objects with a non-function constructor have class 'Object'.
+  __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
+    __ j(not_equal, is_true);
+  } else {
+    __ j(not_equal, is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(temp, FieldOperand(temp,
+                            SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are internalized it is sufficient to use an
+  // identity comparison.
+  __ cmp(temp, class_name);
+  // End with the answer in the z flag.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+      class_name, input, temp, temp2);
+
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+  DCHECK(ToRegister(instr->result()).is(eax));
+  InstanceOfStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+    LHasInPrototypeChainAndBranch* instr) {
+  Register const object = ToRegister(instr->object());
+  Register const object_map = ToRegister(instr->scratch());
+  Register const object_prototype = object_map;
+  Register const prototype = ToRegister(instr->prototype());
+
+  // The {object} must be a spec object.  It's sufficient to know that {object}
+  // is not a smi, since all other non-spec objects have {null} prototypes and
+  // will be ruled out below.
+  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+    __ test(object, Immediate(kSmiTagMask));
+    EmitFalseBranch(instr, zero);
+  }
+
+  // Loop through the {object}'s prototype chain looking for the {prototype}.
+  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+  Label loop;
+  __ bind(&loop);
+
+  // Deoptimize if the object needs to be access checked.
+  __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
+            1 << Map::kIsAccessCheckNeeded);
+  DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+  // Deoptimize for proxies.
+  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+  DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+
+  __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
+  __ cmp(object_prototype, prototype);
+  EmitTrueBranch(instr, equal);
+  __ cmp(object_prototype, factory()->null_value());
+  EmitFalseBranch(instr, equal);
+  __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+  __ jmp(&loop);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  Token::Value op = instr->op();
+
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = ComputeCompareCondition(op);
+  Label true_value, done;
+  __ test(eax, Operand(eax));
+  __ j(condition, &true_value, Label::kNear);
+  __ mov(ToRegister(instr->result()), factory()->false_value());
+  __ jmp(&done, Label::kNear);
+  __ bind(&true_value);
+  __ mov(ToRegister(instr->result()), factory()->true_value());
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
+  int extra_value_count = dynamic_frame_alignment ? 2 : 1;
+
+  if (instr->has_constant_parameter_count()) {
+    int parameter_count = ToInteger32(instr->constant_parameter_count());
+    if (dynamic_frame_alignment && FLAG_debug_code) {
+      __ cmp(Operand(esp,
+                     (parameter_count + extra_value_count) * kPointerSize),
+             Immediate(kAlignmentZapValue));
+      __ Assert(equal, kExpectedAlignmentMarker);
+    }
+    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
+  } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
+    Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi.
+    __ SmiUntag(reg);
+    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
+    if (dynamic_frame_alignment && FLAG_debug_code) {
+      DCHECK(extra_value_count == 2);
+      __ cmp(Operand(esp, reg, times_pointer_size,
+                     extra_value_count * kPointerSize),
+             Immediate(kAlignmentZapValue));
+      __ Assert(equal, kExpectedAlignmentMarker);
+    }
+
+    // Emit code to restore the stack based on instr->parameter_count().
+    __ pop(return_addr_reg);  // Save the return address.
+    if (dynamic_frame_alignment) {
+      __ inc(reg);  // One more for the alignment padding.
+    }
+
+    __ shl(reg, kPointerSizeLog2);
+    __ add(esp, reg);
+    __ jmp(return_addr_reg);
+  }
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // Preserve the return value on the stack and rely on the runtime call
+    // to return the value in the same register.  We're leaving the code
+    // managed by the register allocator and tearing down the frame, it's
+    // safe to write to the context register.
+    __ push(eax);
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ CallRuntime(Runtime::kTraceExit);
+  }
+  if (info()->saves_caller_doubles()) RestoreCallerDoubles();
+  if (dynamic_frame_alignment_) {
+    // Fetch the state of the dynamic frame alignment.
+    __ mov(edx, Operand(ebp,
+      JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
+  }
+  if (NeedsEagerFrame()) {
+    __ mov(esp, ebp);
+    __ pop(ebp);
+  }
+  if (dynamic_frame_alignment_) {
+    Label no_padding;
+    __ cmp(edx, Immediate(kNoAlignmentPadding));
+    __ j(equal, &no_padding, Label::kNear);
+
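+    // Alignment padding was recorded at frame entry, so return via the
+    // variant that drops one extra stack slot for the padding word.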
+    EmitReturn(instr, true);
+    __ bind(&no_padding);
+  }
+
+  EmitReturn(instr, false);
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(eax));
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ mov(vector_register, vector);
+  // No need to allocate the slot register; the descriptor fixes it to eax.
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ mov(slot_register, Immediate(Smi::FromInt(index)));
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = ToRegister(instr->temp_slot());
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ mov(vector_register, vector);
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ mov(slot_register, Immediate(Smi::FromInt(index)));
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  __ mov(LoadDescriptor::NameRegister(), instr->name());
+  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+                                         SLOPPY, PREMONOMORPHIC).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ mov(result, ContextOperand(context, instr->slot_index()));
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(result, factory()->the_hole_value());
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+    } else {
+      Label is_not_hole;
+      __ j(not_equal, &is_not_hole, Label::kNear);
+      __ mov(result, factory()->undefined_value());
+      __ bind(&is_not_hole);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+
+  Label skip_assignment;
+
+  Operand target = ContextOperand(context, instr->slot_index());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(target, factory()->the_hole_value());
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+    } else {
+      __ j(not_equal, &skip_assignment, Label::kNear);
+    }
+  }
+
+  __ mov(target, value);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    Register temp = ToRegister(instr->temp());
+    int offset = Context::SlotOffset(instr->slot_index());
+    __ RecordWriteContextSlot(context,
+                              offset,
+                              value,
+                              temp,
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
+  }
+
+  __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    Register result = ToRegister(instr->result());
+    MemOperand operand = instr->object()->IsConstantOperand()
+        ? MemOperand::StaticVariable(ToExternalReference(
+                LConstantOperand::cast(instr->object())))
+        : MemOperand(ToRegister(instr->object()), offset);
+    __ Load(result, operand, access.representation());
+    return;
+  }
+
+  Register object = ToRegister(instr->object());
+  if (instr->hydrogen()->representation().IsDouble()) {
+    XMMRegister result = ToDoubleRegister(instr->result());
+    __ movsd(result, FieldOperand(object, offset));
+    return;
+  }
+
+  Register result = ToRegister(instr->result());
+  if (!access.IsInobject()) {
+    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+    object = result;
+  }
+  __ Load(result, FieldOperand(object, offset), access.representation());
+}
+
+
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+  DCHECK(!operand->IsDoubleRegister());
+  if (operand->IsConstantOperand()) {
+    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+    AllowDeferredHandleDereference smi_check;
+    if (object->IsSmi()) {
+      __ Push(Handle<Smi>::cast(object));
+    } else {
+      __ PushHeapObject(Handle<HeapObject>::cast(object));
+    }
+  } else if (operand->IsRegister()) {
+    __ push(ToRegister(operand));
+  } else {
+    __ push(ToOperand(operand));
+  }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  __ mov(LoadDescriptor::NameRegister(), instr->name());
+  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(
+          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
+          instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register function = ToRegister(instr->function());
+  Register temp = ToRegister(instr->temp());
+  Register result = ToRegister(instr->result());
+
+  // Get the prototype or initial map from the function.
+  __ mov(result,
+         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
+  DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ CmpObjectType(result, MAP_TYPE, temp);
+  __ j(not_equal, &done, Label::kNear);
+
+  // Get the prototype from the initial map.
+  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+  if (instr->length()->IsConstantOperand() &&
+      instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
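+    // Same computation as the register case below, folded into a constant:
+    // the element lives (length - index + 1) words above the arguments
+    // pointer.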
+    int index = (const_length - const_index) + 1;
+    __ mov(result, Operand(arguments, index * kPointerSize));
+  } else {
+    Register length = ToRegister(instr->length());
+    Operand index = ToOperand(instr->index());
+    // There are two words between the frame pointer and the last argument.
+    // Subtracting the index from the length accounts for one of them; add
+    // one more to skip the other.
+    __ sub(length, index);
+    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = instr->key();
+  if (!key->IsConstantOperand() &&
+      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+                                  elements_kind)) {
+    __ SmiUntag(ToRegister(key));
+  }
+  Operand operand(BuildFastArrayOperand(
+      instr->elements(),
+      key,
+      instr->hydrogen()->key()->representation(),
+      elements_kind,
+      instr->base_offset()));
+  if (elements_kind == FLOAT32_ELEMENTS) {
+    XMMRegister result(ToDoubleRegister(instr->result()));
+    __ movss(result, operand);
+    __ cvtss2sd(result, result);
+  } else if (elements_kind == FLOAT64_ELEMENTS) {
+    __ movsd(ToDoubleRegister(instr->result()), operand);
+  } else {
+    Register result(ToRegister(instr->result()));
+    switch (elements_kind) {
+      case INT8_ELEMENTS:
+        __ movsx_b(result, operand);
+        break;
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ movzx_b(result, operand);
+        break;
+      case INT16_ELEMENTS:
+        __ movsx_w(result, operand);
+        break;
+      case UINT16_ELEMENTS:
+        __ movzx_w(result, operand);
+        break;
+      case INT32_ELEMENTS:
+        __ mov(result, operand);
+        break;
+      case UINT32_ELEMENTS:
+        __ mov(result, operand);
+        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
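+          // A uint32 value with the sign bit set does not fit in an int32;
+          // unless all uses can treat it as uint32, deoptimize.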
+          __ test(result, Operand(result));
+          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    Operand hole_check_operand = BuildFastArrayOperand(
+        instr->elements(), instr->key(),
+        instr->hydrogen()->key()->representation(),
+        FAST_DOUBLE_ELEMENTS,
+        instr->base_offset() + sizeof(kHoleNanLower32));
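+    // The hole is a NaN with a distinguished upper word, so comparing only
+    // the upper 32 bits (offset by the lower word's size) suffices.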
+    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+    DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+  }
+
+  Operand double_load_operand = BuildFastArrayOperand(
+      instr->elements(),
+      instr->key(),
+      instr->hydrogen()->key()->representation(),
+      FAST_DOUBLE_ELEMENTS,
+      instr->base_offset());
+  XMMRegister result = ToDoubleRegister(instr->result());
+  __ movsd(result, double_load_operand);
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+  Register result = ToRegister(instr->result());
+
+  // Load the result.
+  __ mov(result,
+         BuildFastArrayOperand(instr->elements(), instr->key(),
+                               instr->hydrogen()->key()->representation(),
+                               FAST_ELEMENTS, instr->base_offset()));
+
+  // Check for the hole value.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+      __ test(result, Immediate(kSmiTagMask));
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
+    } else {
+      __ cmp(result, factory()->the_hole_value());
+      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+    }
+  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
+    Label done;
+    __ cmp(result, factory()->the_hole_value());
+    __ j(not_equal, &done);
+    if (info()->IsStub()) {
+      // A stub can safely convert the hole to undefined only if the array
+      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // it needs to bail out.
+      __ mov(result, isolate()->factory()->array_protector());
+      __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
+             Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
+    }
+    __ mov(result, isolate()->factory()->undefined_value());
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+  if (instr->is_fixed_typed_array()) {
+    DoLoadKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->representation().IsDouble()) {
+    DoLoadKeyedFixedDoubleArray(instr);
+  } else {
+    DoLoadKeyedFixedArray(instr);
+  }
+}
+
+
+Operand LCodeGen::BuildFastArrayOperand(
+    LOperand* elements_pointer,
+    LOperand* key,
+    Representation key_representation,
+    ElementsKind elements_kind,
+    uint32_t base_offset) {
+  Register elements_pointer_reg = ToRegister(elements_pointer);
+  int element_shift_size = ElementsKindToShiftSize(elements_kind);
+  int shift_size = element_shift_size;
+  if (key->IsConstantOperand()) {
+    int constant_value = ToInteger32(LConstantOperand::cast(key));
+    if (constant_value & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    return Operand(elements_pointer_reg,
+                   ((constant_value) << shift_size)
+                       + base_offset);
+  } else {
+    // Take the tag bit into account while computing the shift size.
+    if (key_representation.IsSmi() && (shift_size >= 1)) {
+      shift_size -= kSmiTagSize;
+    }
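+    // E.g. a FAST_ELEMENTS access keyed by a smi: the element shift is
+    // kPointerSizeLog2, but the key register still holds key << kSmiTagSize,
+    // so shrinking the scale by one keeps base + key * kPointerSize intact.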
+    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+    return Operand(elements_pointer_reg,
+                   ToRegister(key),
+                   scale_factor,
+                   base_offset);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+                        isolate(), instr->hydrogen()->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
+    __ lea(result, Operand(esp, -2 * kPointerSize));
+  } else {
+    // Check for arguments adapter frame.
+    Label done, adapted;
+    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+    __ cmp(Operand(result),
+           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ j(equal, &adapted, Label::kNear);
+
+    // No arguments adaptor frame.
+    __ mov(result, Operand(ebp));
+    __ jmp(&done, Label::kNear);
+
+    // Arguments adaptor frame present.
+    __ bind(&adapted);
+    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+    // The result is the frame pointer of this frame if not adapted, or of
+    // the real frame below the adaptor frame if adapted.
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Operand elem = ToOperand(instr->elements());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If no arguments adaptor frame the number of arguments is fixed.
+  __ cmp(ebp, elem);
+  __ mov(result, Immediate(scope()->num_parameters()));
+  __ j(equal, &done, Label::kNear);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(result, Operand(result,
+                         ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
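+  // E.g. in sloppy mode (function() { return this; })() observes the global
+  // proxy, while the strict-mode equivalent observes undefined.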
+  Label receiver_ok, global_object;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+  Register scratch = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->known_function()) {
+    // Do not transform the receiver to object for strict mode
+    // functions.
+    __ mov(scratch,
+           FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
+              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+    __ j(not_equal, &receiver_ok, dist);
+
+    // Do not transform the receiver to object for builtins.
+    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
+              1 << SharedFunctionInfo::kNativeBitWithinByte);
+    __ j(not_equal, &receiver_ok, dist);
+  }
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ cmp(receiver, factory()->null_value());
+  __ j(equal, &global_object, Label::kNear);
+  __ cmp(receiver, factory()->undefined_value());
+  __ j(equal, &global_object, Label::kNear);
+
+  // The receiver should be a JS object.
+  __ test(receiver, Immediate(kSmiTagMask));
+  DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+  __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
+  DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
+
+  __ jmp(&receiver_ok, Label::kNear);
+  __ bind(&global_object);
+  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
+  __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
+  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
+  __ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  DCHECK(receiver.is(eax));  // Used for parameter count.
+  DCHECK(function.is(edi));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  __ cmp(length, kArgumentsLimit);
+  DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
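+  // At most 1 * KB (1024) arguments are copied inline; longer argument
+  // lists deoptimize rather than push an unbounded number of words onto
+  // the stack.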
+
+  __ push(receiver);
+  __ mov(receiver, length);
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // The length is a small non-negative integer, due to the test above.
+  __ test(length, Operand(length));
+  __ j(zero, &invoke, Label::kNear);
+  __ bind(&loop);
+  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+  __ dec(length);
+  __ j(not_zero, &loop);
+
+  // Invoke the function.
+  __ bind(&invoke);
+  DCHECK(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  ParameterCount actual(eax);
+  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+                    safepoint_generator);
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+  __ int3();
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->value();
+  EmitPushTaggedOperand(argument);
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  Register result = ToRegister(instr->result());
+  if (info()->IsOptimizing()) {
+    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in esi.
+    DCHECK(result.is(esi));
+  }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  __ push(Immediate(instr->hydrogen()->pairs()));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
+  CallRuntime(Runtime::kDeclareGlobals, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count, int arity,
+                                 LInstruction* instr) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
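+  // A direct call skips the arguments adaptor frame: either the callee never
+  // adapts, or the call site passes exactly the declared parameter count.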
+
+  Register function_reg = edi;
+
+  if (can_invoke_directly) {
+    // Change context.
+    __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
+
+    // Always initialize new target and number of actual arguments.
+    __ mov(edx, factory()->undefined_value());
+    __ mov(eax, arity);
+
+    // Invoke function directly.
+    if (function.is_identical_to(info()->closure())) {
+      __ CallSelf();
+    } else {
+      __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+    }
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    // We need to adapt arguments.
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(
+        this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+  }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ leave();
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ jmp(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ jmp(target);
+    }
+  } else {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      __ call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(Operand(target)));
+      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ call(target);
+    }
+    generator.AfterCall();
+  }
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  // Change context.
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Always initialize new target and number of actual arguments.
+  __ mov(edx, factory()->undefined_value());
+  __ mov(eax, instr->arity());
+
+  bool is_self_call = false;
+  if (instr->hydrogen()->function()->IsConstant()) {
+    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+    Handle<JSFunction> jsfun =
+      Handle<JSFunction>::cast(fun_const->handle(isolate()));
+    is_self_call = jsfun.is_identical_to(info()->closure());
+  }
+
+  if (is_self_call) {
+    __ CallSelf();
+  } else {
+    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  }
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  Register input_reg = ToRegister(instr->value());
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         factory()->heap_number_map());
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+
+  Label slow, allocated, done;
+  Register tmp = input_reg.is(eax) ? ecx : eax;
+  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+
+  // Preserve the value of all registers.
+  PushSafepointRegistersScope scope(this);
+
+  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it. We do not need to patch the stack since |input| and
+  // |result| are the same register and |input| will be restored
+  // unchanged by popping safepoint registers.
+  __ test(tmp, Immediate(HeapNumber::kSignMask));
+  __ j(zero, &done, Label::kNear);
+
+  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
+  __ jmp(&allocated, Label::kNear);
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
+                          instr, instr->context());
+  // Set the pointer to the new heap number in tmp.
+  if (!tmp.is(eax)) __ mov(tmp, eax);
+  // Restore input_reg after call to runtime.
+  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
+
+  __ bind(&allocated);
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  __ and_(tmp2, ~HeapNumber::kSignMask);
+  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
+  __ StoreToSafepointRegisterSlot(input_reg, tmp);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+  Register input_reg = ToRegister(instr->value());
+  __ test(input_reg, Operand(input_reg));
+  Label is_positive;
+  __ j(not_sign, &is_positive, Label::kNear);
+  __ neg(input_reg);  // Sets flags.
+  DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+  __ bind(&is_positive);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+                                    LMathAbs* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMathAbs* instr_;
+  };
+
+  DCHECK(instr->value()->Equals(instr->result()));
+  Representation r = instr->hydrogen()->value()->representation();
+
+  if (r.IsDouble()) {
+    XMMRegister scratch = double_scratch0();
+    XMMRegister input_reg = ToDoubleRegister(instr->value());
+    __ xorps(scratch, scratch);
+    __ subsd(scratch, input_reg);
+    __ andps(input_reg, scratch);
+  } else if (r.IsSmiOrInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else {  // Tagged case.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input_reg = ToRegister(instr->value());
+    // Smi check.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+    EmitIntegerMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+  XMMRegister xmm_scratch = double_scratch0();
+  Register output_reg = ToRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatureScope scope(masm(), SSE4_1);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Deoptimize on negative zero.
+      Label non_zero;
+      __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
+      __ ucomisd(input_reg, xmm_scratch);
+      __ j(not_equal, &non_zero, Label::kNear);
+      __ movmskpd(output_reg, input_reg);
+      __ test(output_reg, Immediate(1));
+      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+      __ bind(&non_zero);
+    }
+    __ roundsd(xmm_scratch, input_reg, kRoundDown);
+    __ cvttsd2si(output_reg, Operand(xmm_scratch));
+    // Overflow is signalled with minint.
+    __ cmp(output_reg, 0x1);
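+    // cmp subtracts 1 without storing the result; minint is the only value
+    // for which that subtraction overflows, so the overflow flag isolates it.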
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  } else {
+    Label negative_sign, done;
+    // Deoptimize on unordered.
+    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
+    __ ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+    __ j(below, &negative_sign, Label::kNear);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Check for negative zero.
+      Label positive_sign;
+      __ j(above, &positive_sign, Label::kNear);
+      __ movmskpd(output_reg, input_reg);
+      __ test(output_reg, Immediate(1));
+      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+      __ Move(output_reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+      __ bind(&positive_sign);
+    }
+
+    // Use truncating instruction (OK because input is positive).
+    __ cvttsd2si(output_reg, Operand(input_reg));
+    // Overflow is signalled with minint.
+    __ cmp(output_reg, 0x1);
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+    __ jmp(&done, Label::kNear);
+
+    // Non-zero negative reaches here.
+    __ bind(&negative_sign);
+    // Truncate, then compare and compensate.
+    __ cvttsd2si(output_reg, Operand(input_reg));
+    __ Cvtsi2sd(xmm_scratch, output_reg);
+    __ ucomisd(input_reg, xmm_scratch);
+    __ j(equal, &done, Label::kNear);
+    __ sub(output_reg, Immediate(1));
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+  Register output_reg = ToRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister xmm_scratch = double_scratch0();
+  XMMRegister input_temp = ToDoubleRegister(instr->temp());
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+  ExternalReference minus_one_half =
+      ExternalReference::address_of_minus_one_half();
+
+  Label done, round_to_zero, below_one_half, do_not_compensate;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+
+  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
+  __ ucomisd(xmm_scratch, input_reg);
+  __ j(above, &below_one_half, Label::kNear);
+
+  // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
+  __ addsd(xmm_scratch, input_reg);
+  __ cvttsd2si(output_reg, Operand(xmm_scratch));
+  // Overflow is signalled with minint.
+  __ cmp(output_reg, 0x1);
+  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  __ jmp(&done, dist);
+
+  __ bind(&below_one_half);
+  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
+  __ ucomisd(xmm_scratch, input_reg);
+  __ j(below_equal, &round_to_zero, Label::kNear);
+
+  // CVTTSD2SI rounds towards zero; we use ceil(x - (-0.5)) and then
+  // compare and compensate.
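+  // E.g. for x = -2.7: x + 0.5 = -2.2 truncates to -2 and -2.0 != -2.2, so
+  // we compensate to -3. For x = -2.5: x + 0.5 = -2.0 truncates exactly and
+  // the tie rounds up to -2.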
+  __ movaps(input_temp, input_reg);  // Do not alter input_reg.
+  __ subsd(input_temp, xmm_scratch);
+  __ cvttsd2si(output_reg, Operand(input_temp));
+  // Catch minint due to overflow, and to prevent overflow when compensating.
+  __ cmp(output_reg, 0x1);
+  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+
+  __ Cvtsi2sd(xmm_scratch, output_reg);
+  __ ucomisd(xmm_scratch, input_temp);
+  __ j(equal, &done, dist);
+  __ sub(output_reg, Immediate(1));
+  // No overflow because we already ruled out minint.
+  __ jmp(&done, dist);
+
+  __ bind(&round_to_zero);
+  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+  // we can ignore the difference between a result of -0 and +0.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // If the sign is positive, we return +0.
+    __ movmskpd(output_reg, input_reg);
+    __ test(output_reg, Immediate(1));
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+  }
+  __ Move(output_reg, Immediate(0));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister output_reg = ToDoubleRegister(instr->result());
+  __ cvtsd2ss(output_reg, input_reg);
+  __ cvtss2sd(output_reg, output_reg);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  Operand input = ToOperand(instr->value());
+  XMMRegister output = ToDoubleRegister(instr->result());
+  __ sqrtsd(output, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  XMMRegister xmm_scratch = double_scratch0();
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  Register scratch = ToRegister(instr->temp());
+  DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done, sqrt;
+  // Check base for -Infinity.  According to IEEE-754, single-precision
+  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
+  __ mov(scratch, 0xFF800000);
+  __ movd(xmm_scratch, scratch);
+  __ cvtss2sd(xmm_scratch, xmm_scratch);
+  __ ucomisd(input_reg, xmm_scratch);
+  // Comparing -Infinity with NaN results in "unordered", which sets the
+  // zero flag as if both were equal.  However, it also sets the carry flag.
+  __ j(not_equal, &sqrt, Label::kNear);
+  __ j(carry, &sqrt, Label::kNear);
+  // If input is -Infinity, return Infinity.
+  __ xorps(input_reg, input_reg);
+  __ subsd(input_reg, xmm_scratch);
+  __ jmp(&done, Label::kNear);
+
+  // Square root.
+  __ bind(&sqrt);
+  __ xorps(xmm_scratch, xmm_scratch);
+  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
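+  // Math.pow(-0, 0.5) must yield +0, but sqrtsd(-0) would return -0; adding
+  // +0 first canonicalizes the sign under round-to-nearest.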
+  __ sqrtsd(input_reg, input_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
+         ToDoubleRegister(instr->right()).is(xmm1));
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
+  DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
+
+  if (exponent_type.IsSmi()) {
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    DCHECK(!ecx.is(tagged_exponent));
+    __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+    __ bind(&no_deopt);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(isolate(), MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+    __ CallStub(&stub);
+  }
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+  DCHECK(instr->value()->Equals(instr->result()));
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister xmm_scratch = double_scratch0();
+  Label positive, done, zero;
+  __ xorps(xmm_scratch, xmm_scratch);
+  __ ucomisd(input_reg, xmm_scratch);
+  __ j(above, &positive, Label::kNear);
+  __ j(not_carry, &zero, Label::kNear);
+  __ pcmpeqd(input_reg, input_reg);
+  __ jmp(&done, Label::kNear);
+  __ bind(&zero);
+  ExternalReference ninf =
+      ExternalReference::address_of_negative_infinity();
+  __ movsd(input_reg, Operand::StaticVariable(ninf));
+  __ jmp(&done, Label::kNear);
+  __ bind(&positive);
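+  // Compute ln(x) on the x87 stack: fldln2 pushes ln(2), and fyl2x pops two
+  // operands to produce st(1) * log2(st(0)) = ln(2) * log2(x) = ln(x).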
+  __ fldln2();
+  __ sub(Operand(esp), Immediate(kDoubleSize));
+  __ movsd(Operand(esp, 0), input_reg);
+  __ fld_d(Operand(esp, 0));
+  __ fyl2x();
+  __ fstp_d(Operand(esp, 0));
+  __ movsd(input_reg, Operand(esp, 0));
+  __ add(Operand(esp), Immediate(kDoubleSize));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ Lzcnt(result, input);
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  XMMRegister input = ToDoubleRegister(instr->value());
+  XMMRegister result = ToDoubleRegister(instr->result());
+  XMMRegister temp0 = double_scratch0();
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(instr->HasPointerMap());
+
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(
+        this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
+  } else {
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
+                      instr->arity(), instr);
+  }
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    Register slot_register = ToRegister(instr->temp_slot());
+    Register vector_register = ToRegister(instr->temp_vector());
+    DCHECK(slot_register.is(edx));
+    DCHECK(vector_register.is(ebx));
+
+    AllowDeferredHandleDereference vector_structure_check;
+    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+    int index = vector->GetIndex(instr->hydrogen()->slot());
+
+    __ mov(vector_register, vector);
+    __ mov(slot_register, Immediate(Smi::FromInt(index)));
+
+    Handle<Code> ic =
+        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ Set(eax, arity);
+    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->constructor()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  __ Move(eax, Immediate(instr->arity()));
+  if (instr->arity() == 1) {
+    // We only need the allocation site for the case we have a length argument.
+    // The case may bail out to the runtime, which will determine the correct
+    // elements kind with the site.
+    __ mov(ebx, instr->hydrogen()->site());
+  } else {
+    __ mov(ebx, isolate()->factory()->undefined_value());
+  }
+
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+      // A single non-zero length argument (e.g. new Array(5)) must produce
+      // a holey array, so look at the first argument on the stack.
+      __ mov(ecx, Operand(esp, 0));
+      __ test(ecx, ecx);
+      __ j(zero, &packed_case, Label::kNear);
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(),
+                                              holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      __ jmp(&done, Label::kNear);
+      __ bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    __ bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+    __ lea(result, Operand(base, ToInteger32(offset)));
+  } else {
+    Register offset = ToRegister(instr->offset());
+    __ lea(result, Operand(base, offset, times_1, 0));
+  }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Representation representation = instr->hydrogen()->field_representation();
+
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    MemOperand operand = instr->object()->IsConstantOperand()
+        ? MemOperand::StaticVariable(
+            ToExternalReference(LConstantOperand::cast(instr->object())))
+        : MemOperand(ToRegister(instr->object()), offset);
+    if (instr->value()->IsConstantOperand()) {
+      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+      __ mov(operand, Immediate(ToInteger32(operand_value)));
+    } else {
+      Register value = ToRegister(instr->value());
+      __ Store(value, operand, representation);
+    }
+    return;
+  }
+
+  Register object = ToRegister(instr->object());
+  __ AssertNotSmi(object);
+
+  DCHECK(!representation.IsSmi() ||
+         !instr->value()->IsConstantOperand() ||
+         IsSmi(LConstantOperand::cast(instr->value())));
+  if (representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    XMMRegister value = ToDoubleRegister(instr->value());
+    __ movsd(FieldOperand(object, offset), value);
+    return;
+  }
+
+  if (instr->hydrogen()->has_transition()) {
+    Handle<Map> transition = instr->hydrogen()->transition_map();
+    AddDeprecationDependency(transition);
+    __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
+    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+      Register temp = ToRegister(instr->temp());
+      Register temp_map = ToRegister(instr->temp_map());
+      // Update the write barrier for the map field.
+      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
+    }
+  }
+
+  // Do the store.
+  Register write_register = object;
+  if (!access.IsInobject()) {
+    write_register = ToRegister(instr->temp());
+    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
+  }
+
+  MemOperand operand = FieldOperand(write_register, offset);
+  if (instr->value()->IsConstantOperand()) {
+    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+    if (operand_value->IsRegister()) {
+      Register value = ToRegister(operand_value);
+      __ Store(value, operand, representation);
+    } else if (representation.IsInteger32() || representation.IsExternal()) {
+      Immediate immediate = ToImmediate(operand_value, representation);
+      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+      __ mov(operand, immediate);
+    } else {
+      Handle<Object> handle_value = ToHandle(operand_value);
+      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+      __ mov(operand, handle_value);
+    }
+  } else {
+    Register value = ToRegister(instr->value());
+    __ Store(value, operand, representation);
+  }
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    Register value = ToRegister(instr->value());
+    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
+    // Update the write barrier for the object for in-object properties.
+    __ RecordWriteField(write_register,
+                        offset,
+                        value,
+                        temp,
+                        kSaveFPRegs,
+                        EMIT_REMEMBERED_SET,
+                        instr->hydrogen()->SmiCheckForWriteBarrier(),
+                        instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+  }
+
+  __ mov(StoreDescriptor::NameRegister(), instr->name());
+  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
+  if (instr->index()->IsConstantOperand()) {
+    __ cmp(ToOperand(instr->length()),
+           ToImmediate(LConstantOperand::cast(instr->index()),
+                       instr->hydrogen()->length()->representation()));
+    cc = CommuteCondition(cc);
+  } else if (instr->length()->IsConstantOperand()) {
+    __ cmp(ToOperand(instr->index()),
+           ToImmediate(LConstantOperand::cast(instr->length()),
+                       instr->hydrogen()->index()->representation()));
+  } else {
+    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+    Label done;
+    __ j(NegateCondition(cc), &done, Label::kNear);
+    __ int3();
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = instr->key();
+  if (!key->IsConstantOperand() &&
+      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+                                  elements_kind)) {
+    __ SmiUntag(ToRegister(key));
+  }
+  Operand operand(BuildFastArrayOperand(
+      instr->elements(),
+      key,
+      instr->hydrogen()->key()->representation(),
+      elements_kind,
+      instr->base_offset()));
+  if (elements_kind == FLOAT32_ELEMENTS) {
+    XMMRegister xmm_scratch = double_scratch0();
+    __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
+    __ movss(operand, xmm_scratch);
+  } else if (elements_kind == FLOAT64_ELEMENTS) {
+    __ movsd(operand, ToDoubleRegister(instr->value()));
+  } else {
+    Register value = ToRegister(instr->value());
+    switch (elements_kind) {
+      case UINT8_ELEMENTS:
+      case INT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ mov_b(operand, value);
+        break;
+      case UINT16_ELEMENTS:
+      case INT16_ELEMENTS:
+        __ mov_w(operand, value);
+        break;
+      case UINT32_ELEMENTS:
+      case INT32_ELEMENTS:
+        __ mov(operand, value);
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  Operand double_store_operand = BuildFastArrayOperand(
+      instr->elements(),
+      instr->key(),
+      instr->hydrogen()->key()->representation(),
+      FAST_DOUBLE_ELEMENTS,
+      instr->base_offset());
+
+  XMMRegister value = ToDoubleRegister(instr->value());
+
+  if (instr->NeedsCanonicalization()) {
+    XMMRegister xmm_scratch = double_scratch0();
+    // Turn potential sNaN value into qNaN.
+    __ xorps(xmm_scratch, xmm_scratch);
+    __ subsd(value, xmm_scratch);
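+    // Subtracting +0 leaves every non-NaN value (including -0) unchanged
+    // while quietening NaNs, so a stored NaN cannot be misread as the hole.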
+  }
+
+  __ movsd(double_store_operand, value);
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+  Operand operand = BuildFastArrayOperand(
+      instr->elements(),
+      instr->key(),
+      instr->hydrogen()->key()->representation(),
+      FAST_ELEMENTS,
+      instr->base_offset());
+  if (instr->value()->IsRegister()) {
+    __ mov(operand, ToRegister(instr->value()));
+  } else {
+    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+    if (IsSmi(operand_value)) {
+      Immediate immediate = ToImmediate(operand_value, Representation::Smi());
+      __ mov(operand, immediate);
+    } else {
+      DCHECK(!IsInteger32(operand_value));
+      Handle<Object> handle_value = ToHandle(operand_value);
+      __ mov(operand, handle_value);
+    }
+  }
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    DCHECK(instr->value()->IsRegister());
+    Register value = ToRegister(instr->value());
+    DCHECK(!instr->key()->IsConstantOperand());
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute address of modified element and store it into key register.
+    __ lea(key, operand);
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed,
+                   instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // Dispatch by elements kind: external (typed) array, fast double, or fast.
+  if (instr->is_fixed_typed_array()) {
+    DoStoreKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+    DoStoreKeyedFixedDoubleArray(instr);
+  } else {
+    DoStoreKeyedFixedArray(instr);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+  DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
+  __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+  class DeferredMaybeGrowElements final : public LDeferredCode {
+   public:
+    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMaybeGrowElements* instr_;
+  };
+
+  Register result = eax;
+  DeferredMaybeGrowElements* deferred =
+      new (zone()) DeferredMaybeGrowElements(this, instr);
+  LOperand* key = instr->key();
+  LOperand* current_capacity = instr->current_capacity();
+
+  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+  DCHECK(key->IsConstantOperand() || key->IsRegister());
+  DCHECK(current_capacity->IsConstantOperand() ||
+         current_capacity->IsRegister());
+
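+  // Grow whenever key >= capacity; each operand pairing below emits the
+  // cheapest comparison that jumps to the deferred GrowArrayElementsStub
+  // call.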
+  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    if (constant_key >= constant_capacity) {
+      // Deferred case.
+      __ jmp(deferred->entry());
+    }
+  } else if (key->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    __ cmp(ToOperand(current_capacity), Immediate(constant_key));
+    __ j(less_equal, deferred->entry());
+  } else if (current_capacity->IsConstantOperand()) {
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    __ cmp(ToRegister(key), Immediate(constant_capacity));
+    __ j(greater_equal, deferred->entry());
+  } else {
+    __ cmp(ToRegister(key), ToRegister(current_capacity));
+    __ j(greater_equal, deferred->entry());
+  }
+
+  __ mov(result, ToOperand(instr->elements()));
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = eax;
+  __ Move(result, Immediate(0));
+
+  // We have to call a stub.
+  {
+    PushSafepointRegistersScope scope(this);
+    if (instr->object()->IsRegister()) {
+      __ Move(result, ToRegister(instr->object()));
+    } else {
+      __ mov(result, ToOperand(instr->object()));
+    }
+
+    LOperand* key = instr->key();
+    if (key->IsConstantOperand()) {
+      __ mov(ebx, ToImmediate(key, Representation::Smi()));
+    } else {
+      __ Move(ebx, ToRegister(key));
+      __ SmiTag(ebx);
+    }
+
+    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+                               instr->hydrogen()->kind());
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(
+        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    __ StoreToSafepointRegisterSlot(result, result);
+  }
+
+  // Deopt on smi, which means the elements array changed to dictionary mode.
+  __ test(result, Immediate(kSmiTagMask));
+  DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = instr->from_kind();
+  ElementsKind to_kind = instr->to_kind();
+
+  Label not_applicable;
+  bool is_simple_map_transition =
+      IsSimpleMapChangeTransition(from_kind, to_kind);
+  Label::Distance branch_distance =
+      is_simple_map_transition ? Label::kNear : Label::kFar;
+  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+  __ j(not_equal, &not_applicable, branch_distance);
+  if (is_simple_map_transition) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
+           Immediate(to_map));
+    // Write barrier.
+    DCHECK_NOT_NULL(instr->temp());
+    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
+                         ToRegister(instr->temp()),
+                         kDontSaveFPRegs);
+  } else {
+    DCHECK(ToRegister(instr->context()).is(esi));
+    DCHECK(object_reg.is(eax));
+    PushSafepointRegistersScope scope(this);
+    __ mov(ebx, to_map);
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(instr,
+        RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt final : public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen,
+                             LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new(zone()) DeferredStringCharCodeAt(this, instr);
+
+  StringCharLoadGenerator::Generate(masm(),
+                                    factory(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Move(result, Immediate(0));
+
+  PushSafepointRegistersScope scope(this);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+  if (instr->index()->IsConstantOperand()) {
+    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
+                                      Representation::Smi());
+    __ push(immediate);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
+                          instr, instr->context());
+  __ AssertSmi(eax);
+  __ SmiUntag(eax);
+  __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode final : public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen,
+                               LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredStringCharFromCode(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new(zone()) DeferredStringCharFromCode(this, instr);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  DCHECK(!char_code.is(result));
+
+  __ cmp(char_code, String::kMaxOneByteCharCode);
+  __ j(above, deferred->entry());
+  __ Move(result, Immediate(factory()->single_character_string_cache()));
+  __ mov(result, FieldOperand(result,
+                              char_code, times_pointer_size,
+                              FixedArray::kHeaderSize));
+  __ cmp(result, factory()->undefined_value());
+  __ j(equal, deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Move(result, Immediate(0));
+
+  PushSafepointRegistersScope scope(this);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(edx));
+  DCHECK(ToRegister(instr->right()).is(eax));
+  StringAddStub stub(isolate(),
+                     instr->hydrogen()->flags(),
+                     instr->hydrogen()->pretenure_flag());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  DCHECK(output->IsDoubleRegister());
+  __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+  __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI final : public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen,
+                       LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(
+          instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+
+  DeferredNumberTagI* deferred =
+      new(zone()) DeferredNumberTagI(this, instr);
+  __ SmiTag(reg);
+  __ j(overflow, deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+  class DeferredNumberTagU final : public LDeferredCode {
+   public:
+    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(
+          instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagU* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+
+  DeferredNumberTagU* deferred =
+      new(zone()) DeferredNumberTagU(this, instr);
+  __ cmp(reg, Immediate(Smi::kMaxValue));
+  __ j(above, deferred->entry());
+  __ SmiTag(reg);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+                                     LOperand* value,
+                                     LOperand* temp,
+                                     IntegerSignedness signedness) {
+  Label done, slow;
+  Register reg = ToRegister(value);
+  Register tmp = ToRegister(temp);
+  XMMRegister xmm_scratch = double_scratch0();
+
+  if (signedness == SIGNED_INT32) {
+    // There was overflow, so bits 30 and 31 of the original integer
+    // disagree. Try to allocate a heap number in new space and store
+    // the value in there. If that fails, call the runtime system.
+    __ SmiUntag(reg);
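+    // SmiUntag shifted the overflowed value back with bit 31 copied from
+    // bit 30; since overflow means those bits disagreed, flipping bit 31
+    // recovers the original integer.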
+    __ xor_(reg, 0x80000000);
+    __ Cvtsi2sd(xmm_scratch, Operand(reg));
+  } else {
+    __ LoadUint32(xmm_scratch, reg);
+  }
+
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
+    __ jmp(&done, Label::kNear);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ Move(reg, Immediate(0));
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    // NumberTagI and NumberTagD use the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // They only call Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(reg, eax);
+  }
+
+  // Done. Put the value in xmm_scratch into the value of the allocated heap
+  // number.
+  __ bind(&done);
+  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD final : public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagD* instr_;
+  };
+
+  Register reg = ToRegister(instr->result());
+
+  DeferredNumberTagD* deferred =
+      new(zone()) DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    Register tmp = ToRegister(instr->temp());
+    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
+  } else {
+    __ jmp(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ Move(reg, Immediate(0));
+
+  PushSafepointRegistersScope scope(this);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(reg, eax);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  HChange* hchange = instr->hydrogen();
+  Register input = ToRegister(instr->value());
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      hchange->value()->CheckFlag(HValue::kUint32)) {
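+    // A uint32 value fits in a 31-bit smi only if its two top bits are
+    // clear, i.e. the value is below 2^30.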
+    __ test(input, Immediate(0xc0000000));
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
+  }
+  __ SmiTag(input);
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      !hchange->value()->CheckFlag(HValue::kUint32)) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  LOperand* input = instr->value();
+  Register result = ToRegister(input);
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
+  if (instr->needs_check()) {
+    __ test(result, Immediate(kSmiTagMask));
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+  } else {
+    __ AssertSmi(result);
+  }
+  __ SmiUntag(result);
+}
+
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+                                Register temp_reg, XMMRegister result_reg,
+                                NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+  Label convert, load_smi, done;
+
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+
+    // Heap number map check.
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           factory()->heap_number_map());
+    if (can_convert_undefined_to_nan) {
+      __ j(not_equal, &convert, Label::kNear);
+    } else {
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+    }
+
+    // Heap number to XMM conversion.
+    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
+    if (deoptimize_on_minus_zero) {
+      XMMRegister xmm_scratch = double_scratch0();
+      __ xorps(xmm_scratch, xmm_scratch);
+      __ ucomisd(result_reg, xmm_scratch);
+      __ j(not_zero, &done, Label::kNear);
+      __ movmskpd(temp_reg, result_reg);
+      __ test_b(temp_reg, 1);
+      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+    }
+    __ jmp(&done, Label::kNear);
+
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+
+      // Convert undefined to NaN.
+      __ cmp(input_reg, factory()->undefined_value());
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+
+      __ pcmpeqd(result_reg, result_reg);
+      __ jmp(&done, Label::kNear);
+    }
+  } else {
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+  }
+
+  __ bind(&load_smi);
+  // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
+  // input register since we avoid dependencies.
+  __ mov(temp_reg, input_reg);
+  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
+  __ Cvtsi2sd(result_reg, Operand(temp_reg));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
+  Register input_reg = ToRegister(instr->value());
+
+  // The input was optimistically untagged; revert it.
+  STATIC_ASSERT(kSmiTagSize == 1);
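+  // SmiUntag halved the heap pointer and shifted its low tag bit into the
+  // carry flag; times_2 plus kHeapObjectTag rebuilds the original tagged
+  // pointer.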
+  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
+
+  if (instr->truncating()) {
+    Label no_heap_number, check_bools, check_false;
+
+    // Heap number map check.
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           factory()->heap_number_map());
+    __ j(not_equal, &no_heap_number, Label::kNear);
+    __ TruncateHeapNumberToI(input_reg, input_reg);
+    __ jmp(done);
+
+    __ bind(&no_heap_number);
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
+    __ cmp(input_reg, factory()->undefined_value());
+    __ j(not_equal, &check_bools, Label::kNear);
+    __ Move(input_reg, Immediate(0));
+    __ jmp(done);
+
+    __ bind(&check_bools);
+    __ cmp(input_reg, factory()->true_value());
+    __ j(not_equal, &check_false, Label::kNear);
+    __ Move(input_reg, Immediate(1));
+    __ jmp(done);
+
+    __ bind(&check_false);
+    __ cmp(input_reg, factory()->false_value());
+    DeoptimizeIf(not_equal, instr,
+                 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+    __ Move(input_reg, Immediate(0));
+  } else {
+    XMMRegister scratch = ToDoubleRegister(instr->temp());
+    DCHECK(!scratch.is(xmm0));
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           isolate()->factory()->heap_number_map());
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    __ cvttsd2si(input_reg, Operand(xmm0));
+    __ Cvtsi2sd(scratch, Operand(input_reg));
+    __ ucomisd(xmm0, scratch);
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+    DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
+      __ test(input_reg, Operand(input_reg));
+      __ j(not_zero, done);
+      __ movmskpd(input_reg, xmm0);
+      __ and_(input_reg, 1);
+      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+    }
+  }
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI final : public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  Register input_reg = ToRegister(input);
+  DCHECK(input_reg.is(ToRegister(instr->result())));
+
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred =
+        new(zone()) DeferredTaggedToI(this, instr);
+    // Optimistically untag the input.
+    // If the input is a HeapObject, SmiUntag will set the carry flag.
+    STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+    __ SmiUntag(input_reg);
+    // Branch to deferred code if the input was tagged.
+    // The deferred code will take care of restoring the tag.
+    __ j(carry, deferred->entry());
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  LOperand* temp = instr->temp();
+  DCHECK(temp->IsRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  Register temp_reg = ToRegister(temp);
+
+  HValue* value = instr->hydrogen()->value();
+  NumberUntagDMode mode = value->representation().IsSmi()
+      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+  XMMRegister result_reg = ToDoubleRegister(result);
+  EmitNumberUntagD(instr, input_reg, temp_reg, result_reg, mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsDoubleRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsRegister());
+  Register result_reg = ToRegister(result);
+
+  if (instr->truncating()) {
+    XMMRegister input_reg = ToDoubleRegister(input);
+    __ TruncateDoubleToI(result_reg, input_reg);
+  } else {
+    Label lost_precision, is_nan, minus_zero, done;
+    XMMRegister input_reg = ToDoubleRegister(input);
+    XMMRegister xmm_scratch = double_scratch0();
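+    // --deopt-every-n-times makes each DeoptimizeIf site emit extra code,
+    // so near jumps could go out of range.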
+    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+    __ DoubleToI(result_reg, input_reg, xmm_scratch,
+                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
+                 &is_nan, &minus_zero, dist);
+    __ jmp(&done, dist);
+    __ bind(&lost_precision);
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+    __ bind(&is_nan);
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+    __ bind(&minus_zero);
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsDoubleRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsRegister());
+  Register result_reg = ToRegister(result);
+
+  Label lost_precision, is_nan, minus_zero, done;
+  XMMRegister input_reg = ToDoubleRegister(input);
+  XMMRegister xmm_scratch = double_scratch0();
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+  __ DoubleToI(result_reg, input_reg, xmm_scratch,
+               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
+               &minus_zero, dist);
+  __ jmp(&done, dist);
+  __ bind(&lost_precision);
+  DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+  __ bind(&is_nan);
+  DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+  __ bind(&minus_zero);
+  DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+  __ bind(&done);
+  __ SmiTag(result_reg);
+  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->value();
+  __ test(ToOperand(input), Immediate(kSmiTagMask));
+  DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ test(ToOperand(input), Immediate(kSmiTagMask));
+    DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+  }
+}
+
+
+void LCodeGen::DoCheckArrayBufferNotNeutered(
+    LCheckArrayBufferNotNeutered* instr) {
+  Register view = ToRegister(instr->view());
+  Register scratch = ToRegister(instr->scratch());
+
+  __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
+  __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
+            1 << JSArrayBuffer::WasNeutered::kShift);
+  DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+            static_cast<int8_t>(first));
+
+    // If there is only one type in the interval, check for equality.
+    if (first == last) {
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+    } else {
+      DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+                static_cast<int8_t>(last));
+        DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
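+      // With a single-bit mask the tag is either 0 or the mask itself, so a
+      // single test decides the check: deopt when the bit does not match the
+      // expected tag.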
+      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
+                   Deoptimizer::kWrongInstanceType);
+    } else {
+      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+      __ and_(temp, mask);
+      __ cmp(temp, tag);
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
+  if (instr->hydrogen()->object_in_new_space()) {
+    Register reg = ToRegister(instr->value());
+    Handle<Cell> cell = isolate()->factory()->NewCell(object);
+    __ cmp(reg, Operand::ForCell(cell));
+  } else {
+    Operand operand = ToOperand(instr->value());
+    __ cmp(operand, object);
+  }
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  {
+    PushSafepointRegistersScope scope(this);
+    __ push(object);
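+    // Zero the context register; Runtime::kTryMigrateInstance is not
+    // expected to use it, so a zero smi serves as a dummy context.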
+    __ xor_(esi, esi);
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+
+    __ test(eax, Immediate(kSmiTagMask));
+  }
+  DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  class DeferredCheckMaps final : public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+        : LDeferredCode(codegen), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    void Generate() override {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->IsStabilityCheck()) {
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  Register reg = ToRegister(input);
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+    __ bind(deferred->check_maps());
+  }
+
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+  Label success;
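+  // Compare against all but the last map with early-out jumps; the final
+  // compare either deoptimizes or, when a migration target exists, falls
+  // through to the deferred migration path.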
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
+    __ CompareMap(reg, map);
+    __ j(equal, &success, Label::kNear);
+  }
+
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
+  __ CompareMap(reg, map);
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    __ j(not_equal, deferred->entry());
+  } else {
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+  }
+
+  __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+  XMMRegister xmm_scratch = double_scratch0();
+  Register result_reg = ToRegister(instr->result());
+  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  DCHECK(instr->unclamped()->Equals(instr->result()));
+  Register value_reg = ToRegister(instr->result());
+  __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  DCHECK(instr->unclamped()->Equals(instr->result()));
+  Register input_reg = ToRegister(instr->unclamped());
+  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+  XMMRegister xmm_scratch = double_scratch0();
+  Label is_smi, done, heap_number;
+
+  __ JumpIfSmi(input_reg, &is_smi);
+
+  // Check for a heap number.
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         factory()->heap_number_map());
+  __ j(equal, &heap_number, Label::kNear);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ cmp(input_reg, factory()->undefined_value());
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+  __ mov(input_reg, 0);
+  __ jmp(&done, Label::kNear);
+
+  // Heap number case.
+  __ bind(&heap_number);
+  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
+  __ jmp(&done, Label::kNear);
+
+  // Smi case.
+  __ bind(&is_smi);
+  __ SmiUntag(input_reg);
+  __ ClampUint8(input_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+  XMMRegister value_reg = ToDoubleRegister(instr->value());
+  Register result_reg = ToRegister(instr->result());
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    if (CpuFeatures::IsSupported(SSE4_1)) {
+      CpuFeatureScope scope2(masm(), SSE4_1);
+      __ pextrd(result_reg, value_reg, 1);
+    } else {
+      XMMRegister xmm_scratch = double_scratch0();
+      __ pshufd(xmm_scratch, value_reg, 1);
+      __ movd(result_reg, xmm_scratch);
+    }
+  } else {
+    __ movd(result_reg, value_reg);
+  }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  XMMRegister result_reg = ToDoubleRegister(instr->result());
+
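+  // With SSE4.1 the two 32-bit halves can be inserted directly; otherwise
+  // shift the high word into place and OR in the low word.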
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatureScope scope2(masm(), SSE4_1);
+    __ movd(result_reg, lo_reg);
+    __ pinsrd(result_reg, hi_reg, 1);
+  } else {
+    XMMRegister xmm_scratch = double_scratch0();
+    __ movd(result_reg, hi_reg);
+    __ psllq(result_reg, 32);
+    __ movd(xmm_scratch, lo_reg);
+    __ orps(result_reg, xmm_scratch);
+  }
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate final : public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+
+  // Allocate memory for the object.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ mov(temp, (size / kPointerSize) - 1);
+    } else {
+      temp = ToRegister(instr->size());
+      __ shr(temp, kPointerSizeLog2);
+      __ dec(temp);
+    }
+    Label loop;
+    __ bind(&loop);
+    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
+        isolate()->factory()->one_pointer_filler_map());
+    __ dec(temp);
+    __ j(not_zero, &loop);
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Move(result, Immediate(Smi::FromInt(0)));
+
+  PushSafepointRegistersScope scope(this);
+  if (instr->size()->IsRegister()) {
+    Register size = ToRegister(instr->size());
+    DCHECK(!size.is(result));
+    __ SmiTag(ToRegister(instr->size()));
+    __ push(size);
+  } else {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    if (size >= 0 && size <= Smi::kMaxValue) {
+      __ push(Immediate(Smi::FromInt(size)));
+    } else {
+      // We should never get here at runtime, so abort.
+      __ int3();
+      return;
+    }
+  }
+
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ push(Immediate(Smi::FromInt(flags)));
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  DCHECK(ToRegister(instr->value()).is(eax));
+  __ push(eax);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->value()).is(ebx));
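+  // Fast path: a smi is always typeof "number"; everything else goes
+  // through the TypeofStub.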
+  Label end, do_call;
+  Register value_register = ToRegister(instr->value());
+  __ JumpIfNotSmi(value_register, &do_call);
+  __ mov(eax, Immediate(isolate()->factory()->number_string()));
+  __ jmp(&end);
+  __ bind(&do_call);
+  TypeofStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&end);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Condition final_branch_condition = EmitTypeofIs(instr, input);
+  if (final_branch_condition != no_condition) {
+    EmitBranch(instr, final_branch_condition);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+  Label* true_label = instr->TrueLabel(chunk_);
+  Label* false_label = instr->FalseLabel(chunk_);
+  Handle<String> type_name = instr->type_literal();
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+  int next_block = GetNextEmittedBlock();
+
+  Label::Distance true_distance = left_block == next_block ? Label::kNear
+                                                           : Label::kFar;
+  Label::Distance false_distance = right_block == next_block ? Label::kNear
+                                                             : Label::kFar;
+  Condition final_branch_condition = no_condition;
+  if (String::Equals(type_name, factory()->number_string())) {
+    __ JumpIfSmi(input, true_label, true_distance);
+    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+           factory()->heap_number_map());
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory()->string_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
+    final_branch_condition = below;
+
+  } else if (String::Equals(type_name, factory()->symbol_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    __ CmpObjectType(input, SYMBOL_TYPE, input);
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory()->boolean_string())) {
+    __ cmp(input, factory()->true_value());
+    __ j(equal, true_label, true_distance);
+    __ cmp(input, factory()->false_value());
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory()->undefined_string())) {
+    __ cmp(input, factory()->undefined_value());
+    __ j(equal, true_label, true_distance);
+    __ JumpIfSmi(input, false_label, false_distance);
+    // Check for undetectable objects => true.
+    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    final_branch_condition = not_zero;
+
+  } else if (String::Equals(type_name, factory()->function_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    // Check for callable and not undetectable objects => true.
+    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
+    __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+    __ cmp(input, 1 << Map::kIsCallable);
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory()->object_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    __ cmp(input, factory()->null_value());
+    __ j(equal, true_label, true_distance);
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
+    __ j(below, false_label, false_distance);
+    // Check for callable or undetectable objects => false.
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+    final_branch_condition = zero;
+
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)         \
+  } else if (String::Equals(type_name, factory()->type##_string())) { \
+    __ JumpIfSmi(input, false_label, false_distance);                 \
+    __ cmp(FieldOperand(input, HeapObject::kMapOffset),               \
+           factory()->type##_map());                                  \
+    final_branch_condition = equal;
+  SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+    // clang-format on
+
+  } else {
+    __ jmp(false_label, false_distance);
+  }
+  return final_branch_condition;
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      __ Nop(padding_size);
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
+  }
+  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck final : public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStackCheck* instr_;
+  };
+
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(isolate());
+    __ cmp(esp, Operand::StaticVariable(stack_limit));
+    __ j(above_equal, &done, Label::kNear);
+
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(esi));
+    CallCode(isolate()->builtins()->StackCheck(),
+             RelocInfo::CODE_TARGET,
+             instr);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new(zone()) DeferredStackCheck(this, instr);
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(isolate());
+    __ cmp(esp, Operand::StaticVariable(stack_limit));
+    __ j(below, deferred_stack_check->entry());
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting call and the safepoint in
+    // the deferred code.
+  }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  DCHECK(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  __ test(eax, Immediate(kSmiTagMask));
+  DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+
+  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+  __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
+  DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
+
+  Label use_cache, call_runtime;
+  __ CheckEnumCache(&call_runtime);
+
+  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+  __ jmp(&use_cache, Label::kNear);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(eax);
+  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
+
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+         isolate()->factory()->meta_map());
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  Label load_cache, done;
+  __ EnumLength(result, map);
+  __ cmp(result, Immediate(Smi::FromInt(0)));
+  __ j(not_equal, &load_cache, Label::kNear);
+  __ mov(result, isolate()->factory()->empty_fixed_array());
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&load_cache);
+  __ LoadInstanceDescriptors(map, result);
+  __ mov(result,
+         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
+  __ mov(result,
+         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+  __ bind(&done);
+  __ test(result, result);
+  DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  __ cmp(ToRegister(instr->map()),
+         FieldOperand(object, HeapObject::kMapOffset));
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ push(object);
+  __ push(index);
+  __ xor_(esi, esi);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(object, eax);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble final : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          object_(object),
+          index_(index) {
+    }
+    void Generate() override {
+      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LLoadFieldByIndex* instr_;
+    Register object_;
+    Register index_;
+  };
+
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+
+  DeferredLoadMutableDouble* deferred =
+      new (zone()) DeferredLoadMutableDouble(this, instr, object, index);
+
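+  // The smi-tagged index encodes (field_index << 1) | is_double_field in its
+  // payload: the low payload bit routes mutable doubles through the runtime,
+  // and the sar below strips it, leaving a smi-tagged field index whose sign
+  // selects in-object vs. out-of-object properties.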
+  Label out_of_object, done;
+  __ test(index, Immediate(Smi::FromInt(1)));
+  __ j(not_zero, deferred->entry());
+
+  __ sar(index, 1);
+
+  __ cmp(index, Immediate(0));
+  __ j(less, &out_of_object, Label::kNear);
+  __ mov(object, FieldOperand(object,
+                              index,
+                              times_half_pointer_size,
+                              JSObject::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&out_of_object);
+  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
+  __ neg(index);
+  // Index is now equal to the out-of-object property index plus 1.
+  __ mov(object, FieldOperand(object,
+                              index,
+                              times_half_pointer_size,
+                              FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+  Register context = ToRegister(instr->context());
+  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+  Handle<ScopeInfo> scope_info = instr->scope_info();
+  __ Push(scope_info);
+  __ push(ToRegister(instr->function()));
+  CallRuntime(Runtime::kPushBlockContext, instr);
+  RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/crankshaft/ia32/lithium-codegen-ia32.h b/src/crankshaft/ia32/lithium-codegen-ia32.h
new file mode 100644
index 0000000..06a3e10
--- /dev/null
+++ b/src/crankshaft/ia32/lithium-codegen-ia32.h
@@ -0,0 +1,393 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
+#define V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
+
+#include "src/ast/scopes.h"
+#include "src/base/logging.h"
+#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
+#include "src/crankshaft/ia32/lithium-ia32.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/deoptimizer.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class LGapNode;
+class SafepointGenerator;
+
+class LCodeGen : public LCodeGenBase {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : LCodeGenBase(chunk, assembler, info),
+        jump_table_(4, info->zone()),
+        scope_(info->scope()),
+        deferred_(8, info->zone()),
+        dynamic_frame_alignment_(false),
+        support_aligned_spilled_doubles_(false),
+        frame_is_built_(false),
+        safepoints_(info->zone()),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub() ||
+        info()->requires_frame();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
+  // Support for converting LOperands to assembler types.
+  Operand ToOperand(LOperand* op) const;
+  Register ToRegister(LOperand* op) const;
+  XMMRegister ToDoubleRegister(LOperand* op) const;
+
+  bool IsInteger32(LConstantOperand* op) const;
+  bool IsSmi(LConstantOperand* op) const;
+  Immediate ToImmediate(LOperand* op, const Representation& r) const {
+    return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
+  }
+  double ToDouble(LConstantOperand* op) const;
+
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+
+  // The operand denoting the second word (the one with a higher address) of
+  // a double stack slot.
+  Operand HighOperand(LOperand* op);
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  void DoDeferredNumberTagIU(LInstruction* instr,
+                             LOperand* value,
+                             LOperand* temp,
+                             IntegerSignedness signedness);
+
+  void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
+  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocate(LAllocate* instr);
+  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register object,
+                                   Register index);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  void EnsureRelocSpaceForDeoptimization();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  LanguageMode language_mode() const { return info()->language_mode(); }
+
+  Scope* scope() const { return scope_; }
+
+  XMMRegister double_scratch0() const { return xmm0; }
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  void GenerateBodyInstructionPre(LInstruction* instr) override;
+  void GenerateBodyInstructionPost(LInstruction* instr) override;
+  bool GeneratePrologue();
+  bool GenerateDeferredCode();
+  bool GenerateJumpTable();
+  bool GenerateSafepointTable();
+
+  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+  void GenerateOsrPrologue();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* fun,
+                   int argc,
+                   LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int argc,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, argc, instr);
+  }
+
+  void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, function->nargs, instr);
+  }
+
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr,
+                               LOperand* context);
+
+  void LoadContextFromDeferred(LOperand* context);
+
+  // Generate a direct call to a known function. Expects the function
+  // to be in edi.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count, int arity,
+                         LInstruction* instr);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+  void DeoptimizeIf(Condition cc, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason,
+                    Deoptimizer::BailoutType bailout_type);
+  void DeoptimizeIf(Condition cc, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason);
+
+  bool DeoptEveryNTimes() {
+    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+  }
+
+  void AddToTranslation(LEnvironment* environment,
+                        Translation* translation,
+                        LOperand* op,
+                        bool is_tagged,
+                        bool is_uint32,
+                        int* object_index_pointer,
+                        int* dematerialized_index_pointer);
+
+  Register ToRegister(int index) const;
+  XMMRegister ToDoubleRegister(int index) const;
+  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+  int32_t ToInteger32(LConstantOperand* op) const;
+  ExternalReference ToExternalReference(LConstantOperand* op) const;
+
+  Operand BuildFastArrayOperand(LOperand* elements_pointer,
+                                LOperand* key,
+                                Representation key_representation,
+                                ElementsKind elements_kind,
+                                uint32_t base_offset);
+
+  Operand BuildSeqStringOperand(Register string,
+                                LOperand* index,
+                                String::Encoding encoding);
+
+  void EmitIntegerMathAbs(LMathAbs* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    Safepoint::DeoptMode mode);
+
+  void RecordAndWritePosition(int position) override;
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block);
+
+  // EmitBranch expects to be the last instruction of a block.
+  template<class InstrType>
+  void EmitBranch(InstrType instr, Condition cc);
+  template <class InstrType>
+  void EmitTrueBranch(InstrType instr, Condition cc);
+  template <class InstrType>
+  void EmitFalseBranch(InstrType instr, Condition cc);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input, Register temp,
+                        XMMRegister result, NumberUntagDMode mode);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string,
+                         SmiCheck check_needed);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset,
+                    AllocationSiteMode mode);
+
+  void EnsureSpaceForLazyDeopt(int space_needed) override;
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+  template <class T>
+  void EmitVectorStoreICRegisters(T* instr);
+
+  void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
+
+  // Emits code for pushing either a tagged constant, a (non-double)
+  // register, or a stack slot operand.
+  void EmitPushTaggedOperand(LOperand* operand);
+
+  friend class LGapResolver;
+
+#ifdef _MSC_VER
+  // On Windows, you may not access the stack more than one page below
+  // the most recently mapped page. To make the allocated area randomly
+  // accessible, we write an arbitrary value to each page in the range
+  // esp + offset - page_size .. esp in turn.
+  void MakeSureStackPagesMapped(int offset);
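+  // A rough sketch of the probing loop (illustrative only; the emitted
+  // code lives in the .cc file):
+  //   for (int off = offset - kPageSize; off > 0; off -= kPageSize) {
+  //     __ mov(Operand(esp, off), eax);  // touch one word per page
+  //   }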
+#endif
+
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+  Scope* const scope_;
+  ZoneList<LDeferredCode*> deferred_;
+  bool dynamic_frame_alignment_;
+  bool support_aligned_spilled_doubles_;
+  bool frame_is_built_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Resolver that turns a set of parallel moves into a sequential list of
+  // moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope final BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->masm_->PushSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+      DCHECK(codegen_->info()->is_calling());
+    }
+
+    ~PushSafepointRegistersScope() {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      codegen_->masm_->PopSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode : public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() {}
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  Label done_;
+  int instruction_index_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_IA32_LITHIUM_CODEGEN_IA32_H_
diff --git a/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc b/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
new file mode 100644
index 0000000..c3284df
--- /dev/null
+++ b/src/crankshaft/ia32/lithium-gap-resolver-ia32.cc
@@ -0,0 +1,493 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
+#include "src/crankshaft/ia32/lithium-gap-resolver-ia32.h"
+#include "src/register-configuration.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner),
+      moves_(32, owner->zone()),
+      source_uses_(),
+      destination_uses_(),
+      spilled_register_(-1) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  DCHECK(HasBeenReset());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      PerformMove(i);
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      DCHECK(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  Finish();
+  DCHECK(HasBeenReset());
+}
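+// Example: resolving {eax -> ebx, ebx -> eax, 1 -> ecx} first breaks the
+// eax/ebx cycle with a swap and only then materializes the constant into
+// ecx, so ecx stays available as a scratch register while the cycle is
+// being resolved.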
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) AddMove(move);
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.  We use operand swaps to resolve cycles,
+  // which means that a call to PerformMove could change any source operand
+  // in the move graph.
+
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved on the side.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      // Though PerformMove can change any source operand in the move graph,
+      // this call cannot create a blocking move via a swap (this loop does
+      // not miss any).  Assume there is a non-blocking move with source A
+      // and this move is blocked on source B and there is a swap of A and
+      // B.  Then A and B must be involved in the same cycle (or they would
+      // not be swapped).  Since this move's destination is B and there is
+      // only a single incoming edge to an operand, this move must also be
+      // involved in the same cycle.  In that case, the blocking move will
+      // be created but will be "pending" when we return from PerformMove.
+      PerformMove(i);
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // This move's source may have changed due to swaps to resolve cycles and
+  // so it may now be the last move in the cycle.  If so remove it.
+  if (moves_[index].source()->Equals(destination)) {
+    RemoveMove(index);
+    return;
+  }
+
+  // The move may be blocked on at most one pending move, in which case
+  // we have a cycle.  Search for such a blocking move and perform a swap to
+  // resolve it.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination)) {
+      DCHECK(other_move.IsPending());
+      EmitSwap(index);
+      return;
+    }
+  }
+
+  // This move is not blocked.
+  EmitMove(index);
+}
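+// Worked example: for the cycle {A -> B, B -> A}, PerformMove on the first
+// move marks it pending and recurses into the second, whose only blocker
+// is that pending move, so EmitSwap is called. The swap rewrites the first
+// move to B -> B, which is then removed as redundant.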
+
+
+void LGapResolver::AddMove(LMoveOperands move) {
+  LOperand* source = move.source();
+  if (source->IsRegister()) ++source_uses_[source->index()];
+
+  LOperand* destination = move.destination();
+  if (destination->IsRegister()) ++destination_uses_[destination->index()];
+
+  moves_.Add(move, cgen_->zone());
+}
+
+
+void LGapResolver::RemoveMove(int index) {
+  LOperand* source = moves_[index].source();
+  if (source->IsRegister()) {
+    --source_uses_[source->index()];
+    DCHECK(source_uses_[source->index()] >= 0);
+  }
+
+  LOperand* destination = moves_[index].destination();
+  if (destination->IsRegister()) {
+    --destination_uses_[destination->index()];
+    DCHECK(destination_uses_[destination->index()] >= 0);
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+int LGapResolver::CountSourceUses(LOperand* operand) {
+  int count = 0;
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
+      ++count;
+    }
+  }
+  return count;
+}
+
+
+Register LGapResolver::GetFreeRegisterNot(Register reg) {
+  int skip_index = reg.is(no_reg) ? -1 : reg.code();
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+    int code = config->GetAllocatableGeneralCode(i);
+    if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
+        code != skip_index) {
+      return Register::from_code(code);
+    }
+  }
+  return no_reg;
+}
+
+
+bool LGapResolver::HasBeenReset() {
+  if (!moves_.is_empty()) return false;
+  if (spilled_register_ >= 0) return false;
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+    int code = config->GetAllocatableGeneralCode(i);
+    if (source_uses_[code] != 0) return false;
+    if (destination_uses_[code] != 0) return false;
+  }
+  return true;
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::Finish() {
+  if (spilled_register_ >= 0) {
+    __ pop(Register::from_code(spilled_register_));
+    spilled_register_ = -1;
+  }
+  moves_.Rewind(0);
+}
+
+
+void LGapResolver::EnsureRestored(LOperand* operand) {
+  if (operand->IsRegister() && operand->index() == spilled_register_) {
+    __ pop(Register::from_code(spilled_register_));
+    spilled_register_ = -1;
+  }
+}
+
+
+Register LGapResolver::EnsureTempRegister() {
+  // 1. We may have already spilled to create a temp register.
+  if (spilled_register_ >= 0) {
+    return Register::from_code(spilled_register_);
+  }
+
+  // 2. We may have a free register that we can use without spilling.
+  Register free = GetFreeRegisterNot(no_reg);
+  if (!free.is(no_reg)) return free;
+
+  // 3. Prefer to spill a register that is not used in any remaining move
+  // because it will not need to be restored until the end.
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+    int code = config->GetAllocatableGeneralCode(i);
+    if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
+      Register scratch = Register::from_code(code);
+      __ push(scratch);
+      spilled_register_ = code;
+      return scratch;
+    }
+  }
+
+  // 4. Use an arbitrary register.  Register 0 is as arbitrary as any other.
+  spilled_register_ = config->GetAllocatableGeneralCode(0);
+  Register scratch = Register::from_code(spilled_register_);
+  __ push(scratch);
+  return scratch;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+  EnsureRestored(source);
+  EnsureRestored(destination);
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = cgen_->ToRegister(source);
+    Operand dst = cgen_->ToOperand(destination);
+    __ mov(dst, src);
+
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Operand src = cgen_->ToOperand(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      __ mov(dst, src);
+    } else {
+      // Spill on demand to use a temporary register for memory-to-memory
+      // moves.
+      Register tmp = EnsureTempRegister();
+      Operand dst = cgen_->ToOperand(destination);
+      __ mov(tmp, src);
+      __ mov(dst, tmp);
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      Representation r = cgen_->IsSmi(constant_source)
+          ? Representation::Smi() : Representation::Integer32();
+      if (cgen_->IsInteger32(constant_source)) {
+        __ Move(dst, cgen_->ToImmediate(constant_source, r));
+      } else {
+        __ LoadObject(dst, cgen_->ToHandle(constant_source));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      double v = cgen_->ToDouble(constant_source);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
+      int32_t lower = static_cast<int32_t>(int_val);
+      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
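+      // For example, 1.0 has bit pattern 0x3FF0000000000000, so upper is
+      // 0x3FF00000 and lower is 0.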
+      XMMRegister dst = cgen_->ToDoubleRegister(destination);
+      if (int_val == 0) {
+        __ xorps(dst, dst);
+      } else {
+        __ push(Immediate(upper));
+        __ push(Immediate(lower));
+        __ movsd(dst, Operand(esp, 0));
+        __ add(esp, Immediate(kDoubleSize));
+      }
+    } else {
+      DCHECK(destination->IsStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      Representation r = cgen_->IsSmi(constant_source)
+          ? Representation::Smi() : Representation::Integer32();
+      if (cgen_->IsInteger32(constant_source)) {
+        __ Move(dst, cgen_->ToImmediate(constant_source, r));
+      } else {
+        Register tmp = EnsureTempRegister();
+        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
+        __ mov(dst, tmp);
+      }
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    XMMRegister src = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = cgen_->ToDoubleRegister(destination);
+      __ movaps(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      __ movsd(dst, src);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() ||
+           destination->IsDoubleStackSlot());
+    Operand src = cgen_->ToOperand(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = cgen_->ToDoubleRegister(destination);
+      __ movsd(dst, src);
+    } else {
+      // We rely on having xmm0 available as a fixed scratch register.
+      Operand dst = cgen_->ToOperand(destination);
+      __ movsd(xmm0, src);
+      __ movsd(dst, xmm0);
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  RemoveMove(index);
+}
+
+
+void LGapResolver::EmitSwap(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+  EnsureRestored(source);
+  EnsureRestored(destination);
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister() && destination->IsRegister()) {
+    // Register-register.
+    Register src = cgen_->ToRegister(source);
+    Register dst = cgen_->ToRegister(destination);
+    __ push(src);
+    __ mov(src, dst);
+    __ pop(dst);
+
+  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
+             (source->IsStackSlot() && destination->IsRegister())) {
+    // Register-memory.  Use a free register as a temp if possible.  Do not
+    // spill on demand because the simple spill implementation cannot avoid
+    // spilling src at this point.
+    Register tmp = GetFreeRegisterNot(no_reg);
+    Register reg =
+        cgen_->ToRegister(source->IsRegister() ? source : destination);
+    Operand mem =
+        cgen_->ToOperand(source->IsRegister() ? destination : source);
+    if (tmp.is(no_reg)) {
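+      // No free temp register: swap the register and the memory word in
+      // place with the classic three-XOR trick; afterwards each holds the
+      // other's original value.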
+      __ xor_(reg, mem);
+      __ xor_(mem, reg);
+      __ xor_(reg, mem);
+    } else {
+      __ mov(tmp, mem);
+      __ mov(mem, reg);
+      __ mov(reg, tmp);
+    }
+
+  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+    // Memory-memory.  Spill on demand to use a temporary.  If there is a
+    // free register after that, use it as a second temporary.
+    Register tmp0 = EnsureTempRegister();
+    Register tmp1 = GetFreeRegisterNot(tmp0);
+    Operand src = cgen_->ToOperand(source);
+    Operand dst = cgen_->ToOperand(destination);
+    if (tmp1.is(no_reg)) {
+      // Only one temp register available to us.
+      __ mov(tmp0, dst);
+      __ xor_(tmp0, src);
+      __ xor_(src, tmp0);
+      __ xor_(tmp0, src);
+      __ mov(dst, tmp0);
+    } else {
+      __ mov(tmp0, dst);
+      __ mov(tmp1, src);
+      __ mov(dst, tmp1);
+      __ mov(src, tmp0);
+    }
+  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    // XMM register-register swap. We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = cgen_->ToDoubleRegister(source);
+    XMMRegister dst = cgen_->ToDoubleRegister(destination);
+    __ movaps(xmm0, src);
+    __ movaps(src, dst);
+    __ movaps(dst, xmm0);
+  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
+    // XMM register-memory swap.  We rely on having xmm0
+    // available as a fixed scratch register.
+    DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
+    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
+                                              ? source
+                                              : destination);
+    Operand other =
+        cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
+    __ movsd(xmm0, other);
+    __ movsd(other, reg);
+    __ movaps(reg, xmm0);
+  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+    // Double-width memory-to-memory.  Spill on demand to use a general
+    // purpose temporary register and also rely on having xmm0 available as
+    // a fixed scratch register.
+    Register tmp = EnsureTempRegister();
+    Operand src0 = cgen_->ToOperand(source);
+    Operand src1 = cgen_->HighOperand(source);
+    Operand dst0 = cgen_->ToOperand(destination);
+    Operand dst1 = cgen_->HighOperand(destination);
+    __ movsd(xmm0, dst0);  // Save destination in xmm0.
+    __ mov(tmp, src0);  // Then use tmp to copy source to destination.
+    __ mov(dst0, tmp);
+    __ mov(tmp, src1);
+    __ mov(dst1, tmp);
+    __ movsd(src0, xmm0);
+
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+
+  // The swap of source and destination has executed a move from source to
+  // destination.
+  RemoveMove(index);
+
+  // Any unperformed (including pending) move with a source of either
+  // this move's source or destination needs to have its source
+  // changed to reflect the state of affairs after the swap.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(source)) {
+      moves_[i].set_source(destination);
+    } else if (other_move.Blocks(destination)) {
+      moves_[i].set_source(source);
+    }
+  }
+
+  // In addition to swapping the actual uses as sources, we need to update
+  // the use counts.
+  if (source->IsRegister() && destination->IsRegister()) {
+    int temp = source_uses_[source->index()];
+    source_uses_[source->index()] = source_uses_[destination->index()];
+    source_uses_[destination->index()] = temp;
+  } else if (source->IsRegister()) {
+    // We don't have use counts for non-register operands like destination.
+    // Compute those counts now.
+    source_uses_[source->index()] = CountSourceUses(source);
+  } else if (destination->IsRegister()) {
+    source_uses_[destination->index()] = CountSourceUses(destination);
+  }
+}
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/crankshaft/ia32/lithium-gap-resolver-ia32.h b/src/crankshaft/ia32/lithium-gap-resolver-ia32.h
new file mode 100644
index 0000000..687087f
--- /dev/null
+++ b/src/crankshaft/ia32/lithium-gap-resolver-ia32.h
@@ -0,0 +1,86 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+#define V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
+
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver final BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // Emit any code necessary at the end of a gap move.
+  void Finish();
+
+  // Add or delete a move from the move graph without emitting any code.
+  // Used to build up the graph and remove trivial moves.
+  void AddMove(LMoveOperands move);
+  void RemoveMove(int index);
+
+  // Report the count of uses of operand as a source in a not-yet-performed
+  // move.  Used to rebuild use counts.
+  int CountSourceUses(LOperand* operand);
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Execute a move by emitting a swap of two operands.  The move from
+  // source to destination is removed from the move graph.
+  void EmitSwap(int index);
+
+  // Ensure that the given operand is not spilled.
+  void EnsureRestored(LOperand* operand);
+
+  // Return a register that can be used as a temp register, spilling
+  // something if necessary.
+  Register EnsureTempRegister();
+
+  // Return a known free register different from the given one (which may
+  // be no_reg, in which case any free register is acceptable), or no_reg
+  // if there is no such register.
+  Register GetFreeRegisterNot(Register reg);
+
+  // Verify that the state is the initial one, ready to resolve a single
+  // parallel move.
+  bool HasBeenReset();
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  // Source and destination use counts for the general purpose registers.
+  int source_uses_[Register::kNumRegisters];
+  int destination_uses_[Register::kNumRegisters];
+
+  // If we had to spill on demand, the currently spilled register's
+  // allocation index.
+  int spilled_register_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_IA32_LITHIUM_GAP_RESOLVER_IA32_H_
diff --git a/src/crankshaft/ia32/lithium-ia32.cc b/src/crankshaft/ia32/lithium-ia32.cc
new file mode 100644
index 0000000..a0cb939
--- /dev/null
+++ b/src/crankshaft/ia32/lithium-ia32.cc
@@ -0,0 +1,2696 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/ia32/lithium-ia32.h"
+
+#include <sstream>
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"
+#include "src/crankshaft/lithium-inl.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
+  DCHECK(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+bool LInstruction::HasDoubleRegisterResult() {
+  return HasResult() && result()->IsDoubleRegister();
+}
+
+
+bool LInstruction::HasDoubleRegisterInput() {
+  for (int i = 0; i < InputCount(); i++) {
+    LOperand* op = InputAt(i);
+    if (op != NULL && op->IsDoubleRegister()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::ROR: return "ror-t";
+    case Token::SHL: return "sal-t";
+    case Token::SAR: return "sar-t";
+    case Token::SHR: return "shr-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(),
+              true_block_id(), false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+
+void LCallFunction::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  function()->PrintTo(stream);
+  if (hydrogen()->HasVectorAndSlot()) {
+    stream->Add(" (type-feedback-vector ");
+    temp_vector()->PrintTo(stream);
+    stream->Add(" ");
+    temp_slot()->PrintTo(stream);
+    stream->Add(")");
+  }
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Skip a slot when allocating a double-width slot.
+  if (kind == DOUBLE_REGISTERS) {
+    spill_slot_count_++;
+    spill_slot_count_ |= 1;
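+    // After the bump, OR-ing in 1 forces the returned index to be odd:
+    // e.g. counts of 3 and 4 both yield index 5 (advancing the count to
+    // 6), giving every double a consistently aligned two-slot pair.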
+    num_double_slots_++;
+  }
+  return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  int index = GetNextSpillIndex(kind);
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    DCHECK(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  std::ostringstream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.str().c_str());
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d]", base_offset());
+  } else {
+    stream->Add("]");
+  }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d] <-", base_offset());
+  } else {
+    stream->Add("] <- ");
+  }
+
+  if (value() == NULL) {
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
+           hydrogen()->value()->representation().IsDouble());
+    stream->Add("<the hole(nan)>");
+  } else {
+    value()->PrintTo(stream);
+  }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+  DCHECK(is_unused());
+  chunk_ = new(zone()) LPlatformChunk(info(), graph());
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // Reserve the first spill slot for the state of dynamic alignment.
+  if (info()->IsOptimizing()) {
+    int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    DCHECK_EQ(alignment_state_index, 0);
+    USE(alignment_state_index);
+  }
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
+  return new (zone())
+      LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                             LUnallocated::USED_AT_START));
+}
+
+
+static inline bool CanBeImmediateConstant(HValue* value) {
+  return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return CanBeImmediateConstant(value)
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return CanBeImmediateConstant(value)
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value,
+                                            Register fixed_register) {
+  return CanBeImmediateConstant(value)
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseFixed(value, fixed_register);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return CanBeImmediateConstant(value)
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return CanBeImmediateConstant(value)
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr,
+    int index) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+                                         Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateResultInstruction<1>* instr,
+    XMMRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  instr->set_environment(CreateEnvironment(
+      hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  DCHECK(!instr->HasPointerMap());
+  instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+
+    HValue* right_value = instr->right();
+    LOperand* right = NULL;
+    int constant_value = 0;
+    bool does_deopt = false;
+    if (right_value->IsConstant()) {
+      HConstant* constant = HConstant::cast(right_value);
+      right = chunk_->DefineConstantOperand(constant);
+      constant_value = constant->Integer32Value() & 0x1f;
+      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+      // truncated to smi.
+      if (instr->representation().IsSmi() && constant_value > 0) {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
+    } else {
+      right = UseFixed(right_value, ecx);
+    }
+
+    // In addition, a logical shift by 0 deoptimizes if the result cannot
+    // be truncated to int32.
+    if (op == Token::SHR && constant_value == 0) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    }
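+    // Example: for x >>> 0 with x == -1 the result is 0xFFFFFFFF, which
+    // does not fit in an int32, so a deopt is needed unless all uses treat
+    // the value as uint32.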
+
+    LInstruction* result =
+        DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+    return does_deopt ? AssignEnvironment(result) : result;
+  } else {
+    return DoArithmeticT(op, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
+  if (op == Token::MOD) {
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return MarkAsCall(DefineSameAsFirst(result), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return CpuFeatures::IsSupported(AVX) ? DefineAsRegister(result)
+                                         : DefineSameAsFirst(result);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left_operand = UseFixed(left, edx);
+  LOperand* right_operand = UseFixed(right, eax);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  DCHECK(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    DCHECK(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    DCHECK(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      DCHECK(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    DCHECK(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new(zone()) LDummy());
+    } else {
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new(zone())
+          LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    HBasicBlock* successor;
+    if (current->IsControlInstruction() &&
+        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+        successor != NULL) {
+      instr = new(zone()) LGoto(successor);
+    } else {
+      instr = current->CompileToLithium(this);
+    }
+  }
+
+  argument_count_ += current->argument_delta();
+  DCHECK(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    AddInstruction(instr, current);
+  }
+
+  current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+                                   HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+  instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+  // Make sure that the lithium instruction has either no fixed register
+  // constraints in temps or the result OR no uses that are only used at
+  // start. If this invariant doesn't hold, the register allocator can decide
+  // to insert a split of a range immediately before the instruction due to an
+  // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it was just a plain use), so it is free to move the split child into
+  // the same register that is used for the use-at-start.
+  // See https://code.google.com/p/chromium/issues/detail?id=201590
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
+    int fixed = 0;
+    int used_at_start = 0;
+    for (UseIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->IsUsedAtStart()) ++used_at_start;
+    }
+    if (instr->Output() != NULL) {
+      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+    }
+    for (TempIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->HasFixedPolicy()) ++fixed;
+    }
+    DCHECK(fixed == 0 || used_at_start == 0);
+  }
+#endif
+
+  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+    instr = AssignPointerMap(instr);
+  }
+  if (FLAG_stress_environments && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+  chunk_->AddInstruction(instr, current_block_);
+
+  if (instr->IsCall() || instr->IsPrologue()) {
+    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+    if (hydrogen_val->HasObservableSideEffects()) {
+      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+      sim->ReplayEnvironment(current_block_->last_environment());
+      hydrogen_value_for_lazy_bailout = sim;
+    }
+    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+    chunk_->AddInstruction(bailout, current_block_);
+  }
+}
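+// In short: every call site is followed by an LLazyBailout carrying an
+// environment, so lazy deoptimization always has a frame description to
+// reconstruct from; for calls with observable side effects the environment
+// is replayed from the HSimulate that follows the call.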
+
+
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+  return new (zone()) LPrologue();
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+      type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+  LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
+  LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
+  }
+  return branch;
+}
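+// The environment attached in the hard case above lets the branch
+// deoptimize when a value's runtime type falls outside the recorded
+// ToBoolean expectations (e.g. only smis seen so far, but a heap object
+// arrives).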
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new(zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpMapAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new(zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* left =
+      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+  LOperand* right =
+      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
+  LOperand* context = UseFixed(instr->context(), esi);
+  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+    HHasInPrototypeChainAndBranch* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* prototype = UseRegister(instr->prototype());
+  LOperand* temp = TempRegister();
+  LHasInPrototypeChainAndBranch* result =
+      new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegister(instr->receiver());
+  LOperand* function = UseRegister(instr->function());
+  LOperand* temp = TempRegister();
+  LWrapReceiver* result =
+      new(zone()) LWrapReceiver(receiver, function, temp);
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), edi);
+  LOperand* receiver = UseFixed(instr->receiver(), eax);
+  LOperand* length = UseFixed(instr->length(), ebx);
+  LOperand* elements = UseFixed(instr->elements(), ecx);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
+  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+  int argc = instr->OperandCount();
+  for (int i = 0; i < argc; ++i) {
+    LOperand* argument = UseAny(instr->argument(i));
+    AddInstruction(new(zone()) LPushArgument(argument), instr);
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+    HStoreCodeEntry* store_code_entry) {
+  LOperand* function = UseRegister(store_code_entry->function());
+  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+  return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(
+      new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new(zone()) LContext, esi);
+  }
+
+  return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+    HCallJSFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), edi);
+
+  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+    HCallWithDescriptor* instr) {
+  CallInterfaceDescriptor descriptor = instr->descriptor();
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  // Target
+  ops.Add(target, zone());
+  // Context
+  LOperand* op = UseFixed(instr->OperandAt(1), esi);
+  ops.Add(op, zone());
+  // Other register parameters
+  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+       i < instr->OperandCount(); i++) {
+    op =
+        UseFixed(instr->OperandAt(i),
+                 descriptor.GetRegisterParameter(
+                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+      descriptor, ops, zone());
+  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* function = UseFixed(instr->function(), edi);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathFloor* result = new(zone()) LMathFloor(input);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = FixedTemp(xmm4);
+  LMathRound* result = new(zone()) LMathRound(input, temp);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+  LOperand* context = UseAny(instr->context());  // Deferred use.
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+  Representation r = instr->value()->representation();
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathClz32* result = new(zone()) LMathClz32(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* value = UseTempRegister(instr->value());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+  LOperand* input = UseAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LMathSqrt(input));
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+  return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* constructor = UseFixed(instr->constructor(), edi);
+  LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* function = UseFixed(instr->function(), edi);
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(edx);
+    vector = FixedTemp(ebx);
+  }
+
+  LCallFunction* call =
+      new (zone()) LCallFunction(context, function, slot, vector);
+  return MarkAsCall(DefineFixed(call, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    return DefineSameAsFirst(new(zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+          dividend, divisor));
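+  // Deopt when the quotient could be -0, when kMinInt / -1 could overflow,
+  // or when a non-truncating division by anything but +/-1 could have a
+  // remainder.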
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+       divisor != 1 && divisor != -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(eax);
+  LOperand* temp2 = FixedTemp(edx);
+  LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+          dividend, divisor, temp1, temp2), edx);
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseFixed(instr->left(), eax);
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = FixedTemp(edx);
+  LInstruction* result = DefineFixed(new(zone()) LDivI(
+          dividend, divisor, temp), eax);
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      instr->CheckFlag(HValue::kCanOverflow) ||
+      !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+          dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(eax);
+  LOperand* temp2 = FixedTemp(edx);
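+  // A third temp is only needed when the dividend's sign can differ from
+  // the divisor's, i.e. when a flooring correction of the truncated
+  // quotient may be required.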
+  LOperand* temp3 =
+      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+      NULL : TempRegister();
+  LInstruction* result =
+      DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+                                                   divisor,
+                                                   temp1,
+                                                   temp2,
+                                                   temp3),
+                  edx);
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseFixed(instr->left(), eax);
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = FixedTemp(edx);
+  LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
+          dividend, divisor, temp), eax);
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      instr->CheckFlag(HValue::kCanOverflow)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
+    return DoFlooringDivByConstI(instr);
+  } else {
+    return DoFlooringDivI(instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+          dividend, divisor));
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(eax);
+  LOperand* temp2 = FixedTemp(edx);
+  LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+          dividend, divisor, temp1, temp2), eax);
+  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseFixed(instr->left(), eax);
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = FixedTemp(edx);
+  LInstruction* result = DefineFixed(new(zone()) LModI(
+          dividend, divisor, temp), edx);
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
+  } else {
+    return DoArithmeticT(Token::MOD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstant(instr->BetterRightOperand());
+    LOperand* temp = NULL;
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      temp = TempRegister();
+    }
+    LMulI* mul = new(zone()) LMulI(left, right, temp);
+    if (instr->CheckFlag(HValue::kCanOverflow) ||
+        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
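+      // AssignEnvironment attaches the deopt environment to |mul| in place
+      // and returns the same instruction, so the result can be ignored here.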
+      AssignEnvironment(mul);
+    }
+    return DefineSameAsFirst(mul);
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LSubI* sub = new(zone()) LSubI(left, right);
+    LInstruction* result = DefineSameAsFirst(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    // Check to see if it would be advantageous to use an lea instruction rather
+    // than an add. This is the case when no overflow check is needed and there
+    // are multiple uses of the add's inputs, so using a 3-register add will
+    // preserve all input values for later uses.
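+    // (E.g. "lea eax, [ebx + ecx]" leaves both inputs intact, whereas
+    // "add ebx, ecx" clobbers the left one.)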
+    bool use_lea = LAddI::UseLea(instr);
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    HValue* right_candidate = instr->BetterRightOperand();
+    LOperand* right = use_lea
+        ? UseRegisterOrConstantAtStart(right_candidate)
+        : UseOrConstantAtStart(right_candidate);
+    LAddI* add = new(zone()) LAddI(left, right);
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    LInstruction* result = use_lea
+        ? DefineAsRegister(add)
+        : DefineSameAsFirst(add);
+    if (can_overflow) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else if (instr->representation().IsExternal()) {
+    DCHECK(instr->IsConsistentExternalRepresentation());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+    bool use_lea = LAddI::UseLea(instr);
+    LOperand* left = UseRegisterAtStart(instr->left());
+    HValue* right_candidate = instr->right();
+    LOperand* right = use_lea
+        ? UseRegisterOrConstantAtStart(right_candidate)
+        : UseOrConstantAtStart(right_candidate);
+    LAddI* add = new(zone()) LAddI(left, right);
+    LInstruction* result = use_lea
+        ? DefineAsRegister(add)
+        : DefineSameAsFirst(add);
+    return result;
+  } else {
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    left = UseRegisterAtStart(instr->left());
+    right = UseRegisterAtStart(instr->right());
+  }
+  LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
+  return DefineSameAsFirst(minmax);
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  DCHECK(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), xmm2);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), xmm1)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+  LPower* result = new(zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  DCHECK(instr->left()->representation().IsSmiOrTagged());
+  DCHECK(instr->right()->representation().IsSmiOrTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left = UseFixed(instr->left(), edx);
+  LOperand* right = UseFixed(instr->right(), eax);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    LOperand* left;
+    LOperand* right;
+    if (CanBeImmediateConstant(instr->left()) &&
+        CanBeImmediateConstant(instr->right())) {
+      // The code generator requires either both inputs to be constant
+      // operands, or neither.
+      left = UseConstant(instr->left());
+      right = UseConstant(instr->right());
+    } else {
+      left = UseRegisterAtStart(instr->left());
+      right = UseRegisterAtStart(instr->right());
+    }
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseOrConstantAtStart(instr->right());
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+    HCompareMinusZeroAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  LOperand* scratch = TempRegister();
+  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsUndetectableAndBranch(
+      UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left = UseFixed(instr->left(), edx);
+  LOperand* right = UseFixed(instr->right(), eax);
+
+  LStringCompareAndBranch* result = new(zone())
+      LStringCompareAndBranch(context, left, right);
+
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LHasInstanceTypeAndBranch(
+      UseRegisterAtStart(instr->value()),
+      TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+                                           TempRegister(),
+                                           TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+  LOperand* map = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) {
+  if (instr->encoding() == String::ONE_BYTE_ENCODING) {
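+    // One-byte stores are emitted with mov_b, which requires a
+    // byte-addressable register; pinning the value to eax keeps that
+    // guarantee simple.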
+    if (FLAG_debug_code) {
+      return UseFixed(instr->value(), eax);
+    } else {
+      return UseFixedOrConstant(instr->value(), eax);
+    }
+  } else {
+    if (FLAG_debug_code) {
+      return UseRegisterAtStart(instr->value());
+    } else {
+      return UseRegisterOrConstantAtStart(instr->value());
+    }
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = FLAG_debug_code
+      ? UseRegisterAtStart(instr->index())
+      : UseRegisterOrConstantAtStart(instr->index());
+  LOperand* value = GetSeqStringSetCharOperand(instr);
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL;
+  LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
+                                                       index, value);
+  if (FLAG_debug_code) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
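+  // Without --debug-code a statically provable check is dropped entirely
+  // (above); with it, the check is still emitted, but as an assertion
+  // rather than a deopt when it was skippable, hence no environment below.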
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+      ? UseOrConstantAtStart(instr->length())
+      : UseAtStart(instr->length());
+  LInstruction* result = new(zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+    HBoundsCheckBaseIndexInformation* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception).  There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  HValue* val = instr->value();
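+  // Dispatch on the <from, to> representation pair. A Smi source is either
+  // a no-op change to Tagged or is widened to Tagged and falls through to
+  // the Tagged cases below.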
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(val);
+      return DefineSameAsFirst(new(zone()) LDummyUse(value));
+    }
+    from = Representation::Tagged();
+  }
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(val);
+      LOperand* temp = TempRegister();
+      LInstruction* result =
+          DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
+        return DefineSameAsFirst(new(zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegister(val);
+        return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(val);
+        bool truncating = instr->CanTruncateToInt32();
+        LOperand* xmm_temp = !truncating ? FixedTemp(xmm1) : NULL;
+        LInstruction* result =
+            DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegisterAtStart(val);
+      LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
+      return AssignPointerMap(Define(result, result_temp));
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      return AssignEnvironment(
+          DefineAsRegister(new(zone()) LDoubleToSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      bool truncating = instr->CanTruncateToInt32();
+      bool needs_temp = !truncating;
+      LOperand* value = needs_temp ? UseTempRegister(val) : UseRegister(val);
+      LOperand* temp = needs_temp ? TempRegister() : NULL;
+      LInstruction* result =
+          DefineAsRegister(new(zone()) LDoubleToI(value, temp));
+      if (!truncating) result = AssignEnvironment(result);
+      return result;
+    }
+  } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(val);
+      if (!instr->CheckFlag(HValue::kCanOverflow)) {
+        return DefineSameAsFirst(new(zone()) LSmiTag(value));
+      } else if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* temp = TempRegister();
+        LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
+        return AssignPointerMap(DefineSameAsFirst(result));
+      } else {
+        LOperand* temp = TempRegister();
+        LNumberTagI* result = new(zone()) LNumberTagI(value, temp);
+        return AssignPointerMap(DefineSameAsFirst(result));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value));
+      if (instr->CheckFlag(HValue::kCanOverflow)) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(to.IsDouble());
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
+      } else {
+        return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckNonSmi(value);
+  if (!instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
+    HCheckArrayBufferNotNeutered* instr) {
+  LOperand* view = UseRegisterAtStart(instr->value());
+  LOperand* scratch = TempRegister();
+  LCheckArrayBufferNotNeutered* result =
+      new (zone()) LCheckArrayBufferNotNeutered(view, scratch);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  // If the object is in new space, we'll emit a global cell compare and so
+  // want the value in a register.  If the object gets promoted before we
+  // emit code, we will still get the register but will do an immediate
+  // compare instead of the cell compare.  This is safe.
+  LOperand* value = instr->object_in_new_space()
+      ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  if (input_rep.IsDouble()) {
+    LOperand* reg = UseRegister(value);
+    return DefineFixed(new(zone()) LClampDToUint8(reg), eax);
+  } else if (input_rep.IsInteger32()) {
+    LOperand* reg = UseFixed(value, eax);
+    return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
+  } else {
+    DCHECK(input_rep.IsSmiOrTagged());
+    LOperand* reg = UseFixed(value, eax);
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve xmm1 explicitly.
+    LOperand* temp = FixedTemp(xmm1);
+    LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
+    return AssignEnvironment(DefineFixed(result, eax));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+  HValue* value = instr->value();
+  DCHECK(value->representation().IsDouble());
+  return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+  LOperand* lo = UseRegister(instr->lo());
+  LOperand* hi = UseRegister(instr->hi());
+  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new(zone()) LReturn(
+      UseFixed(instr->value(), eax), context, parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new(zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    uint64_t const bits = instr->DoubleValueAsBits();
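+    // A zero bit pattern can be materialized with xorps alone; any other
+    // double needs a scratch register to assemble the bits.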
+    LOperand* temp = bits ? TempRegister() : nullptr;
+    return DefineAsRegister(new(zone()) LConstantD(temp));
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new(zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new(zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* value;
+  LOperand* temp;
+  LOperand* context = UseRegister(instr->context());
+  if (instr->NeedsWriteBarrier()) {
+    value = UseTempRegister(instr->value());
+    temp = TempRegister();
+  } else {
+    value = UseRegister(instr->value());
+    temp = NULL;
+  }
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  LOperand* obj = (instr->access().IsExternalMemory() &&
+                   instr->access().offset() == 0)
+      ? UseRegisterOrConstantAtStart(instr->object())
+      : UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
+      context, object, vector);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
+                                         TempRegister())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
+  ElementsKind elements_kind = instr->elements_kind();
+  bool clobbers_key = ExternalArrayOpRequiresTemp(
+      instr->key()->representation(), elements_kind);
+  LOperand* key = clobbers_key
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+  LInstruction* result = NULL;
+
+  if (!instr->is_fixed_typed_array()) {
+    LOperand* obj = UseRegisterAtStart(instr->elements());
+    result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
+  } else {
+    DCHECK(
+        (instr->representation().IsInteger32() &&
+         !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
+        (instr->representation().IsDouble() &&
+         (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+    LOperand* backing_store = UseRegister(instr->elements());
+    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+    result = DefineAsRegister(
+        new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
+  }
+
+  bool needs_environment;
+  if (instr->is_fixed_typed_array()) {
+    // see LCodeGen::DoLoadKeyedExternalArray
+    needs_environment = elements_kind == UINT32_ELEMENTS &&
+                        !instr->CheckFlag(HInstruction::kUint32);
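+    // A uint32 load can yield a value above kMaxInt, which deopts unless
+    // every use reinterprets the result as uint32.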
+  } else {
+    // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+    // LCodeGen::DoLoadKeyedFixedArray
+    needs_environment =
+        instr->RequiresHoleCheck() ||
+        (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
+  }
+
+  if (needs_environment) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+  LLoadKeyedGeneric* result =
+      new(zone()) LLoadKeyedGeneric(context, object, key, vector);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+
+  // Determine if we need a byte register in this case for the value.
+  bool val_is_fixed_register =
+      elements_kind == UINT8_ELEMENTS ||
+      elements_kind == INT8_ELEMENTS ||
+      elements_kind == UINT8_CLAMPED_ELEMENTS;
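+  // On ia32 only eax, ebx, ecx and edx have 8-bit aliases, so the value is
+  // pinned to eax below.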
+  if (val_is_fixed_register) {
+    return UseFixed(instr->value(), eax);
+  }
+
+  return UseRegister(instr->value());
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  if (!instr->is_fixed_typed_array()) {
+    DCHECK(instr->elements()->representation().IsTagged());
+    DCHECK(instr->key()->representation().IsInteger32() ||
+           instr->key()->representation().IsSmi());
+
+    if (instr->value()->representation().IsDouble()) {
+      LOperand* object = UseRegisterAtStart(instr->elements());
+      LOperand* val = UseRegisterAtStart(instr->value());
+      LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+      return new (zone()) LStoreKeyed(object, key, val, nullptr);
+    } else {
+      DCHECK(instr->value()->representation().IsSmiOrTagged());
+      bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+      LOperand* obj = UseRegister(instr->elements());
+      LOperand* val;
+      LOperand* key;
+      if (needs_write_barrier) {
+        val = UseTempRegister(instr->value());
+        key = UseTempRegister(instr->key());
+      } else {
+        val = UseRegisterOrConstantAtStart(instr->value());
+        key = UseRegisterOrConstantAtStart(instr->key());
+      }
+      return new (zone()) LStoreKeyed(obj, key, val, nullptr);
+    }
+  }
+
+  ElementsKind elements_kind = instr->elements_kind();
+  DCHECK(
+      (instr->value()->representation().IsInteger32() &&
+       !IsDoubleOrFloatElementsKind(elements_kind)) ||
+      (instr->value()->representation().IsDouble() &&
+       IsDoubleOrFloatElementsKind(elements_kind)));
+  DCHECK(instr->elements()->representation().IsExternal());
+
+  LOperand* backing_store = UseRegister(instr->elements());
+  LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+  LOperand* val = GetStoreKeyedValueOperand(instr);
+  bool clobbers_key = ExternalArrayOpRequiresTemp(
+      instr->key()->representation(), elements_kind);
+  LOperand* key = clobbers_key
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+  return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
+
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreKeyedGeneric* result = new (zone())
+      LStoreKeyedGeneric(context, object, key, value, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
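+    // A simple transition only rewrites the map word, so no runtime call is
+    // needed; the temps hold the new map and a write-barrier scratch.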
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LOperand* temp_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, NULL,
+                                            new_map_reg, temp_reg);
+    return result;
+  } else {
+    LOperand* object = UseFixed(instr->object(), eax);
+    LOperand* context = UseFixed(instr->context(), esi);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
+    return MarkAsCall(result, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object = Use(instr->object());
+  LOperand* elements = Use(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+  LMaybeGrowElements* result = new (zone())
+      LMaybeGrowElements(context, object, elements, key, current_capacity);
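+  // DefineFixed mutates |result| in place, so its return value can be
+  // dropped before layering on the environment and pointer map.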
+  DefineFixed(result, eax);
+  return AssignPointerMap(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool is_in_object = instr->access().IsInobject();
+  bool is_external_location = instr->access().IsExternalMemory() &&
+      instr->access().offset() == 0;
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  bool needs_write_barrier_for_map = instr->has_transition() &&
+      instr->NeedsWriteBarrierForMap();
+
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = is_in_object
+        ? UseRegister(instr->object())
+        : UseTempRegister(instr->object());
+  } else if (is_external_location) {
+    DCHECK(!is_in_object);
+    DCHECK(!needs_write_barrier);
+    DCHECK(!needs_write_barrier_for_map);
+    obj = UseRegisterOrConstant(instr->object());
+  } else {
+    obj = needs_write_barrier_for_map
+        ? UseRegister(instr->object())
+        : UseRegisterAtStart(instr->object());
+  }
+
+  bool can_be_constant = instr->value()->IsConstant() &&
+      HConstant::cast(instr->value())->NotInNewSpace() &&
+      !instr->field_representation().IsDouble();
+
+  LOperand* val;
+  if (instr->field_representation().IsInteger8() ||
+      instr->field_representation().IsUInteger8()) {
+    // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
+    // Just force the value to be in eax and we're safe here.
+    val = UseFixed(instr->value(), eax);
+  } else if (needs_write_barrier) {
+    val = UseTempRegister(instr->value());
+  } else if (can_be_constant) {
+    val = UseRegisterOrConstant(instr->value());
+  } else if (instr->field_representation().IsDouble()) {
+    val = UseRegisterAtStart(instr->value());
+  } else {
+    val = UseRegister(instr->value());
+  }
+
+  // We only need a scratch register if we have a write barrier or we
+  // have a store into the properties array (not in-object-property).
+  LOperand* temp = (!is_in_object || needs_write_barrier ||
+                    needs_write_barrier_for_map) ? TempRegister() : NULL;
+
+  // We need a temporary register for write barrier of the map field.
+  LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+  return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreNamedGeneric* result =
+      new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left = UseFixed(instr->left(), edx);
+  LOperand* right = UseFixed(instr->right(), eax);
+  LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
+  return MarkAsCall(DefineFixed(string_add, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = instr->size()->IsConstant()
+      ? UseConstant(instr->size())
+      : UseTempRegister(instr->size());
+  LOperand* temp = TempRegister();
+  LAllocate* result = new(zone()) LAllocate(context, size, temp);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  DCHECK(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new(zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor.GetRegisterParameter(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk()->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Retry(kNotEnoughSpillSlotsForOsr);
+      spill_index = 0;
+    }
+    if (spill_index == 0) {
+      // The dynamic frame alignment state overwrites the first local.
+      // The first local is saved at the end of the unoptimized frame.
+      spill_index = graph()->osr()->UnoptimizedFrameSlots();
+    }
+  }
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LCallStub* result = new(zone()) LCallStub(context);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* args = UseRegister(instr->arguments());
+  LOperand* length;
+  LOperand* index;
+  if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+    length = UseRegisterOrConstant(instr->length());
+    index = UseOrConstant(instr->index());
+  } else {
+    length = UseTempRegister(instr->length());
+    index = Use(instr->index());
+  }
+  return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), eax);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* value = UseFixed(instr->value(), ebx);
+  LTypeof* result = new(zone()) LTypeof(context, value);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  info()->MarkAsDeferredCalling();
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), esi);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
+  } else {
+    DCHECK(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->inlining_kind());
+  // Only replay binding of arguments object if it wasn't removed from graph.
+  if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->BindContext(instr->closure_context());
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedFunction(instr->shared());
+  return NULL;
+}
+
+
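+// If the inlined call materialized its arguments on the stack, leaving the
+// inlined frame emits an LDrop that pops them again; the argument delta
+// recorded on HLeaveInlined must agree. The builder then switches back to
+// the outer (caller) environment set up by DoEnterInlined.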
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer = current_block_->last_environment()->
+      DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+  return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object = UseFixed(instr->enumerable(), eax);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->context());
+  return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+    HAllocateBlockContext* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LAllocateBlockContext* result =
+      new(zone()) LAllocateBlockContext(context, function);
+  return MarkAsCall(DefineFixed(result, esi), instr);
+}
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/crankshaft/ia32/lithium-ia32.h b/src/crankshaft/ia32/lithium-ia32.h
new file mode 100644
index 0000000..ab7a4b5
--- /dev/null
+++ b/src/crankshaft/ia32/lithium-ia32.h
@@ -0,0 +1,2786 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
+#define V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class RCodeVisualizer;
+}
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(AllocateBlockContext)                    \
+  V(Allocate)                                \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckArrayBufferNotNeutered)             \
+  V(CheckInstanceType)                       \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToI)                               \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInPrototypeChainAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(LoadRoot)                                \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(MaybeGrowElements)                       \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(Prologue)                                \
+  V(PushArgument)                            \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(TaggedToI)                               \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
+
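+// Shared boilerplate for every concrete instruction. For example,
+// DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") gives LGoto an opcode() of
+// LInstruction::kGoto, a Mnemonic() of "goto", a CompileToNative() override,
+// and a DCHECK-guarded LGoto::cast() downcast.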
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const final { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) final;              \
+  const char* Mnemonic() const final { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
+  }
+
+
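+// For example, DECLARE_HYDROGEN_ACCESSOR(Add) defines an HAdd* hydrogen()
+// accessor that downcasts this instruction's hydrogen_value().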
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
+
+
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) {
+  }
+
+  virtual ~LInstruction() {}
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter,
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  bool HasDoubleRegisterResult();
+  bool HasDoubleRegisterInput();
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
+ private:
+  // Iterator support.
+  friend class InputIterator;
+
+  friend class TempIterator;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  class IsCallBits : public BitField<bool, 0, 1> {};
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  bool HasResult() const final { return R != 0 && result() != NULL; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const override { return results_[0]; }
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
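+// For example, LMulI below is an LTemplateInstruction<1, 2, 1>: one result,
+// two inputs (left and right) and one temp register.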
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  int InputCount() final { return I; }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return T; }
+  LOperand* TempAt(int i) final { return temps_[i]; }
+};
+
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block) : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use the DECLARE-macro here because of sub-classes.
+  bool IsGap() const final { return true; }
+  void PrintDataTo(StringStream* stream) override;
+  static LGap* cast(LInstruction* instr) {
+    DCHECK(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new(zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LInstructionGap final : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  void PrintDataTo(StringStream* stream) override;
+  bool IsControl() const override { return true; }
+
+  int block_id() const { return block_->block_id(); }
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+    return false;
+  }
+
+  bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
+
+ private:
+  HBasicBlock* block_;
+};
+
+
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
+class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+};
+
+
+class LDummy final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() {}
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) {
+    inputs_[0] = value;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool IsControl() const override { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel final : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallStub(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
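+// Base class for branching instructions with I inputs, T temps, no result,
+// and a true/false successor pair. TrueLabel() and FalseLabel() lazily look
+// up the successor blocks' assembly labels through the chunk; for instance,
+// LCmpObjectEqAndBranch below is an LControlInstruction<2, 0>.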
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+  bool IsControl() const final { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+
+  Label* false_label_;
+  Label* true_label_;
+};
+
+
+class LWrapReceiver final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LWrapReceiver(LOperand* receiver,
+                LOperand* function,
+                LOperand* temp) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+    temps_[0] = temp;
+  }
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+};
+
+
+class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+};
+
+
+class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModByConstI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LModByConstI(LOperand* dividend,
+               int32_t divisor,
+               LOperand* temp1,
+               LOperand* temp2) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LModI(LOperand* left, LOperand* right, LOperand* temp) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivByConstI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LDivByConstI(LOperand* dividend,
+               int32_t divisor,
+               LOperand* temp1,
+               LOperand* temp2) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 3> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend,
+                       int32_t divisor,
+                       LOperand* temp1,
+                       LOperand* temp2,
+                       LOperand* temp3) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* temp3() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->representation().IsDouble();
+  }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFloor(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRound(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathFround final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathAbs(LOperand* context, LOperand* value) {
+    inputs_[1] = context;
+    inputs_[0] = value;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathLog(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathClz32(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LMathExp(LOperand* value,
+           LOperand* temp1,
+           LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSqrt(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathPowHalf(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+};
+
+
+class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranch(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+                               "cmp-minus-zero-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsStringAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+  LOperand* context() { return inputs_[0]; }
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() const { return inputs_[0]; }
+  LOperand* left() const { return inputs_[1]; }
+  LOperand* right() const { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
+ public:
+  LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
+                                LOperand* scratch) {
+    inputs_[0] = object;
+    inputs_[1] = prototype;
+    temps_[0] = scratch;
+  }
+
+  LOperand* object() const { return inputs_[0]; }
+  LOperand* prototype() const { return inputs_[1]; }
+  LOperand* scratch() const { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+                               "has-in-prototype-chain-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
+};
+
+
+class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+  Token::Value op() const { return hydrogen()->op(); }
+};
+
+
+class LShiftI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+  Token::Value op() const { return op_; }
+  bool can_deopt() const { return can_deopt_; }
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD final : public LTemplateInstruction<1, 0, 1> {
+ public:
+  explicit LConstantD(LOperand* temp) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
+};
+
+
+class LConstantE final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+
+class LConstantT final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+
+class LBranch final : public LControlInstruction<1, 1> {
+ public:
+  LBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpMapAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpMapAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMapEnumLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSeqStringGetChar(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  LOperand* string() const { return inputs_[0]; }
+  LOperand* index() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LSeqStringSetChar(LOperand* context,
+                    LOperand* string,
+                    LOperand* index,
+                    LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+  }
+
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
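+  // lea can compute left + right into a fresh register without clobbering
+  // either operand and without setting flags, so it is preferred when no
+  // overflow check is needed and the better left operand is still used
+  // elsewhere.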
+  static bool UseLea(HAdd* add) {
+    return !add->CheckFlag(HValue::kCanOverflow) &&
+        add->BetterLeftOperand()->UseCount() > 1;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return op_; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op,
+               LOperand* context,
+               LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+  Token::Value op() const { return op_; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn final : public LTemplateInstruction<0, 3, 0> {
+ public:
+  explicit LReturn(LOperand* value,
+                   LOperand* context,
+                   LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    DCHECK(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+  DECLARE_HYDROGEN_ACCESSOR(Return)
+};
+
+
+class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+    inputs_[0] = function;
+    temps_[0] = temp;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = backing_store_owner;
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* backing_store_owner() { return inputs_[2]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+  bool key_is_smi() {
+    return hydrogen()->key()->representation().IsTagged();
+  }
+};
+
+
+inline static bool ExternalArrayOpRequiresTemp(
+    Representation key_representation,
+    ElementsKind elements_kind) {
+  // Operations that require the key to be divided by two to be converted into
+  // an index cannot fold the scale operation into a load and need an extra
+  // temp register to do the work.
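+  // (On ia32 a smi is the value shifted left by one, so these byte-sized
+  // element kinds need index = key >> 1; an addressing mode can scale by
+  // 1, 2, 4 or 8 but never divide, hence the extra register.)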
+  return key_representation.IsSmi() &&
+         (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS ||
+          elements_kind == UINT8_CLAMPED_ELEMENTS);
+}
+
+
+class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
+                    LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = obj;
+    inputs_[2] = key;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
+class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallJSFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : inputs_(descriptor.GetRegisterParameterCount() +
+                    kImplicitRegisterParameterCount,
+                zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() +
+               kImplicitRegisterParameterCount ==
+           operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  // The target and context are passed as implicit parameters that are not
+  // explicitly listed in the descriptor.
+  static const int kImplicitRegisterParameterCount = 2;
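+  // Illustrative note (layout assumed from the builder's convention):
+  // inputs_[0] holds the call target and inputs_[1] the context, followed
+  // by the register parameters listed in the descriptor.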
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  int InputCount() final { return inputs_.length(); }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return 0; }
+  LOperand* TempAt(int i) final { return NULL; }
+};
+
+
+class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+                LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberTagI(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagU final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberTagU(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LDoubleToI(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LTaggedToI(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberUntagD(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+  bool needs_check() const { return needs_check_; }
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
+ public:
+  LStoreNamedField(LOperand* obj,
+                   LOperand* val,
+                   LOperand* temp,
+                   LOperand* temp_map) {
+    inputs_[0] = obj;
+    inputs_[1] = val;
+    temps_[0] = temp;
+    temps_[1] = temp_map;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp_map() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+                     LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
+ public:
+  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
+              LOperand* backing_store_owner) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+    inputs_[2] = val;
+    inputs_[3] = backing_store_owner;
+  }
+
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* backing_store_owner() { return inputs_[3]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+};
+
+
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
+ public:
+  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                     LOperand* value, LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    inputs_[3] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* context,
+                          LOperand* new_map_temp,
+                          LOperand* temp) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_temp() { return temps_[0]; }
+  LOperand* temp() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object,
+                         LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+                               "trap-allocation-memento")
+};
+
+
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+                     LOperand* key, LOperand* current_capacity) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = elements;
+    inputs_[3] = key;
+    inputs_[4] = current_capacity;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* elements() { return inputs_[2]; }
+  LOperand* key() { return inputs_[3]; }
+  LOperand* current_capacity() { return inputs_[4]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
+class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckValue(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckArrayBufferNotNeutered final
+    : public LTemplateInstruction<0, 1, 1> {
+ public:
+  explicit LCheckArrayBufferNotNeutered(LOperand* view, LOperand* scratch) {
+    inputs_[0] = view;
+    temps_[0] = scratch;
+  }
+
+  LOperand* view() { return inputs_[0]; }
+  LOperand* scratch() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
+                               "check-array-buffer-not-neutered")
+  DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
+};
+
+
+class LCheckInstanceType final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LCheckInstanceType(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckMaps(LOperand* value = NULL) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampDToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* value, LOperand* temp_xmm) {
+    inputs_[0] = value;
+    temps_[0] = temp_xmm;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp_xmm() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleBits(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LConstructDouble(LOperand* hi, LOperand* lo) {
+    inputs_[0] = hi;
+    inputs_[1] = lo;
+  }
+
+  LOperand* hi() { return inputs_[0]; }
+  LOperand* lo() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStoreFrameContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAllocateBlockContext(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk final : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph)
+      : LChunk(info, graph),
+        num_double_slots_(0) { }
+
+  int GetNextSpillIndex(RegisterKind kind);
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+
+  int num_double_slots() const { return num_double_slots_; }
+
+ private:
+  int num_double_slots_;
+};
+
+
+class LChunkBuilder final : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(info, graph),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        allocator_(allocator) {}
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+  LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
+  LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+  LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathExp(HUnaryMathOperation* instr);
+  LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+  LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+  LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+  LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
+  LInstruction* DoDivI(HDiv* instr);
+  LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
+  LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(XMMRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           XMMRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the
+  // end of the instruction. This means that the register allocator will
+  // not reuse its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the instruction start. The register allocator is free to assign the
+  // same register to some other operand used inside the instruction (i.e.
+  // a temporary or the output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
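+  // Illustrative sketch (not part of the original header): a commutative
+  // binary operation whose result may alias its first input pairs the
+  // at-start variant with DefineSameAsFirst, e.g.
+  //   LOperand* left = UseRegisterAtStart(instr->left());
+  //   LOperand* right = UseOrConstantAtStart(instr->right());
+  //   return DefineSameAsFirst(new(zone()) LAddI(left, right));
+  // An input that must stay live past the point where the result is
+  // written should be created with UseRegister instead.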
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a fixed register or a constant operand.
+  MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value,
+                                               Register fixed_register);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand in a constant operand.
+  MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+  // An input operand in a register, stack slot or constant operand.
+  // Will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            Register reg);
+  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+                                  XMMRegister reg);
+  // Assigns an environment to an instruction.  An instruction which can
+  // deoptimize must have an environment.
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  // Assigns a pointer map to an instruction.  An instruction which can
+  // trigger a GC or a lazy deoptimization must have a pointer map.
+  LInstruction* AssignPointerMap(LInstruction* instr);
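+  // Illustrative sketch (not from the original source): an instruction
+  // that can deoptimize and can also trigger a GC is typically wrapped as
+  //   LInstruction* result = DefineAsRegister(new(zone()) LFoo(value));
+  //   return AssignPointerMap(AssignEnvironment(result));
+  // where LFoo stands in for any concrete instruction with both
+  // properties.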
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr);
+
+  // Marks a call for the register allocator.  Assigns a pointer map to
+  // support GC and lazy deoptimization.  Assigns an environment to support
+  // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
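+  // Illustrative sketch (not from the original source): call-like nodes
+  // fix the result register and are marked as calls, e.g.
+  //   LCallRuntime* result = new(zone()) LCallRuntime(context);
+  //   return MarkAsCall(DefineFixed(result, eax), instr);
+  // with eax as the conventional return register on ia32.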
+
+  void VisitInstruction(HInstruction* current);
+  void AddInstruction(LInstruction* instr, HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HBinaryOperation* instr);
+
+  LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
+
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  LAllocator* allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_IA32_LITHIUM_IA32_H_
diff --git a/src/crankshaft/lithium-allocator-inl.h b/src/crankshaft/lithium-allocator-inl.h
new file mode 100644
index 0000000..22611b5
--- /dev/null
+++ b/src/crankshaft/lithium-allocator-inl.h
@@ -0,0 +1,60 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
+#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
+
+#include "src/crankshaft/lithium-allocator.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/crankshaft/ia32/lithium-ia32.h" // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/crankshaft/x64/lithium-x64.h" // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/crankshaft/arm64/lithium-arm64.h" // NOLINT
+#elif V8_TARGET_ARCH_ARM
+#include "src/crankshaft/arm/lithium-arm.h" // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/crankshaft/ppc/lithium-ppc.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/crankshaft/mips/lithium-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/crankshaft/mips64/lithium-mips64.h" // NOLINT
+#elif V8_TARGET_ARCH_X87
+#include "src/crankshaft/x87/lithium-x87.h" // NOLINT
+#else
+#error "Unknown architecture."
+#endif
+
+namespace v8 {
+namespace internal {
+
+bool LAllocator::IsGapAt(int index) { return chunk_->IsGapAt(index); }
+
+
+LInstruction* LAllocator::InstructionAt(int index) {
+  return chunk_->instructions()->at(index);
+}
+
+
+LGap* LAllocator::GapAt(int index) {
+  return chunk_->GetGapAt(index);
+}
+
+
+void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
+  if (range->Kind() == DOUBLE_REGISTERS) {
+    assigned_double_registers_->Add(reg);
+  } else {
+    DCHECK(range->Kind() == GENERAL_REGISTERS);
+    assigned_registers_->Add(reg);
+  }
+  range->set_assigned_register(reg, chunk()->zone());
+}
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_INL_H_
diff --git a/src/crankshaft/lithium-allocator.cc b/src/crankshaft/lithium-allocator.cc
new file mode 100644
index 0000000..5d05292
--- /dev/null
+++ b/src/crankshaft/lithium-allocator.cc
@@ -0,0 +1,2198 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/lithium-allocator.h"
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/lithium-allocator-inl.h"
+#include "src/register-configuration.h"
+#include "src/string-stream.h"
+
+namespace v8 {
+namespace internal {
+
+static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
+  return a.Value() < b.Value() ? a : b;
+}
+
+
+static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
+  return a.Value() > b.Value() ? a : b;
+}
+
+
+UsePosition::UsePosition(LifetimePosition pos,
+                         LOperand* operand,
+                         LOperand* hint)
+    : operand_(operand),
+      hint_(hint),
+      pos_(pos),
+      next_(NULL),
+      requires_reg_(false),
+      register_beneficial_(true) {
+  if (operand_ != NULL && operand_->IsUnallocated()) {
+    LUnallocated* unalloc = LUnallocated::cast(operand_);
+    requires_reg_ = unalloc->HasRegisterPolicy() ||
+        unalloc->HasDoubleRegisterPolicy();
+    register_beneficial_ = !unalloc->HasAnyPolicy();
+  }
+  DCHECK(pos_.IsValid());
+}
+
+
+bool UsePosition::HasHint() const {
+  return hint_ != NULL && !hint_->IsUnallocated();
+}
+
+
+bool UsePosition::RequiresRegister() const {
+  return requires_reg_;
+}
+
+
+bool UsePosition::RegisterIsBeneficial() const {
+  return register_beneficial_;
+}
+
+
+void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
+  DCHECK(Contains(pos) && pos.Value() != start().Value());
+  UseInterval* after = new(zone) UseInterval(pos, end_);
+  after->next_ = next_;
+  next_ = after;
+  end_ = pos;
+}
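+// For example (illustrative, not from the original source): splitting the
+// interval [10, 30) at position 20 shrinks this interval to [10, 20) and
+// links a new interval [20, 30) directly after it.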
+
+
+#ifdef DEBUG
+
+
+void LiveRange::Verify() const {
+  UsePosition* cur = first_pos_;
+  while (cur != NULL) {
+    DCHECK(Start().Value() <= cur->pos().Value() &&
+           cur->pos().Value() <= End().Value());
+    cur = cur->next();
+  }
+}
+
+
+bool LiveRange::HasOverlap(UseInterval* target) const {
+  UseInterval* current_interval = first_interval_;
+  while (current_interval != NULL) {
+    // Intervals overlap if the start of one is contained in the other.
+    if (current_interval->Contains(target->start()) ||
+        target->Contains(current_interval->start())) {
+      return true;
+    }
+    current_interval = current_interval->next();
+  }
+  return false;
+}
+
+
+#endif
+
+
+LiveRange::LiveRange(int id, Zone* zone)
+    : id_(id),
+      spilled_(false),
+      kind_(UNALLOCATED_REGISTERS),
+      assigned_register_(kInvalidAssignment),
+      last_interval_(NULL),
+      first_interval_(NULL),
+      first_pos_(NULL),
+      parent_(NULL),
+      next_(NULL),
+      current_interval_(NULL),
+      last_processed_use_(NULL),
+      current_hint_operand_(NULL),
+      spill_operand_(new (zone) LOperand()),
+      spill_start_index_(kMaxInt) {}
+
+
+void LiveRange::set_assigned_register(int reg, Zone* zone) {
+  DCHECK(!HasRegisterAssigned() && !IsSpilled());
+  assigned_register_ = reg;
+  ConvertOperands(zone);
+}
+
+
+void LiveRange::MakeSpilled(Zone* zone) {
+  DCHECK(!IsSpilled());
+  DCHECK(TopLevel()->HasAllocatedSpillOperand());
+  spilled_ = true;
+  assigned_register_ = kInvalidAssignment;
+  ConvertOperands(zone);
+}
+
+
+bool LiveRange::HasAllocatedSpillOperand() const {
+  DCHECK(spill_operand_ != NULL);
+  return !spill_operand_->IsIgnored();
+}
+
+
+void LiveRange::SetSpillOperand(LOperand* operand) {
+  DCHECK(!operand->IsUnallocated());
+  DCHECK(spill_operand_ != NULL);
+  DCHECK(spill_operand_->IsIgnored());
+  spill_operand_->ConvertTo(operand->kind(), operand->index());
+}
+
+
+UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
+  UsePosition* use_pos = last_processed_use_;
+  if (use_pos == NULL) use_pos = first_pos();
+  while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
+    use_pos = use_pos->next();
+  }
+  last_processed_use_ = use_pos;
+  return use_pos;
+}
+
+
+UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
+    LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RegisterIsBeneficial()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
+    LifetimePosition start) {
+  UsePosition* pos = first_pos();
+  UsePosition* prev = NULL;
+  while (pos != NULL && pos->pos().Value() < start.Value()) {
+    if (pos->RegisterIsBeneficial()) prev = pos;
+    pos = pos->next();
+  }
+  return prev;
+}
+
+
+UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RequiresRegister()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+bool LiveRange::CanBeSpilled(LifetimePosition pos) {
+  // We cannot spill a live range that has a use requiring a register
+  // at the current or the immediate next position.
+  UsePosition* use_pos = NextRegisterPosition(pos);
+  if (use_pos == NULL) return true;
+  return
+      use_pos->pos().Value() > pos.NextInstruction().InstructionEnd().Value();
+}
+
+
+LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
+  LOperand* op = NULL;
+  if (HasRegisterAssigned()) {
+    DCHECK(!IsSpilled());
+    switch (Kind()) {
+      case GENERAL_REGISTERS:
+        op = LRegister::Create(assigned_register(), zone);
+        break;
+      case DOUBLE_REGISTERS:
+        op = LDoubleRegister::Create(assigned_register(), zone);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (IsSpilled()) {
+    DCHECK(!HasRegisterAssigned());
+    op = TopLevel()->GetSpillOperand();
+    DCHECK(!op->IsUnallocated());
+  } else {
+    LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE);
+    unalloc->set_virtual_register(id_);
+    op = unalloc;
+  }
+  return op;
+}
+
+
+UseInterval* LiveRange::FirstSearchIntervalForPosition(
+    LifetimePosition position) const {
+  if (current_interval_ == NULL) return first_interval_;
+  if (current_interval_->start().Value() > position.Value()) {
+    current_interval_ = NULL;
+    return first_interval_;
+  }
+  return current_interval_;
+}
+
+
+void LiveRange::AdvanceLastProcessedMarker(
+    UseInterval* to_start_of, LifetimePosition but_not_past) const {
+  if (to_start_of == NULL) return;
+  if (to_start_of->start().Value() > but_not_past.Value()) return;
+  LifetimePosition start =
+      current_interval_ == NULL ? LifetimePosition::Invalid()
+                                : current_interval_->start();
+  if (to_start_of->start().Value() > start.Value()) {
+    current_interval_ = to_start_of;
+  }
+}
+
+
+void LiveRange::SplitAt(LifetimePosition position,
+                        LiveRange* result,
+                        Zone* zone) {
+  DCHECK(Start().Value() < position.Value());
+  DCHECK(result->IsEmpty());
+  // Find the last interval that ends before the position. If the
+  // position is contained in one of the intervals in the chain, we
+  // split that interval and use the first part.
+  UseInterval* current = FirstSearchIntervalForPosition(position);
+
+  // If the split position coincides with the beginning of a use interval,
+  // we need to split the use positions in a special way.
+  bool split_at_start = false;
+
+  if (current->start().Value() == position.Value()) {
+    // When splitting at start we need to locate the previous use interval.
+    current = first_interval_;
+  }
+
+  while (current != NULL) {
+    if (current->Contains(position)) {
+      current->SplitAt(position, zone);
+      break;
+    }
+    UseInterval* next = current->next();
+    if (next->start().Value() >= position.Value()) {
+      split_at_start = (next->start().Value() == position.Value());
+      break;
+    }
+    current = next;
+  }
+
+  // Partition original use intervals to the two live ranges.
+  UseInterval* before = current;
+  UseInterval* after = before->next();
+  result->last_interval_ = (last_interval_ == before)
+      ? after            // Only interval in the range after split.
+      : last_interval_;  // Last interval of the original range.
+  result->first_interval_ = after;
+  last_interval_ = before;
+
+  // Find the last use position before the split and the first use
+  // position after it.
+  UsePosition* use_after = first_pos_;
+  UsePosition* use_before = NULL;
+  if (split_at_start) {
+    // The split position coincides with the beginning of a use interval (the
+    // end of a lifetime hole). A use at this position should be attributed
+    // to the split child because the split child owns the use interval
+    // covering it.
+    while (use_after != NULL && use_after->pos().Value() < position.Value()) {
+      use_before = use_after;
+      use_after = use_after->next();
+    }
+  } else {
+    while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
+      use_before = use_after;
+      use_after = use_after->next();
+    }
+  }
+
+  // Partition original use positions to the two live ranges.
+  if (use_before != NULL) {
+    use_before->next_ = NULL;
+  } else {
+    first_pos_ = NULL;
+  }
+  result->first_pos_ = use_after;
+
+  // Discard cached iteration state. It might be pointing to a use that no
+  // longer belongs to this live range.
+  last_processed_use_ = NULL;
+  current_interval_ = NULL;
+
+  // Link the new live range in the chain before any of the other
+  // ranges linked from the range before the split.
+  result->parent_ = (parent_ == NULL) ? this : parent_;
+  result->kind_ = result->parent_->kind_;
+  result->next_ = next_;
+  next_ = result;
+
+#ifdef DEBUG
+  Verify();
+  result->Verify();
+#endif
+}
+
+
+// This implements an ordering on live ranges so that they are ordered by their
+// start positions.  This is needed for the correctness of the register
+// allocation algorithm.  If two live ranges start at the same offset then there
+// is a tie breaker based on where the value is first used.  This part of the
+// ordering is merely a heuristic.
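+// (Illustrative example, not from the original source: for ranges A and B
+// both starting at position 10, with A first used at 12 and B first used at
+// 16, A is allocated before B; a range with no use positions at all sorts
+// after any range that has one.)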
+bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
+  LifetimePosition start = Start();
+  LifetimePosition other_start = other->Start();
+  if (start.Value() == other_start.Value()) {
+    UsePosition* pos = first_pos();
+    if (pos == NULL) return false;
+    UsePosition* other_pos = other->first_pos();
+    if (other_pos == NULL) return true;
+    return pos->pos().Value() < other_pos->pos().Value();
+  }
+  return start.Value() < other_start.Value();
+}
+
+
+void LiveRange::ShortenTo(LifetimePosition start) {
+  LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
+  DCHECK(first_interval_ != NULL);
+  DCHECK(first_interval_->start().Value() <= start.Value());
+  DCHECK(start.Value() < first_interval_->end().Value());
+  first_interval_->set_start(start);
+}
+
+
+void LiveRange::EnsureInterval(LifetimePosition start,
+                               LifetimePosition end,
+                               Zone* zone) {
+  LAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
+                         id_,
+                         start.Value(),
+                         end.Value());
+  LifetimePosition new_end = end;
+  while (first_interval_ != NULL &&
+         first_interval_->start().Value() <= end.Value()) {
+    if (first_interval_->end().Value() > end.Value()) {
+      new_end = first_interval_->end();
+    }
+    first_interval_ = first_interval_->next();
+  }
+
+  UseInterval* new_interval = new(zone) UseInterval(start, new_end);
+  new_interval->next_ = first_interval_;
+  first_interval_ = new_interval;
+  if (new_interval->next() == NULL) {
+    last_interval_ = new_interval;
+  }
+}
+
+
+void LiveRange::AddUseInterval(LifetimePosition start,
+                               LifetimePosition end,
+                               Zone* zone) {
+  LAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n",
+                         id_,
+                         start.Value(),
+                         end.Value());
+  if (first_interval_ == NULL) {
+    UseInterval* interval = new(zone) UseInterval(start, end);
+    first_interval_ = interval;
+    last_interval_ = interval;
+  } else {
+    if (end.Value() == first_interval_->start().Value()) {
+      first_interval_->set_start(start);
+    } else if (end.Value() < first_interval_->start().Value()) {
+      UseInterval* interval = new(zone) UseInterval(start, end);
+      interval->set_next(first_interval_);
+      first_interval_ = interval;
+    } else {
+      // The order of instruction processing (see ProcessInstructions)
+      // guarantees that each new use interval either precedes or intersects
+      // with the last added interval.
+      DCHECK(start.Value() < first_interval_->end().Value());
+      first_interval_->start_ = Min(start, first_interval_->start_);
+      first_interval_->end_ = Max(end, first_interval_->end_);
+    }
+  }
+}
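+// For example (illustrative, not from the original source): if the first
+// interval is [10, 20), adding [4, 10) extends it to [4, 20), adding [2, 6)
+// prepends a separate interval [2, 6), and adding the intersecting interval
+// [8, 12) widens the first interval to [8, 20).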
+
+
+void LiveRange::AddUsePosition(LifetimePosition pos,
+                               LOperand* operand,
+                               LOperand* hint,
+                               Zone* zone) {
+  LAllocator::TraceAlloc("Add to live range %d use position %d\n",
+                         id_,
+                         pos.Value());
+  UsePosition* use_pos = new(zone) UsePosition(pos, operand, hint);
+  UsePosition* prev_hint = NULL;
+  UsePosition* prev = NULL;
+  UsePosition* current = first_pos_;
+  while (current != NULL && current->pos().Value() < pos.Value()) {
+    prev_hint = current->HasHint() ? current : prev_hint;
+    prev = current;
+    current = current->next();
+  }
+
+  if (prev == NULL) {
+    use_pos->set_next(first_pos_);
+    first_pos_ = use_pos;
+  } else {
+    use_pos->next_ = prev->next_;
+    prev->next_ = use_pos;
+  }
+
+  if (prev_hint == NULL && use_pos->HasHint()) {
+    current_hint_operand_ = hint;
+  }
+}
+
+
+void LiveRange::ConvertOperands(Zone* zone) {
+  LOperand* op = CreateAssignedOperand(zone);
+  UsePosition* use_pos = first_pos();
+  while (use_pos != NULL) {
+    DCHECK(Start().Value() <= use_pos->pos().Value() &&
+           use_pos->pos().Value() <= End().Value());
+
+    if (use_pos->HasOperand()) {
+      DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
+             !use_pos->RequiresRegister());
+      use_pos->operand()->ConvertTo(op->kind(), op->index());
+    }
+    use_pos = use_pos->next();
+  }
+}
+
+
+bool LiveRange::CanCover(LifetimePosition position) const {
+  if (IsEmpty()) return false;
+  return Start().Value() <= position.Value() &&
+         position.Value() < End().Value();
+}
+
+
+bool LiveRange::Covers(LifetimePosition position) {
+  if (!CanCover(position)) return false;
+  UseInterval* start_search = FirstSearchIntervalForPosition(position);
+  for (UseInterval* interval = start_search;
+       interval != NULL;
+       interval = interval->next()) {
+    DCHECK(interval->next() == NULL ||
+           interval->next()->start().Value() >= interval->start().Value());
+    AdvanceLastProcessedMarker(interval, position);
+    if (interval->Contains(position)) return true;
+    if (interval->start().Value() > position.Value()) return false;
+  }
+  return false;
+}
+
+
+LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
+  UseInterval* b = other->first_interval();
+  if (b == NULL) return LifetimePosition::Invalid();
+  LifetimePosition advance_last_processed_up_to = b->start();
+  UseInterval* a = FirstSearchIntervalForPosition(b->start());
+  while (a != NULL && b != NULL) {
+    if (a->start().Value() > other->End().Value()) break;
+    if (b->start().Value() > End().Value()) break;
+    LifetimePosition cur_intersection = a->Intersect(b);
+    if (cur_intersection.IsValid()) {
+      return cur_intersection;
+    }
+    if (a->start().Value() < b->start().Value()) {
+      a = a->next();
+      if (a == NULL || a->start().Value() > other->End().Value()) break;
+      AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
+    } else {
+      b = b->next();
+    }
+  }
+  return LifetimePosition::Invalid();
+}
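+// For example (illustrative, not from the original source): if this range
+// covers [2, 6) and the other range covers [4, 8), the first intersection
+// is at position 4, the later of the two interval starts.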
+
+
+LAllocator::LAllocator(int num_values, HGraph* graph)
+    : chunk_(NULL),
+      live_in_sets_(graph->blocks()->length(), zone()),
+      live_ranges_(num_values * 2, zone()),
+      fixed_live_ranges_(NULL),
+      fixed_double_live_ranges_(NULL),
+      unhandled_live_ranges_(num_values * 2, zone()),
+      active_live_ranges_(8, zone()),
+      inactive_live_ranges_(8, zone()),
+      reusable_slots_(8, zone()),
+      next_virtual_register_(num_values),
+      first_artificial_register_(num_values),
+      mode_(UNALLOCATED_REGISTERS),
+      num_registers_(-1),
+      graph_(graph),
+      has_osr_entry_(false),
+      allocation_ok_(true) {}
+
+
+void LAllocator::InitializeLivenessAnalysis() {
+  // Initialize the live_in sets for each block to NULL.
+  int block_count = graph_->blocks()->length();
+  live_in_sets_.Initialize(block_count, zone());
+  live_in_sets_.AddBlock(NULL, block_count, zone());
+}
+
+
+BitVector* LAllocator::ComputeLiveOut(HBasicBlock* block) {
+  // Compute live out for the given block, except not including backward
+  // successor edges.
+  BitVector* live_out = new(zone()) BitVector(next_virtual_register_, zone());
+
+  // Process all successor blocks.
+  for (HSuccessorIterator it(block->end()); !it.Done(); it.Advance()) {
+    // Add values live on entry to the successor. Note the successor's
+    // live_in will not be computed yet for backwards edges.
+    HBasicBlock* successor = it.Current();
+    BitVector* live_in = live_in_sets_[successor->block_id()];
+    if (live_in != NULL) live_out->Union(*live_in);
+
+    // All phi input operands corresponding to this successor edge are live
+    // out from this block.
+    int index = successor->PredecessorIndexOf(block);
+    const ZoneList<HPhi*>* phis = successor->phis();
+    for (int i = 0; i < phis->length(); ++i) {
+      HPhi* phi = phis->at(i);
+      if (!phi->OperandAt(index)->IsConstant()) {
+        live_out->Add(phi->OperandAt(index)->id());
+      }
+    }
+  }
+
+  return live_out;
+}
+
+
+void LAllocator::AddInitialIntervals(HBasicBlock* block,
+                                     BitVector* live_out) {
+  // Add an interval that includes the entire block to the live range for
+  // each live_out value.
+  LifetimePosition start = LifetimePosition::FromInstructionIndex(
+      block->first_instruction_index());
+  LifetimePosition end = LifetimePosition::FromInstructionIndex(
+      block->last_instruction_index()).NextInstruction();
+  BitVector::Iterator iterator(live_out);
+  while (!iterator.Done()) {
+    int operand_index = iterator.Current();
+    LiveRange* range = LiveRangeFor(operand_index);
+    range->AddUseInterval(start, end, zone());
+    iterator.Advance();
+  }
+}
+
+
+int LAllocator::FixedDoubleLiveRangeID(int index) {
+  return -index - 1 - Register::kNumRegisters;
+}
+
+
+LOperand* LAllocator::AllocateFixed(LUnallocated* operand,
+                                    int pos,
+                                    bool is_tagged) {
+  TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
+  DCHECK(operand->HasFixedPolicy());
+  if (operand->HasFixedSlotPolicy()) {
+    operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_slot_index());
+  } else if (operand->HasFixedRegisterPolicy()) {
+    int reg_index = operand->fixed_register_index();
+    operand->ConvertTo(LOperand::REGISTER, reg_index);
+  } else if (operand->HasFixedDoubleRegisterPolicy()) {
+    int reg_index = operand->fixed_register_index();
+    operand->ConvertTo(LOperand::DOUBLE_REGISTER, reg_index);
+  } else {
+    UNREACHABLE();
+  }
+  if (is_tagged) {
+    TraceAlloc("Fixed reg is tagged at %d\n", pos);
+    LInstruction* instr = InstructionAt(pos);
+    if (instr->HasPointerMap()) {
+      instr->pointer_map()->RecordPointer(operand, chunk()->zone());
+    }
+  }
+  return operand;
+}
+
+
+LiveRange* LAllocator::FixedLiveRangeFor(int index) {
+  DCHECK(index < Register::kNumRegisters);
+  LiveRange* result = fixed_live_ranges_[index];
+  if (result == NULL) {
+    result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
+    DCHECK(result->IsFixed());
+    result->kind_ = GENERAL_REGISTERS;
+    SetLiveRangeAssignedRegister(result, index);
+    fixed_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
+  DCHECK(index < DoubleRegister::kMaxNumRegisters);
+  LiveRange* result = fixed_double_live_ranges_[index];
+  if (result == NULL) {
+    result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
+                                   chunk()->zone());
+    DCHECK(result->IsFixed());
+    result->kind_ = DOUBLE_REGISTERS;
+    SetLiveRangeAssignedRegister(result, index);
+    fixed_double_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* LAllocator::LiveRangeFor(int index) {
+  if (index >= live_ranges_.length()) {
+    live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
+  }
+  LiveRange* result = live_ranges_[index];
+  if (result == NULL) {
+    result = new(zone()) LiveRange(index, chunk()->zone());
+    live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LGap* LAllocator::GetLastGap(HBasicBlock* block) {
+  int last_instruction = block->last_instruction_index();
+  int index = chunk_->NearestGapPos(last_instruction);
+  return GapAt(index);
+}
+
+
+HPhi* LAllocator::LookupPhi(LOperand* operand) const {
+  if (!operand->IsUnallocated()) return NULL;
+  int index = LUnallocated::cast(operand)->virtual_register();
+  HValue* instr = graph_->LookupValue(index);
+  if (instr != NULL && instr->IsPhi()) {
+    return HPhi::cast(instr);
+  }
+  return NULL;
+}
+
+
+LiveRange* LAllocator::LiveRangeFor(LOperand* operand) {
+  if (operand->IsUnallocated()) {
+    return LiveRangeFor(LUnallocated::cast(operand)->virtual_register());
+  } else if (operand->IsRegister()) {
+    return FixedLiveRangeFor(operand->index());
+  } else if (operand->IsDoubleRegister()) {
+    return FixedDoubleLiveRangeFor(operand->index());
+  } else {
+    return NULL;
+  }
+}
+
+
+void LAllocator::Define(LifetimePosition position,
+                        LOperand* operand,
+                        LOperand* hint) {
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+
+  if (range->IsEmpty() || range->Start().Value() > position.Value()) {
+    // Can happen if there is a definition without a use.
+    range->AddUseInterval(position, position.NextInstruction(), zone());
+    range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
+  } else {
+    range->ShortenTo(position);
+  }
+
+  if (operand->IsUnallocated()) {
+    LUnallocated* unalloc_operand = LUnallocated::cast(operand);
+    range->AddUsePosition(position, unalloc_operand, hint, zone());
+  }
+}
+
+
+void LAllocator::Use(LifetimePosition block_start,
+                     LifetimePosition position,
+                     LOperand* operand,
+                     LOperand* hint) {
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+  if (operand->IsUnallocated()) {
+    LUnallocated* unalloc_operand = LUnallocated::cast(operand);
+    range->AddUsePosition(position, unalloc_operand, hint, zone());
+  }
+  range->AddUseInterval(block_start, position, zone());
+}
+
+
+void LAllocator::AddConstraintsGapMove(int index,
+                                       LOperand* from,
+                                       LOperand* to) {
+  LGap* gap = GapAt(index);
+  LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+                                                     chunk()->zone());
+  if (from->IsUnallocated()) {
+    const ZoneList<LMoveOperands>* move_operands = move->move_operands();
+    for (int i = 0; i < move_operands->length(); ++i) {
+      LMoveOperands cur = move_operands->at(i);
+      LOperand* cur_to = cur.destination();
+      if (cur_to->IsUnallocated()) {
+        if (LUnallocated::cast(cur_to)->virtual_register() ==
+            LUnallocated::cast(from)->virtual_register()) {
+          move->AddMove(cur.source(), to, chunk()->zone());
+          return;
+        }
+      }
+    }
+  }
+  move->AddMove(from, to, chunk()->zone());
+}
+
+
+void LAllocator::MeetRegisterConstraints(HBasicBlock* block) {
+  int start = block->first_instruction_index();
+  int end = block->last_instruction_index();
+  if (start == -1) return;
+  for (int i = start; i <= end; ++i) {
+    if (IsGapAt(i)) {
+      LInstruction* instr = NULL;
+      LInstruction* prev_instr = NULL;
+      if (i < end) instr = InstructionAt(i + 1);
+      if (i > start) prev_instr = InstructionAt(i - 1);
+      MeetConstraintsBetween(prev_instr, instr, i);
+      if (!AllocationOk()) return;
+    }
+  }
+}
+
+
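+// Inserts the copies and gap moves required by the operand policies of the
+// instructions on both sides of the gap at gap_index: fixed temporaries and
+// outputs of the first instruction, fixed and writable-register inputs of
+// the second one, and "output same as input" constraints.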
+void LAllocator::MeetConstraintsBetween(LInstruction* first,
+                                        LInstruction* second,
+                                        int gap_index) {
+  // Handle fixed temporaries.
+  if (first != NULL) {
+    for (TempIterator it(first); !it.Done(); it.Advance()) {
+      LUnallocated* temp = LUnallocated::cast(it.Current());
+      if (temp->HasFixedPolicy()) {
+        AllocateFixed(temp, gap_index - 1, false);
+      }
+    }
+  }
+
+  // Handle fixed output operand.
+  if (first != NULL && first->Output() != NULL) {
+    LUnallocated* first_output = LUnallocated::cast(first->Output());
+    LiveRange* range = LiveRangeFor(first_output->virtual_register());
+    bool assigned = false;
+    if (first_output->HasFixedPolicy()) {
+      LUnallocated* output_copy = first_output->CopyUnconstrained(
+          chunk()->zone());
+      bool is_tagged = HasTaggedValue(first_output->virtual_register());
+      AllocateFixed(first_output, gap_index, is_tagged);
+
+      // This value is produced on the stack; we never need to spill it.
+      if (first_output->IsStackSlot()) {
+        range->SetSpillOperand(first_output);
+        range->SetSpillStartIndex(gap_index - 1);
+        assigned = true;
+      }
+      chunk_->AddGapMove(gap_index, first_output, output_copy);
+    }
+
+    if (!assigned) {
+      range->SetSpillStartIndex(gap_index);
+
+      // This move to the spill operand is not a real use. Liveness analysis
+      // and splitting of live ranges do not account for it.
+      // Thus it should be inserted at a lifetime position corresponding to
+      // the instruction end.
+      LGap* gap = GapAt(gap_index);
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::BEFORE,
+                                                         chunk()->zone());
+      move->AddMove(first_output, range->GetSpillOperand(),
+                    chunk()->zone());
+    }
+  }
+
+  // Handle fixed input operands of second instruction.
+  if (second != NULL) {
+    for (UseIterator it(second); !it.Done(); it.Advance()) {
+      LUnallocated* cur_input = LUnallocated::cast(it.Current());
+      if (cur_input->HasFixedPolicy()) {
+        LUnallocated* input_copy = cur_input->CopyUnconstrained(
+            chunk()->zone());
+        bool is_tagged = HasTaggedValue(cur_input->virtual_register());
+        AllocateFixed(cur_input, gap_index + 1, is_tagged);
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+      } else if (cur_input->HasWritableRegisterPolicy()) {
+        // The live range of writable input registers always goes until the end
+        // of the instruction.
+        DCHECK(!cur_input->IsUsedAtStart());
+
+        LUnallocated* input_copy = cur_input->CopyUnconstrained(
+            chunk()->zone());
+        int vreg = GetVirtualRegister();
+        if (!AllocationOk()) return;
+        cur_input->set_virtual_register(vreg);
+
+        if (RequiredRegisterKind(input_copy->virtual_register()) ==
+            DOUBLE_REGISTERS) {
+          double_artificial_registers_.Add(
+              cur_input->virtual_register() - first_artificial_register_,
+              zone());
+        }
+
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+      }
+    }
+  }
+
+  // Handle "output same as input" for second instruction.
+  if (second != NULL && second->Output() != NULL) {
+    LUnallocated* second_output = LUnallocated::cast(second->Output());
+    if (second_output->HasSameAsInputPolicy()) {
+      LUnallocated* cur_input = LUnallocated::cast(second->FirstInput());
+      int output_vreg = second_output->virtual_register();
+      int input_vreg = cur_input->virtual_register();
+
+      LUnallocated* input_copy = cur_input->CopyUnconstrained(
+          chunk()->zone());
+      cur_input->set_virtual_register(second_output->virtual_register());
+      AddConstraintsGapMove(gap_index, input_copy, cur_input);
+
+      if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
+        int index = gap_index + 1;
+        LInstruction* instr = InstructionAt(index);
+        if (instr->HasPointerMap()) {
+          instr->pointer_map()->RecordPointer(input_copy, chunk()->zone());
+        }
+      } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
+        // The input is assumed to immediately have a tagged representation,
+        // before the pointer map can be used. That is, the pointer map at
+        // the instruction will include the output operand (whose value at
+        // the beginning of the instruction is equal to the input operand).
+        // If this is not desired, then the pointer map at this instruction
+        // needs to be adjusted manually.
+      }
+    }
+  }
+}
+
+
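+// Walks the instructions of a block backwards, updating the live set as it
+// goes: outputs define (kill) values, inputs and gap-move sources use
+// (revive) them, and instructions that clobber registers get short fixed
+// use intervals on every allocatable register they might destroy.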
+void LAllocator::ProcessInstructions(HBasicBlock* block, BitVector* live) {
+  int block_start = block->first_instruction_index();
+  int index = block->last_instruction_index();
+
+  LifetimePosition block_start_position =
+      LifetimePosition::FromInstructionIndex(block_start);
+
+  while (index >= block_start) {
+    LifetimePosition curr_position =
+        LifetimePosition::FromInstructionIndex(index);
+
+    if (IsGapAt(index)) {
+      // We have a gap at this position.
+      LGap* gap = GapAt(index);
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+                                                         chunk()->zone());
+      const ZoneList<LMoveOperands>* move_operands = move->move_operands();
+      for (int i = 0; i < move_operands->length(); ++i) {
+        LMoveOperands* cur = &move_operands->at(i);
+        if (cur->IsIgnored()) continue;
+        LOperand* from = cur->source();
+        LOperand* to = cur->destination();
+        HPhi* phi = LookupPhi(to);
+        LOperand* hint = to;
+        if (phi != NULL) {
+          // This is a phi resolving move.
+          if (!phi->block()->IsLoopHeader()) {
+            hint = LiveRangeFor(phi->id())->current_hint_operand();
+          }
+        } else {
+          if (to->IsUnallocated()) {
+            if (live->Contains(LUnallocated::cast(to)->virtual_register())) {
+              Define(curr_position, to, from);
+              live->Remove(LUnallocated::cast(to)->virtual_register());
+            } else {
+              cur->Eliminate();
+              continue;
+            }
+          } else {
+            Define(curr_position, to, from);
+          }
+        }
+        Use(block_start_position, curr_position, from, hint);
+        if (from->IsUnallocated()) {
+          live->Add(LUnallocated::cast(from)->virtual_register());
+        }
+      }
+    } else {
+      DCHECK(!IsGapAt(index));
+      LInstruction* instr = InstructionAt(index);
+
+      if (instr != NULL) {
+        LOperand* output = instr->Output();
+        if (output != NULL) {
+          if (output->IsUnallocated()) {
+            live->Remove(LUnallocated::cast(output)->virtual_register());
+          }
+          Define(curr_position, output, NULL);
+        }
+
+        if (instr->ClobbersRegisters()) {
+          for (int i = 0; i < Register::kNumRegisters; ++i) {
+            if (Register::from_code(i).IsAllocatable()) {
+              if (output == NULL || !output->IsRegister() ||
+                  output->index() != i) {
+                LiveRange* range = FixedLiveRangeFor(i);
+                range->AddUseInterval(curr_position,
+                                      curr_position.InstructionEnd(), zone());
+              }
+            }
+          }
+        }
+
+        if (instr->ClobbersDoubleRegisters(isolate())) {
+          for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+            if (DoubleRegister::from_code(i).IsAllocatable()) {
+              if (output == NULL || !output->IsDoubleRegister() ||
+                  output->index() != i) {
+                LiveRange* range = FixedDoubleLiveRangeFor(i);
+                range->AddUseInterval(curr_position,
+                                      curr_position.InstructionEnd(), zone());
+              }
+            }
+          }
+        }
+
+        for (UseIterator it(instr); !it.Done(); it.Advance()) {
+          LOperand* input = it.Current();
+
+          LifetimePosition use_pos;
+          if (input->IsUnallocated() &&
+              LUnallocated::cast(input)->IsUsedAtStart()) {
+            use_pos = curr_position;
+          } else {
+            use_pos = curr_position.InstructionEnd();
+          }
+
+          Use(block_start_position, use_pos, input, NULL);
+          if (input->IsUnallocated()) {
+            live->Add(LUnallocated::cast(input)->virtual_register());
+          }
+        }
+
+        for (TempIterator it(instr); !it.Done(); it.Advance()) {
+          LOperand* temp = it.Current();
+          if (instr->ClobbersTemps()) {
+            if (temp->IsRegister()) continue;
+            if (temp->IsUnallocated()) {
+              LUnallocated* temp_unalloc = LUnallocated::cast(temp);
+              if (temp_unalloc->HasFixedPolicy()) {
+                continue;
+              }
+            }
+          }
+          Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
+          Define(curr_position, temp, NULL);
+
+          if (temp->IsUnallocated()) {
+            LUnallocated* temp_unalloc = LUnallocated::cast(temp);
+            if (temp_unalloc->HasDoubleRegisterPolicy()) {
+              double_artificial_registers_.Add(
+                  temp_unalloc->virtual_register() - first_artificial_register_,
+                  zone());
+            }
+          }
+        }
+      }
+    }
+
+    index = index - 1;
+  }
+}
+
+
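+// Lowers every phi in the block to gap moves: each predecessor's value is
+// moved into the phi's virtual register at the end of that predecessor, and
+// the phi itself is spilled at the start of its own block.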
+void LAllocator::ResolvePhis(HBasicBlock* block) {
+  const ZoneList<HPhi*>* phis = block->phis();
+  for (int i = 0; i < phis->length(); ++i) {
+    HPhi* phi = phis->at(i);
+    LUnallocated* phi_operand =
+        new (chunk()->zone()) LUnallocated(LUnallocated::NONE);
+    phi_operand->set_virtual_register(phi->id());
+    for (int j = 0; j < phi->OperandCount(); ++j) {
+      HValue* op = phi->OperandAt(j);
+      LOperand* operand = NULL;
+      if (op->IsConstant() && op->EmitAtUses()) {
+        HConstant* constant = HConstant::cast(op);
+        operand = chunk_->DefineConstantOperand(constant);
+      } else {
+        DCHECK(!op->EmitAtUses());
+        LUnallocated* unalloc =
+            new(chunk()->zone()) LUnallocated(LUnallocated::ANY);
+        unalloc->set_virtual_register(op->id());
+        operand = unalloc;
+      }
+      HBasicBlock* cur_block = block->predecessors()->at(j);
+      // The gap move must be added directly, without the special processing
+      // performed by AddConstraintsGapMove.
+      chunk_->AddGapMove(cur_block->last_instruction_index() - 1,
+                         operand,
+                         phi_operand);
+
+      // We are going to insert a move before the branch instruction.
+      // Some branch instructions (e.g. loops' back edges)
+      // can potentially cause a GC, so they have a pointer map.
+      // By inserting a move we essentially create a copy of a
+      // value which is invisible to PopulatePointerMaps(), because we store
+      // it into a location different from the operand of a live range
+      // covering a branch instruction.
+      // Thus we need to manually record a pointer.
+      LInstruction* branch =
+          InstructionAt(cur_block->last_instruction_index());
+      if (branch->HasPointerMap()) {
+        if (phi->representation().IsTagged() && !phi->type().IsSmi()) {
+          branch->pointer_map()->RecordPointer(phi_operand, chunk()->zone());
+        } else if (!phi->representation().IsDouble()) {
+          branch->pointer_map()->RecordUntagged(phi_operand, chunk()->zone());
+        }
+      }
+    }
+
+    LiveRange* live_range = LiveRangeFor(phi->id());
+    LLabel* label = chunk_->GetLabel(phi->block()->block_id());
+    label->GetOrCreateParallelMove(LGap::START, chunk()->zone())->
+        AddMove(phi_operand, live_range->GetSpillOperand(), chunk()->zone());
+    live_range->SetSpillStartIndex(phi->block()->first_instruction_index());
+  }
+}
+
+
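+// Top-level driver that runs the allocation phases in order; it bails out
+// early (returning false) if any phase fails via AllocationOk(), e.g. when
+// too many virtual registers are needed.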
+bool LAllocator::Allocate(LChunk* chunk) {
+  DCHECK(chunk_ == NULL);
+  chunk_ = static_cast<LPlatformChunk*>(chunk);
+  assigned_registers_ =
+      new (chunk->zone()) BitVector(Register::kNumRegisters, chunk->zone());
+  assigned_double_registers_ = new (chunk->zone())
+      BitVector(DoubleRegister::kMaxNumRegisters, chunk->zone());
+  MeetRegisterConstraints();
+  if (!AllocationOk()) return false;
+  ResolvePhis();
+  BuildLiveRanges();
+  AllocateGeneralRegisters();
+  if (!AllocationOk()) return false;
+  AllocateDoubleRegisters();
+  if (!AllocationOk()) return false;
+  PopulatePointerMaps();
+  ConnectRanges();
+  ResolveControlFlow();
+  return true;
+}
+
+
+void LAllocator::MeetRegisterConstraints() {
+  LAllocatorPhase phase("L_Register constraints", this);
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+  for (int i = 0; i < blocks->length(); ++i) {
+    HBasicBlock* block = blocks->at(i);
+    MeetRegisterConstraints(block);
+    if (!AllocationOk()) return;
+  }
+}
+
+
+void LAllocator::ResolvePhis() {
+  LAllocatorPhase phase("L_Resolve phis", this);
+
+  // Process the blocks in reverse order.
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+  for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
+    HBasicBlock* block = blocks->at(block_id);
+    ResolvePhis(block);
+  }
+}
+
+
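+// Inserts a move on the edge from pred to block when the parts of a live
+// range covering the two ends of the edge are assigned different locations.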
+void LAllocator::ResolveControlFlow(LiveRange* range,
+                                    HBasicBlock* block,
+                                    HBasicBlock* pred) {
+  LifetimePosition pred_end =
+      LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+  LifetimePosition cur_start =
+      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+  LiveRange* pred_cover = NULL;
+  LiveRange* cur_cover = NULL;
+  LiveRange* cur_range = range;
+  while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
+    if (cur_range->CanCover(cur_start)) {
+      DCHECK(cur_cover == NULL);
+      cur_cover = cur_range;
+    }
+    if (cur_range->CanCover(pred_end)) {
+      DCHECK(pred_cover == NULL);
+      pred_cover = cur_range;
+    }
+    cur_range = cur_range->next();
+  }
+
+  if (cur_cover->IsSpilled()) return;
+  DCHECK(pred_cover != NULL && cur_cover != NULL);
+  if (pred_cover != cur_cover) {
+    LOperand* pred_op = pred_cover->CreateAssignedOperand(chunk()->zone());
+    LOperand* cur_op = cur_cover->CreateAssignedOperand(chunk()->zone());
+    if (!pred_op->Equals(cur_op)) {
+      LGap* gap = NULL;
+      if (block->predecessors()->length() == 1) {
+        gap = GapAt(block->first_instruction_index());
+      } else {
+        DCHECK(pred->end()->SecondSuccessor() == NULL);
+        gap = GetLastGap(pred);
+
+        // We are going to insert a move before the branch instruction.
+        // Some branch instructions (e.g. loops' back edges)
+        // can potentially cause a GC, so they have a pointer map.
+        // By inserting a move we essentially create a copy of a
+        // value which is invisible to PopulatePointerMaps(), because we store
+        // it into a location different from the operand of a live range
+        // covering a branch instruction.
+        // Thus we need to manually record a pointer.
+        LInstruction* branch = InstructionAt(pred->last_instruction_index());
+        if (branch->HasPointerMap()) {
+          if (HasTaggedValue(range->id())) {
+            branch->pointer_map()->RecordPointer(cur_op, chunk()->zone());
+          } else if (!cur_op->IsDoubleStackSlot() &&
+                     !cur_op->IsDoubleRegister()) {
+            branch->pointer_map()->RemovePointer(cur_op);
+          }
+        }
+      }
+      gap->GetOrCreateParallelMove(
+          LGap::START, chunk()->zone())->AddMove(pred_op, cur_op,
+                                                 chunk()->zone());
+    }
+  }
+}
+
+
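+// Returns the parallel move into which a move connecting two parts of a
+// split live range at pos should be inserted: the gap at pos itself if
+// there is one, otherwise the appropriate slot of the neighbouring gap.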
+LParallelMove* LAllocator::GetConnectingParallelMove(LifetimePosition pos) {
+  int index = pos.InstructionIndex();
+  if (IsGapAt(index)) {
+    LGap* gap = GapAt(index);
+    return gap->GetOrCreateParallelMove(
+        pos.IsInstructionStart() ? LGap::START : LGap::END, chunk()->zone());
+  }
+  int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
+  return GapAt(gap_pos)->GetOrCreateParallelMove(
+      (gap_pos < index) ? LGap::AFTER : LGap::BEFORE, chunk()->zone());
+}
+
+
+HBasicBlock* LAllocator::GetBlock(LifetimePosition pos) {
+  LGap* gap = GapAt(chunk_->NearestGapPos(pos.InstructionIndex()));
+  return gap->block();
+}
+
+
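+// Inserts gap moves between adjacent parts of each split live range where
+// the parts touch and the second part is not spilled; moves across block
+// boundaries are inserted only when control flow can be resolved eagerly
+// (see CanEagerlyResolveControlFlow).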
+void LAllocator::ConnectRanges() {
+  LAllocatorPhase phase("L_Connect ranges", this);
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* first_range = live_ranges()->at(i);
+    if (first_range == NULL || first_range->parent() != NULL) continue;
+
+    LiveRange* second_range = first_range->next();
+    while (second_range != NULL) {
+      LifetimePosition pos = second_range->Start();
+
+      if (!second_range->IsSpilled()) {
+        // Add gap move if the two live ranges touch and there is no block
+        // boundary.
+        if (first_range->End().Value() == pos.Value()) {
+          bool should_insert = true;
+          if (IsBlockBoundary(pos)) {
+            should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
+          }
+          if (should_insert) {
+            LParallelMove* move = GetConnectingParallelMove(pos);
+            LOperand* prev_operand = first_range->CreateAssignedOperand(
+                chunk()->zone());
+            LOperand* cur_operand = second_range->CreateAssignedOperand(
+                chunk()->zone());
+            move->AddMove(prev_operand, cur_operand,
+                          chunk()->zone());
+          }
+        }
+      }
+
+      first_range = second_range;
+      second_range = second_range->next();
+    }
+  }
+}
+
+
+bool LAllocator::CanEagerlyResolveControlFlow(HBasicBlock* block) const {
+  if (block->predecessors()->length() != 1) return false;
+  return block->predecessors()->first()->block_id() == block->block_id() - 1;
+}
+
+
+void LAllocator::ResolveControlFlow() {
+  LAllocatorPhase phase("L_Resolve control flow", this);
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+  for (int block_id = 1; block_id < blocks->length(); ++block_id) {
+    HBasicBlock* block = blocks->at(block_id);
+    if (CanEagerlyResolveControlFlow(block)) continue;
+    BitVector* live = live_in_sets_[block->block_id()];
+    BitVector::Iterator iterator(live);
+    while (!iterator.Done()) {
+      int operand_index = iterator.Current();
+      for (int i = 0; i < block->predecessors()->length(); ++i) {
+        HBasicBlock* cur = block->predecessors()->at(i);
+        LiveRange* cur_range = LiveRangeFor(operand_index);
+        ResolveControlFlow(cur_range, block, cur);
+      }
+      iterator.Advance();
+    }
+  }
+}
+
+
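+// Classical backward liveness construction: blocks are processed in reverse
+// order, each seeded with its live-out set; ProcessInstructions then
+// shortens or extends the intervals, phis are given their definitions, and
+// values live on a loop's back edge are kept live for the whole loop.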
+void LAllocator::BuildLiveRanges() {
+  LAllocatorPhase phase("L_Build live ranges", this);
+  InitializeLivenessAnalysis();
+  // Process the blocks in reverse order.
+  const ZoneList<HBasicBlock*>* blocks = graph_->blocks();
+  for (int block_id = blocks->length() - 1; block_id >= 0; --block_id) {
+    HBasicBlock* block = blocks->at(block_id);
+    BitVector* live = ComputeLiveOut(block);
+    // Initially consider all live_out values live for the entire block. We
+    // will shorten these intervals if necessary.
+    AddInitialIntervals(block, live);
+
+    // Process the instructions in reverse order, generating and killing
+    // live values.
+    ProcessInstructions(block, live);
+    // All phi output operands are killed by this block.
+    const ZoneList<HPhi*>* phis = block->phis();
+    for (int i = 0; i < phis->length(); ++i) {
+      // The live range interval already ends at the first instruction of the
+      // block.
+      HPhi* phi = phis->at(i);
+      live->Remove(phi->id());
+
+      LOperand* hint = NULL;
+      LOperand* phi_operand = NULL;
+      LGap* gap = GetLastGap(phi->block()->predecessors()->at(0));
+      LParallelMove* move = gap->GetOrCreateParallelMove(LGap::START,
+                                                         chunk()->zone());
+      for (int j = 0; j < move->move_operands()->length(); ++j) {
+        LOperand* to = move->move_operands()->at(j).destination();
+        if (to->IsUnallocated() &&
+            LUnallocated::cast(to)->virtual_register() == phi->id()) {
+          hint = move->move_operands()->at(j).source();
+          phi_operand = to;
+          break;
+        }
+      }
+      DCHECK(hint != NULL);
+
+      LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
+              block->first_instruction_index());
+      Define(block_start, phi_operand, hint);
+    }
+
+    // Now live is the live_in set for this block, except that it does not
+    // include values live out on backward successor edges.
+    live_in_sets_[block_id] = live;
+
+    // If this block is a loop header, go back and patch up the necessary
+    // predecessor blocks.
+    if (block->IsLoopHeader()) {
+      // TODO(kmillikin): Need to be able to get the last block of the loop
+      // in the loop information. Add a live range stretching from the first
+      // loop instruction to the last for each value live on entry to the
+      // header.
+      HBasicBlock* back_edge = block->loop_information()->GetLastBackEdge();
+      BitVector::Iterator iterator(live);
+      LifetimePosition start = LifetimePosition::FromInstructionIndex(
+          block->first_instruction_index());
+      LifetimePosition end = LifetimePosition::FromInstructionIndex(
+          back_edge->last_instruction_index()).NextInstruction();
+      while (!iterator.Done()) {
+        int operand_index = iterator.Current();
+        LiveRange* range = LiveRangeFor(operand_index);
+        range->EnsureInterval(start, end, zone());
+        iterator.Advance();
+      }
+
+      for (int i = block->block_id() + 1; i <= back_edge->block_id(); ++i) {
+        live_in_sets_[i]->Union(*live);
+      }
+    }
+
+#ifdef DEBUG
+    if (block_id == 0) {
+      BitVector::Iterator iterator(live);
+      bool found = false;
+      while (!iterator.Done()) {
+        found = true;
+        int operand_index = iterator.Current();
+        {
+          AllowHandleDereference allow_deref;
+          PrintF("Function: %s\n", chunk_->info()->GetDebugName().get());
+        }
+        PrintF("Value %d used before first definition!\n", operand_index);
+        LiveRange* range = LiveRangeFor(operand_index);
+        PrintF("First use is at %d\n", range->first_pos()->pos().Value());
+        iterator.Advance();
+      }
+      DCHECK(!found);
+    }
+#endif
+  }
+
+  for (int i = 0; i < live_ranges_.length(); ++i) {
+    if (live_ranges_[i] != NULL) {
+      live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
+    }
+  }
+}
+
+
+bool LAllocator::SafePointsAreInOrder() const {
+  const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
+  int safe_point = 0;
+  for (int i = 0; i < pointer_maps->length(); ++i) {
+    LPointerMap* map = pointer_maps->at(i);
+    if (safe_point > map->lithium_position()) return false;
+    safe_point = map->lithium_position();
+  }
+  return true;
+}
+
+
+void LAllocator::PopulatePointerMaps() {
+  LAllocatorPhase phase("L_Populate pointer maps", this);
+  const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
+
+  DCHECK(SafePointsAreInOrder());
+
+  // Iterate over all safe point positions and record a pointer
+  // for all spilled live ranges at this point.
+  int first_safe_point_index = 0;
+  int last_range_start = 0;
+  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
+    LiveRange* range = live_ranges()->at(range_idx);
+    if (range == NULL) continue;
+    // Iterate over the first parts of multi-part live ranges.
+    if (range->parent() != NULL) continue;
+    // Skip non-pointer values.
+    if (!HasTaggedValue(range->id())) continue;
+    // Skip empty live ranges.
+    if (range->IsEmpty()) continue;
+
+    // Find the extent of the range and its children.
+    int start = range->Start().InstructionIndex();
+    int end = 0;
+    for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
+      LifetimePosition this_end = cur->End();
+      if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
+      DCHECK(cur->Start().InstructionIndex() >= start);
+    }
+
+    // Most of the ranges are in order, but not all.  When a range starts
+    // before the previous one, reset first_safe_point_index so that we
+    // don't miss any safe points.
+    if (start < last_range_start) {
+      first_safe_point_index = 0;
+    }
+    last_range_start = start;
+
+    // Step across all the safe points that are before the start of this
+    // range, recording how far we stepped so that we can skip this work for
+    // the next range.
+    while (first_safe_point_index < pointer_maps->length()) {
+      LPointerMap* map = pointer_maps->at(first_safe_point_index);
+      int safe_point = map->lithium_position();
+      if (safe_point >= start) break;
+      first_safe_point_index++;
+    }
+
+    // Step through the safe points to see whether they are in the range.
+    for (int safe_point_index = first_safe_point_index;
+         safe_point_index < pointer_maps->length();
+         ++safe_point_index) {
+      LPointerMap* map = pointer_maps->at(safe_point_index);
+      int safe_point = map->lithium_position();
+
+      // The safe points are sorted so we can stop searching here.
+      if (safe_point - 1 > end) break;
+
+      // Advance to the next active range that covers the current
+      // safe point position.
+      LifetimePosition safe_point_pos =
+          LifetimePosition::FromInstructionIndex(safe_point);
+      LiveRange* cur = range;
+      while (cur != NULL && !cur->Covers(safe_point_pos)) {
+        cur = cur->next();
+      }
+      if (cur == NULL) continue;
+
+      // Check if the live range is spilled and the safe point is after
+      // the spill position.
+      if (range->HasAllocatedSpillOperand() &&
+          safe_point >= range->spill_start_index()) {
+        TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
+                   range->id(), range->spill_start_index(), safe_point);
+        map->RecordPointer(range->GetSpillOperand(), chunk()->zone());
+      }
+
+      if (!cur->IsSpilled()) {
+        TraceAlloc("Pointer in register for range %d (start at %d) "
+                   "at safe point %d\n",
+                   cur->id(), cur->Start().Value(), safe_point);
+        LOperand* operand = cur->CreateAssignedOperand(chunk()->zone());
+        DCHECK(!operand->IsStackSlot());
+        map->RecordPointer(operand, chunk()->zone());
+      }
+    }
+  }
+}
+
+
+void LAllocator::AllocateGeneralRegisters() {
+  LAllocatorPhase phase("L_Allocate general registers", this);
+  num_registers_ =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+          ->num_allocatable_general_registers();
+  allocatable_register_codes_ =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+          ->allocatable_general_codes();
+  mode_ = GENERAL_REGISTERS;
+  AllocateRegisters();
+}
+
+
+void LAllocator::AllocateDoubleRegisters() {
+  LAllocatorPhase phase("L_Allocate double registers", this);
+  num_registers_ =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+          ->num_allocatable_double_registers();
+  allocatable_register_codes_ =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT)
+          ->allocatable_double_codes();
+  mode_ = DOUBLE_REGISTERS;
+  AllocateRegisters();
+}
+
+
+void LAllocator::AllocateRegisters() {
+  DCHECK(unhandled_live_ranges_.is_empty());
+
+  for (int i = 0; i < live_ranges_.length(); ++i) {
+    if (live_ranges_[i] != NULL) {
+      if (live_ranges_[i]->Kind() == mode_) {
+        AddToUnhandledUnsorted(live_ranges_[i]);
+      }
+    }
+  }
+  SortUnhandled();
+  DCHECK(UnhandledIsSorted());
+
+  DCHECK(reusable_slots_.is_empty());
+  DCHECK(active_live_ranges_.is_empty());
+  DCHECK(inactive_live_ranges_.is_empty());
+
+  if (mode_ == DOUBLE_REGISTERS) {
+    for (int i = 0; i < fixed_double_live_ranges_.length(); ++i) {
+      LiveRange* current = fixed_double_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  } else {
+    DCHECK(mode_ == GENERAL_REGISTERS);
+    for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
+      LiveRange* current = fixed_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  }
+
+  while (!unhandled_live_ranges_.is_empty()) {
+    DCHECK(UnhandledIsSorted());
+    LiveRange* current = unhandled_live_ranges_.RemoveLast();
+    DCHECK(UnhandledIsSorted());
+    LifetimePosition position = current->Start();
+#ifdef DEBUG
+    allocation_finger_ = position;
+#endif
+    TraceAlloc("Processing interval %d start=%d\n",
+               current->id(),
+               position.Value());
+
+    if (current->HasAllocatedSpillOperand()) {
+      TraceAlloc("Live range %d already has a spill operand\n", current->id());
+      LifetimePosition next_pos = position;
+      if (IsGapAt(next_pos.InstructionIndex())) {
+        next_pos = next_pos.NextInstruction();
+      }
+      UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
+      // If the range already has a spill operand and it doesn't need a
+      // register immediately, split it and spill the first part of the range.
+      if (pos == NULL) {
+        Spill(current);
+        continue;
+      } else if (pos->pos().Value() >
+                 current->Start().NextInstruction().Value()) {
+        // Do not spill the live range eagerly if the use position that can
+        // benefit from the register is too close to the start of the live
+        // range.
+        SpillBetween(current, current->Start(), pos->pos());
+        if (!AllocationOk()) return;
+        DCHECK(UnhandledIsSorted());
+        continue;
+      }
+    }
+
+    for (int i = 0; i < active_live_ranges_.length(); ++i) {
+      LiveRange* cur_active = active_live_ranges_.at(i);
+      if (cur_active->End().Value() <= position.Value()) {
+        ActiveToHandled(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      } else if (!cur_active->Covers(position)) {
+        ActiveToInactive(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      }
+    }
+
+    for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+      LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+      if (cur_inactive->End().Value() <= position.Value()) {
+        InactiveToHandled(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      } else if (cur_inactive->Covers(position)) {
+        InactiveToActive(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      }
+    }
+
+    DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());
+
+    bool result = TryAllocateFreeReg(current);
+    if (!AllocationOk()) return;
+
+    if (!result) AllocateBlockedReg(current);
+    if (!AllocationOk()) return;
+
+    if (current->HasRegisterAssigned()) {
+      AddToActive(current);
+    }
+  }
+
+  reusable_slots_.Rewind(0);
+  active_live_ranges_.Rewind(0);
+  inactive_live_ranges_.Rewind(0);
+}
+
+
+const char* LAllocator::RegisterName(int allocation_index) {
+  if (mode_ == GENERAL_REGISTERS) {
+    return Register::from_code(allocation_index).ToString();
+  } else {
+    return DoubleRegister::from_code(allocation_index).ToString();
+  }
+}
+
+
+void LAllocator::TraceAlloc(const char* msg, ...) {
+  if (FLAG_trace_alloc) {
+    va_list arguments;
+    va_start(arguments, msg);
+    base::OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+bool LAllocator::HasTaggedValue(int virtual_register) const {
+  HValue* value = graph_->LookupValue(virtual_register);
+  if (value == NULL) return false;
+  return value->representation().IsTagged() && !value->type().IsSmi();
+}
+
+
+RegisterKind LAllocator::RequiredRegisterKind(int virtual_register) const {
+  if (virtual_register < first_artificial_register_) {
+    HValue* value = graph_->LookupValue(virtual_register);
+    if (value != NULL && value->representation().IsDouble()) {
+      return DOUBLE_REGISTERS;
+    }
+  } else if (double_artificial_registers_.Contains(
+      virtual_register - first_artificial_register_)) {
+    return DOUBLE_REGISTERS;
+  }
+
+  return GENERAL_REGISTERS;
+}
+
+
+void LAllocator::AddToActive(LiveRange* range) {
+  TraceAlloc("Add live range %d to active\n", range->id());
+  active_live_ranges_.Add(range, zone());
+}
+
+
+void LAllocator::AddToInactive(LiveRange* range) {
+  TraceAlloc("Add live range %d to inactive\n", range->id());
+  inactive_live_ranges_.Add(range, zone());
+}
+
+
+void LAllocator::AddToUnhandledSorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
+  DCHECK(allocation_finger_.Value() <= range->Start().Value());
+  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
+    LiveRange* cur_range = unhandled_live_ranges_.at(i);
+    if (range->ShouldBeAllocatedBefore(cur_range)) {
+      TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
+      unhandled_live_ranges_.InsertAt(i + 1, range, zone());
+      DCHECK(UnhandledIsSorted());
+      return;
+    }
+  }
+  TraceAlloc("Add live range %d to unhandled at start\n", range->id());
+  unhandled_live_ranges_.InsertAt(0, range, zone());
+  DCHECK(UnhandledIsSorted());
+}
+
+
+void LAllocator::AddToUnhandledUnsorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
+  TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
+  unhandled_live_ranges_.Add(range, zone());
+}
+
+
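+// Comparator for SortUnhandled below: a range that should be allocated
+// before another one sorts after it, so that it ends up closer to the end
+// of the array, from where ranges are cheaply removed.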
+static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
+  DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
+         !(*b)->ShouldBeAllocatedBefore(*a));
+  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
+  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
+  return (*a)->id() - (*b)->id();
+}
+
+
+// Sort the unhandled live ranges so that the ranges to be processed first are
+// at the end of the array list.  This is convenient for the register allocation
+// algorithm because it is efficient to remove elements from the end.
+void LAllocator::SortUnhandled() {
+  TraceAlloc("Sort unhandled\n");
+  unhandled_live_ranges_.Sort(&UnhandledSortHelper);
+}
+
+
+bool LAllocator::UnhandledIsSorted() {
+  int len = unhandled_live_ranges_.length();
+  for (int i = 1; i < len; i++) {
+    LiveRange* a = unhandled_live_ranges_.at(i - 1);
+    LiveRange* b = unhandled_live_ranges_.at(i);
+    if (a->Start().Value() < b->Start().Value()) return false;
+  }
+  return true;
+}
+
+
+void LAllocator::FreeSpillSlot(LiveRange* range) {
+  // Check that we are the last range.
+  if (range->next() != NULL) return;
+
+  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
+
+  int index = range->TopLevel()->GetSpillOperand()->index();
+  if (index >= 0) {
+    reusable_slots_.Add(range, zone());
+  }
+}
+
+
+LOperand* LAllocator::TryReuseSpillSlot(LiveRange* range) {
+  if (reusable_slots_.is_empty()) return NULL;
+  if (reusable_slots_.first()->End().Value() >
+      range->TopLevel()->Start().Value()) {
+    return NULL;
+  }
+  LOperand* result = reusable_slots_.first()->TopLevel()->GetSpillOperand();
+  reusable_slots_.Remove(0);
+  return result;
+}
+
+
+void LAllocator::ActiveToHandled(LiveRange* range) {
+  DCHECK(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from active to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void LAllocator::ActiveToInactive(LiveRange* range) {
+  DCHECK(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  inactive_live_ranges_.Add(range, zone());
+  TraceAlloc("Moving live range %d from active to inactive\n", range->id());
+}
+
+
+void LAllocator::InactiveToHandled(LiveRange* range) {
+  DCHECK(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void LAllocator::InactiveToActive(LiveRange* range) {
+  DCHECK(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  active_live_ranges_.Add(range, zone());
+  TraceAlloc("Moving live range %d from inactive to active\n", range->id());
+}
+
+
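+// Standard linear-scan step: computes for each register how long it stays
+// free, prefers the hinted register when it is free for the whole range,
+// and otherwise picks the register that is free the longest, splitting the
+// current range at the point where that register becomes blocked.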
+bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
+  DCHECK(DoubleRegister::kMaxNumRegisters >= Register::kNumRegisters);
+
+  LifetimePosition free_until_pos[DoubleRegister::kMaxNumRegisters];
+
+  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
+    free_until_pos[i] = LifetimePosition::MaxPosition();
+  }
+
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* cur_active = active_live_ranges_.at(i);
+    free_until_pos[cur_active->assigned_register()] =
+        LifetimePosition::FromInstructionIndex(0);
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+    DCHECK(cur_inactive->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection =
+        cur_inactive->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = cur_inactive->assigned_register();
+    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+  }
+
+  LOperand* hint = current->FirstHint();
+  if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+    int register_index = hint->index();
+    TraceAlloc(
+        "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+        RegisterName(register_index),
+        free_until_pos[register_index].Value(),
+        current->id(),
+        current->End().Value());
+
+    // The desired register is free until the end of the current live range.
+    if (free_until_pos[register_index].Value() >= current->End().Value()) {
+      TraceAlloc("Assigning preferred reg %s to live range %d\n",
+                 RegisterName(register_index),
+                 current->id());
+      SetLiveRangeAssignedRegister(current, register_index);
+      return true;
+    }
+  }
+
+  // Find the register which stays free for the longest time.
+  int reg = allocatable_register_codes_[0];
+  for (int i = 1; i < RegisterCount(); ++i) {
+    int code = allocatable_register_codes_[i];
+    if (free_until_pos[code].Value() > free_until_pos[reg].Value()) {
+      reg = code;
+    }
+  }
+
+  LifetimePosition pos = free_until_pos[reg];
+
+  if (pos.Value() <= current->Start().Value()) {
+    // All registers are blocked.
+    return false;
+  }
+
+  if (pos.Value() < current->End().Value()) {
+    // Register reg is available at the range start but becomes blocked
+    // before the range end. Split current at the position where it becomes
+    // blocked.
+    LiveRange* tail = SplitRangeAt(current, pos);
+    if (!AllocationOk()) return false;
+    AddToUnhandledSorted(tail);
+  }
+
+  // Register reg is available at the range start and is free until
+  // the range end.
+  DCHECK(pos.Value() >= current->End().Value());
+  TraceAlloc("Assigning free reg %s to live range %d\n",
+             RegisterName(reg),
+             current->id());
+  SetLiveRangeAssignedRegister(current, reg);
+
+  return true;
+}
+
+
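+// Called when no register is free for the whole range: picks the register
+// whose next use by other ranges is furthest away, spills the intersecting
+// parts of the ranges currently occupying it, and splits the current range
+// if a fixed range blocks the register before the current range ends.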
+void LAllocator::AllocateBlockedReg(LiveRange* current) {
+  UsePosition* register_use = current->NextRegisterPosition(current->Start());
+  if (register_use == NULL) {
+    // There is no use in the current live range that requires a register.
+    // We can just spill it.
+    Spill(current);
+    return;
+  }
+
+  LifetimePosition use_pos[DoubleRegister::kMaxNumRegisters];
+  LifetimePosition block_pos[DoubleRegister::kMaxNumRegisters];
+
+  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
+    use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
+  }
+
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
+      block_pos[cur_reg] = use_pos[cur_reg] =
+          LifetimePosition::FromInstructionIndex(0);
+    } else {
+      UsePosition* next_use = range->NextUsePositionRegisterIsBeneficial(
+          current->Start());
+      if (next_use == NULL) {
+        use_pos[cur_reg] = range->End();
+      } else {
+        use_pos[cur_reg] = next_use->pos();
+      }
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_.at(i);
+    DCHECK(range->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection = range->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed()) {
+      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+    } else {
+      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+    }
+  }
+
+  int reg = allocatable_register_codes_[0];
+  for (int i = 1; i < RegisterCount(); ++i) {
+    int code = allocatable_register_codes_[i];
+    if (use_pos[code].Value() > use_pos[reg].Value()) {
+      reg = code;
+    }
+  }
+
+  LifetimePosition pos = use_pos[reg];
+
+  if (pos.Value() < register_use->pos().Value()) {
+    // All registers are blocked before the first use that requires a register.
+    // Spill starting part of live range up to that use.
+    SpillBetween(current, current->Start(), register_use->pos());
+    return;
+  }
+
+  if (block_pos[reg].Value() < current->End().Value()) {
+    // Register becomes blocked before the current range end. Split before that
+    // position.
+    LiveRange* tail = SplitBetween(current,
+                                   current->Start(),
+                                   block_pos[reg].InstructionStart());
+    if (!AllocationOk()) return;
+    AddToUnhandledSorted(tail);
+  }
+
+  // Register reg is not blocked for the whole range.
+  DCHECK(block_pos[reg].Value() >= current->End().Value());
+  TraceAlloc("Assigning blocked reg %s to live range %d\n",
+             RegisterName(reg),
+             current->id());
+  SetLiveRangeAssignedRegister(current, reg);
+
+  // This register was not free. Thus we need to find and spill
+  // parts of active and inactive live regions that use the same register
+  // at the same lifetime positions as current.
+  SplitAndSpillIntersecting(current);
+}
+
+
+LifetimePosition LAllocator::FindOptimalSpillingPos(LiveRange* range,
+                                                    LifetimePosition pos) {
+  HBasicBlock* block = GetBlock(pos.InstructionStart());
+  HBasicBlock* loop_header =
+      block->IsLoopHeader() ? block : block->parent_loop_header();
+
+  if (loop_header == NULL) return pos;
+
+  UsePosition* prev_use =
+    range->PreviousUsePositionRegisterIsBeneficial(pos);
+
+  while (loop_header != NULL) {
+    // We are going to spill live range inside the loop.
+    // If possible try to move spilling position backwards to loop header.
+    // This will reduce number of memory moves on the back edge.
+    LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
+        loop_header->first_instruction_index());
+
+    if (range->Covers(loop_start)) {
+      if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
+        // No register beneficial use inside the loop before the pos.
+        pos = loop_start;
+      }
+    }
+
+    // Try hoisting out to an outer loop.
+    loop_header = loop_header->parent_loop_header();
+  }
+
+  return pos;
+}
+
+
+void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
+  DCHECK(current->HasRegisterAssigned());
+  int reg = current->assigned_register();
+  LifetimePosition split_pos = current->Start();
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    if (range->assigned_register() == reg) {
+      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+      LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
+      if (next_pos == NULL) {
+        SpillAfter(range, spill_pos);
+      } else {
+        // When spilling between spill_pos and next_pos, ensure that the range
+        // remains spilled at least until the start of the current live range.
+        // This guarantees that we will not introduce new unhandled ranges that
+        // start before the current range, as that would violate the allocation
+        // invariant and lead to an inconsistent state of active and inactive
+        // live ranges: ranges are allocated in order of their start positions,
+        // and ranges are retired from active/inactive when the start of the
+        // current live range is larger than their end.
+        SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+      }
+      if (!AllocationOk()) return;
+      ActiveToHandled(range);
+      --i;
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_[i];
+    DCHECK(range->End().Value() > current->Start().Value());
+    if (range->assigned_register() == reg && !range->IsFixed()) {
+      LifetimePosition next_intersection = range->FirstIntersection(current);
+      if (next_intersection.IsValid()) {
+        UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+        if (next_pos == NULL) {
+          SpillAfter(range, split_pos);
+        } else {
+          next_intersection = Min(next_intersection, next_pos->pos());
+          SpillBetween(range, split_pos, next_intersection);
+        }
+        if (!AllocationOk()) return;
+        InactiveToHandled(range);
+        --i;
+      }
+    }
+  }
+}
+
+
+bool LAllocator::IsBlockBoundary(LifetimePosition pos) {
+  return pos.IsInstructionStart() &&
+      InstructionAt(pos.InstructionIndex())->IsLabel();
+}
+
+
+LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
+  DCHECK(!range->IsFixed());
+  TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
+
+  if (pos.Value() <= range->Start().Value()) return range;
+
+  // We can't properly connect live ranges if the split occurred at the end
+  // of a control instruction.
+  DCHECK(pos.IsInstructionStart() ||
+         !chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
+
+  int vreg = GetVirtualRegister();
+  if (!AllocationOk()) return NULL;
+  LiveRange* result = LiveRangeFor(vreg);
+  range->SplitAt(pos, result, zone());
+  return result;
+}
+
+
+LiveRange* LAllocator::SplitBetween(LiveRange* range,
+                                    LifetimePosition start,
+                                    LifetimePosition end) {
+  DCHECK(!range->IsFixed());
+  TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
+             range->id(),
+             start.Value(),
+             end.Value());
+
+  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
+  DCHECK(split_pos.Value() >= start.Value());
+  return SplitRangeAt(range, split_pos);
+}
+
+
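+// Picks a split position between start and end, preferring the latest
+// possible one but hoisting the split out of loops: if end lies in a loop
+// entered after start, the split happens at the first instruction of the
+// outermost such loop's header.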
+LifetimePosition LAllocator::FindOptimalSplitPos(LifetimePosition start,
+                                                 LifetimePosition end) {
+  int start_instr = start.InstructionIndex();
+  int end_instr = end.InstructionIndex();
+  DCHECK(start_instr <= end_instr);
+
+  // We have no choice: start and end are within the same instruction.
+  if (start_instr == end_instr) return end;
+
+  HBasicBlock* start_block = GetBlock(start);
+  HBasicBlock* end_block = GetBlock(end);
+
+  if (end_block == start_block) {
+    // The interval is split in the same basic block. Split at the latest
+    // possible position.
+    return end;
+  }
+
+  HBasicBlock* block = end_block;
+  // Find header of outermost loop.
+  while (block->parent_loop_header() != NULL &&
+      block->parent_loop_header()->block_id() > start_block->block_id()) {
+    block = block->parent_loop_header();
+  }
+
+  // We did not find any suitable outer loop. Split at the latest possible
+  // position unless end_block is a loop header itself.
+  if (block == end_block && !end_block->IsLoopHeader()) return end;
+
+  return LifetimePosition::FromInstructionIndex(
+      block->first_instruction_index());
+}
+
+
+void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
+  LiveRange* second_part = SplitRangeAt(range, pos);
+  if (!AllocationOk()) return;
+  Spill(second_part);
+}
+
+
+void LAllocator::SpillBetween(LiveRange* range,
+                              LifetimePosition start,
+                              LifetimePosition end) {
+  SpillBetweenUntil(range, start, start, end);
+}
+
+
+void LAllocator::SpillBetweenUntil(LiveRange* range,
+                                   LifetimePosition start,
+                                   LifetimePosition until,
+                                   LifetimePosition end) {
+  CHECK(start.Value() < end.Value());
+  LiveRange* second_part = SplitRangeAt(range, start);
+  if (!AllocationOk()) return;
+
+  if (second_part->Start().Value() < end.Value()) {
+    // The split result intersects with [start, end[.
+    // Split it at a position between ]start+1, end[, spill the middle part,
+    // and put the rest on the unhandled list.
+    LiveRange* third_part = SplitBetween(
+        second_part,
+        Max(second_part->Start().InstructionEnd(), until),
+        end.PrevInstruction().InstructionEnd());
+    if (!AllocationOk()) return;
+
+    DCHECK(third_part != second_part);
+
+    Spill(second_part);
+    AddToUnhandledSorted(third_part);
+  } else {
+    // The split result does not intersect with [start, end[.
+    // Nothing to spill. Just put it on the unhandled list as a whole.
+    AddToUnhandledSorted(second_part);
+  }
+}
+
+
+void LAllocator::Spill(LiveRange* range) {
+  DCHECK(!range->IsSpilled());
+  TraceAlloc("Spilling live range %d\n", range->id());
+  LiveRange* first = range->TopLevel();
+
+  if (!first->HasAllocatedSpillOperand()) {
+    LOperand* op = TryReuseSpillSlot(range);
+    if (op == NULL) op = chunk_->GetNextSpillSlot(range->Kind());
+    first->SetSpillOperand(op);
+  }
+  range->MakeSpilled(chunk()->zone());
+}
+
+
+int LAllocator::RegisterCount() const {
+  return num_registers_;
+}
+
+
+#ifdef DEBUG
+
+
+void LAllocator::Verify() const {
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* current = live_ranges()->at(i);
+    if (current != NULL) current->Verify();
+  }
+}
+
+
+#endif
+
+
+LAllocatorPhase::LAllocatorPhase(const char* name, LAllocator* allocator)
+    : CompilationPhase(name, allocator->graph()->info()),
+      allocator_(allocator) {
+  if (FLAG_hydrogen_stats) {
+    allocator_zone_start_allocation_size_ =
+        allocator->zone()->allocation_size();
+  }
+}
+
+
+LAllocatorPhase::~LAllocatorPhase() {
+  if (FLAG_hydrogen_stats) {
+    size_t size = allocator_->zone()->allocation_size() -
+                  allocator_zone_start_allocation_size_;
+    isolate()->GetHStatistics()->SaveTiming(name(), base::TimeDelta(), size);
+  }
+
+  if (ShouldProduceTraceOutput()) {
+    isolate()->GetHTracer()->TraceLithium(name(), allocator_->chunk());
+    isolate()->GetHTracer()->TraceLiveRanges(name(), allocator_);
+  }
+
+#ifdef DEBUG
+  if (allocator_ != NULL) allocator_->Verify();
+#endif
+}
+
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/lithium-allocator.h b/src/crankshaft/lithium-allocator.h
new file mode 100644
index 0000000..46289e0
--- /dev/null
+++ b/src/crankshaft/lithium-allocator.h
@@ -0,0 +1,574 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
+#define V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
+
+#include "src/allocation.h"
+#include "src/crankshaft/lithium.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HBasicBlock;
+class HGraph;
+class HPhi;
+class HTracer;
+class HValue;
+class BitVector;
+class StringStream;
+
+class LPlatformChunk;
+class LOperand;
+class LUnallocated;
+class LGap;
+class LParallelMove;
+class LPointerMap;
+
+
+// This class represents a single point of a LOperand's lifetime.
+// For each lithium instruction there are exactly two lifetime positions:
+// the beginning and the end of the instruction. Lifetime positions for
+// different lithium instructions are disjoint.
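+//
+// With the kStep == 2 encoding used below, instruction index i maps to the
+// start position 2 * i and the end position 2 * i + 1; for example,
+// instruction 3 occupies positions 6 and 7.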
+class LifetimePosition {
+ public:
+  // Return the lifetime position that corresponds to the beginning of
+  // the instruction with the given index.
+  static LifetimePosition FromInstructionIndex(int index) {
+    return LifetimePosition(index * kStep);
+  }
+
+  // Returns a numeric representation of this lifetime position.
+  int Value() const {
+    return value_;
+  }
+
+  // Returns the index of the instruction to which this lifetime position
+  // corresponds.
+  int InstructionIndex() const {
+    DCHECK(IsValid());
+    return value_ / kStep;
+  }
+
+  // Returns true if this lifetime position corresponds to the instruction
+  // start.
+  bool IsInstructionStart() const {
+    return (value_ & (kStep - 1)) == 0;
+  }
+
+  // Returns the lifetime position for the start of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionStart() const {
+    DCHECK(IsValid());
+    return LifetimePosition(value_ & ~(kStep - 1));
+  }
+
+  // Returns the lifetime position for the end of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionEnd() const {
+    DCHECK(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep/2);
+  }
+
+  // Returns the lifetime position for the beginning of the next instruction.
+  LifetimePosition NextInstruction() const {
+    DCHECK(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep);
+  }
+
+  // Returns the lifetime position for the beginning of the previous
+  // instruction.
+  LifetimePosition PrevInstruction() const {
+    DCHECK(IsValid());
+    DCHECK(value_ > 1);
+    return LifetimePosition(InstructionStart().Value() - kStep);
+  }
+
+  // Constructs the lifetime position which does not correspond to any
+  // instruction.
+  LifetimePosition() : value_(-1) {}
+
+  // Returns true if this lifetime position corresponds to some
+  // instruction.
+  bool IsValid() const { return value_ != -1; }
+
+  static inline LifetimePosition Invalid() { return LifetimePosition(); }
+
+  static inline LifetimePosition MaxPosition() {
+    // We have to use this kind of getter instead of a static member due
+    // to a crash bug in GDB.
+    return LifetimePosition(kMaxInt);
+  }
+
+ private:
+  static const int kStep = 2;
+
+  // Code relies on kStep being a power of two.
+  STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
+
+  explicit LifetimePosition(int value) : value_(value) { }
+
+  int value_;
+};
+
+
+// Representation of the non-empty interval [start,end[.
+class UseInterval: public ZoneObject {
+ public:
+  UseInterval(LifetimePosition start, LifetimePosition end)
+      : start_(start), end_(end), next_(NULL) {
+    DCHECK(start.Value() < end.Value());
+  }
+
+  LifetimePosition start() const { return start_; }
+  LifetimePosition end() const { return end_; }
+  UseInterval* next() const { return next_; }
+
+  // Split this interval at the given position without affecting the
+  // live range that owns it. The interval must contain the position.
+  void SplitAt(LifetimePosition pos, Zone* zone);
+
+  // If this interval intersects with the other, returns the smallest
+  // position that belongs to both of them.
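+  // For example, intersecting [2,6[ with [4,8[ yields position 4, while
+  // intersecting [2,4[ with [6,8[ yields the invalid position.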
+  LifetimePosition Intersect(const UseInterval* other) const {
+    if (other->start().Value() < start_.Value()) return other->Intersect(this);
+    if (other->start().Value() < end_.Value()) return other->start();
+    return LifetimePosition::Invalid();
+  }
+
+  bool Contains(LifetimePosition point) const {
+    return start_.Value() <= point.Value() && point.Value() < end_.Value();
+  }
+
+ private:
+  void set_start(LifetimePosition start) { start_ = start; }
+  void set_next(UseInterval* next) { next_ = next; }
+
+  LifetimePosition start_;
+  LifetimePosition end_;
+  UseInterval* next_;
+
+  friend class LiveRange;  // Assigns to start_.
+};
+
+// Representation of a use position.
+class UsePosition: public ZoneObject {
+ public:
+  UsePosition(LifetimePosition pos, LOperand* operand, LOperand* hint);
+
+  LOperand* operand() const { return operand_; }
+  bool HasOperand() const { return operand_ != NULL; }
+
+  LOperand* hint() const { return hint_; }
+  bool HasHint() const;
+  bool RequiresRegister() const;
+  bool RegisterIsBeneficial() const;
+
+  LifetimePosition pos() const { return pos_; }
+  UsePosition* next() const { return next_; }
+
+ private:
+  void set_next(UsePosition* next) { next_ = next; }
+
+  LOperand* const operand_;
+  LOperand* const hint_;
+  LifetimePosition const pos_;
+  UsePosition* next_;
+  bool requires_reg_;
+  bool register_beneficial_;
+
+  friend class LiveRange;
+};
+
+// Representation of SSA values' live ranges as a collection of (continuous)
+// intervals over the instruction ordering.
+class LiveRange: public ZoneObject {
+ public:
+  static const int kInvalidAssignment = 0x7fffffff;
+
+  LiveRange(int id, Zone* zone);
+
+  UseInterval* first_interval() const { return first_interval_; }
+  UsePosition* first_pos() const { return first_pos_; }
+  LiveRange* parent() const { return parent_; }
+  LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
+  LiveRange* next() const { return next_; }
+  bool IsChild() const { return parent() != NULL; }
+  int id() const { return id_; }
+  bool IsFixed() const { return id_ < 0; }
+  bool IsEmpty() const { return first_interval() == NULL; }
+  LOperand* CreateAssignedOperand(Zone* zone);
+  int assigned_register() const { return assigned_register_; }
+  int spill_start_index() const { return spill_start_index_; }
+  void set_assigned_register(int reg, Zone* zone);
+  void MakeSpilled(Zone* zone);
+
+  // Returns the use position in this live range that follows both the
+  // given start and the last processed use position.
+  // Modifies the internal state of the live range!
+  UsePosition* NextUsePosition(LifetimePosition start);
+
+  // Returns the use position for which a register is required in this
+  // live range and which follows both the given start and the last
+  // processed use position. Modifies the internal state of the live range!
+  UsePosition* NextRegisterPosition(LifetimePosition start);
+
+  // Returns the use position for which a register is beneficial in this
+  // live range and which follows both the given start and the last
+  // processed use position. Modifies the internal state of the live range!
+  UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+  // Returns the use position for which a register is beneficial in this
+  // live range and which precedes the given start.
+  UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+  // Returns true if this live range can be spilled at the given position.
+  bool CanBeSpilled(LifetimePosition pos);
+
+  // Split this live range at the given position which must follow the start of
+  // the range.
+  // All uses following the given position will be moved from this
+  // live range to the result live range.
+  void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
+
+  RegisterKind Kind() const { return kind_; }
+  bool HasRegisterAssigned() const {
+    return assigned_register_ != kInvalidAssignment;
+  }
+  bool IsSpilled() const { return spilled_; }
+
+  LOperand* current_hint_operand() const {
+    DCHECK(current_hint_operand_ == FirstHint());
+    return current_hint_operand_;
+  }
+  LOperand* FirstHint() const {
+    UsePosition* pos = first_pos_;
+    while (pos != NULL && !pos->HasHint()) pos = pos->next();
+    if (pos != NULL) return pos->hint();
+    return NULL;
+  }
+
+  LifetimePosition Start() const {
+    DCHECK(!IsEmpty());
+    return first_interval()->start();
+  }
+
+  LifetimePosition End() const {
+    DCHECK(!IsEmpty());
+    return last_interval_->end();
+  }
+
+  bool HasAllocatedSpillOperand() const;
+  LOperand* GetSpillOperand() const { return spill_operand_; }
+  void SetSpillOperand(LOperand* operand);
+
+  void SetSpillStartIndex(int start) {
+    spill_start_index_ = Min(start, spill_start_index_);
+  }
+
+  bool ShouldBeAllocatedBefore(const LiveRange* other) const;
+  bool CanCover(LifetimePosition position) const;
+  bool Covers(LifetimePosition position);
+  LifetimePosition FirstIntersection(LiveRange* other);
+
+  // Add a new interval or a new use position to this live range.
+  void EnsureInterval(LifetimePosition start,
+                      LifetimePosition end,
+                      Zone* zone);
+  void AddUseInterval(LifetimePosition start,
+                      LifetimePosition end,
+                      Zone* zone);
+  void AddUsePosition(LifetimePosition pos,
+                      LOperand* operand,
+                      LOperand* hint,
+                      Zone* zone);
+
+  // Shorten the most recently added interval by setting a new start.
+  void ShortenTo(LifetimePosition start);
+
+#ifdef DEBUG
+  // True if target overlaps an existing interval.
+  bool HasOverlap(UseInterval* target) const;
+  void Verify() const;
+#endif
+
+ private:
+  void ConvertOperands(Zone* zone);
+  UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
+  void AdvanceLastProcessedMarker(UseInterval* to_start_of,
+                                  LifetimePosition but_not_past) const;
+
+  int id_;
+  bool spilled_;
+  RegisterKind kind_;
+  int assigned_register_;
+  UseInterval* last_interval_;
+  UseInterval* first_interval_;
+  UsePosition* first_pos_;
+  LiveRange* parent_;
+  LiveRange* next_;
+  // This is used as a cache; it doesn't affect correctness.
+  mutable UseInterval* current_interval_;
+  UsePosition* last_processed_use_;
+  // This is used as a cache; it's invalid outside of BuildLiveRanges.
+  LOperand* current_hint_operand_;
+  LOperand* spill_operand_;
+  int spill_start_index_;
+
+  friend class LAllocator;  // Assigns to kind_.
+};
+
+
+class LAllocator BASE_EMBEDDED {
+ public:
+  LAllocator(int first_virtual_register, HGraph* graph);
+
+  static void TraceAlloc(const char* msg, ...);
+
+  // Checks whether the value of a given virtual register is tagged.
+  bool HasTaggedValue(int virtual_register) const;
+
+  // Returns the register kind required by the given virtual register.
+  RegisterKind RequiredRegisterKind(int virtual_register) const;
+
+  bool Allocate(LChunk* chunk);
+
+  const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
+  const Vector<LiveRange*>* fixed_live_ranges() const {
+    return &fixed_live_ranges_;
+  }
+  const Vector<LiveRange*>* fixed_double_live_ranges() const {
+    return &fixed_double_live_ranges_;
+  }
+
+  LPlatformChunk* chunk() const { return chunk_; }
+  HGraph* graph() const { return graph_; }
+  Isolate* isolate() const { return graph_->isolate(); }
+  Zone* zone() { return &zone_; }
+
+  int GetVirtualRegister() {
+    if (next_virtual_register_ >= LUnallocated::kMaxVirtualRegisters) {
+      allocation_ok_ = false;
+      // Maintain the invariant that we return something below the maximum.
+      return 0;
+    }
+    return next_virtual_register_++;
+  }
+
+  bool AllocationOk() { return allocation_ok_; }
+
+  void MarkAsOsrEntry() {
+    // There can be only one.
+    DCHECK(!has_osr_entry_);
+    // Simply set a flag to find and process the instruction later.
+    has_osr_entry_ = true;
+  }
+
+#ifdef DEBUG
+  void Verify() const;
+#endif
+
+  BitVector* assigned_registers() {
+    return assigned_registers_;
+  }
+  BitVector* assigned_double_registers() {
+    return assigned_double_registers_;
+  }
+
+ private:
+  void MeetRegisterConstraints();
+  void ResolvePhis();
+  void BuildLiveRanges();
+  void AllocateGeneralRegisters();
+  void AllocateDoubleRegisters();
+  void ConnectRanges();
+  void ResolveControlFlow();
+  void PopulatePointerMaps();
+  void AllocateRegisters();
+  bool CanEagerlyResolveControlFlow(HBasicBlock* block) const;
+  inline bool SafePointsAreInOrder() const;
+
+  // Liveness analysis support.
+  void InitializeLivenessAnalysis();
+  BitVector* ComputeLiveOut(HBasicBlock* block);
+  void AddInitialIntervals(HBasicBlock* block, BitVector* live_out);
+  void ProcessInstructions(HBasicBlock* block, BitVector* live);
+  void MeetRegisterConstraints(HBasicBlock* block);
+  void MeetConstraintsBetween(LInstruction* first,
+                              LInstruction* second,
+                              int gap_index);
+  void ResolvePhis(HBasicBlock* block);
+
+  // Helper methods for building intervals.
+  LOperand* AllocateFixed(LUnallocated* operand, int pos, bool is_tagged);
+  LiveRange* LiveRangeFor(LOperand* operand);
+  void Define(LifetimePosition position, LOperand* operand, LOperand* hint);
+  void Use(LifetimePosition block_start,
+           LifetimePosition position,
+           LOperand* operand,
+           LOperand* hint);
+  void AddConstraintsGapMove(int index, LOperand* from, LOperand* to);
+
+  // Helper methods for updating the live range lists.
+  void AddToActive(LiveRange* range);
+  void AddToInactive(LiveRange* range);
+  void AddToUnhandledSorted(LiveRange* range);
+  void AddToUnhandledUnsorted(LiveRange* range);
+  void SortUnhandled();
+  bool UnhandledIsSorted();
+  void ActiveToHandled(LiveRange* range);
+  void ActiveToInactive(LiveRange* range);
+  void InactiveToHandled(LiveRange* range);
+  void InactiveToActive(LiveRange* range);
+  void FreeSpillSlot(LiveRange* range);
+  LOperand* TryReuseSpillSlot(LiveRange* range);
+
+  // Helper methods for allocating registers.
+  bool TryAllocateFreeReg(LiveRange* range);
+  void AllocateBlockedReg(LiveRange* range);
+
+  // Live range splitting helpers.
+
+  // Split the given range at the given position.
+  // If the range starts at or after the given position, the
+  // original range is returned.
+  // Otherwise returns the live range that starts at pos and contains
+  // all uses from the original range that follow pos. Uses at pos will
+  // still be owned by the original range after splitting.
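+  // For example, splitting a range that covers [2,10[ at position 6
+  // shortens the original range to [2,6[ and returns a new range covering
+  // [6,10[ that receives the uses after position 6.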
+  LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
+
+  // Split the given range at a position from the interval [start, end].
+  LiveRange* SplitBetween(LiveRange* range,
+                          LifetimePosition start,
+                          LifetimePosition end);
+
+  // Find a lifetime position in the interval [start, end] which
+  // is optimal for splitting: it is either the header of the outermost
+  // loop covered by this interval or the latest possible position.
+  LifetimePosition FindOptimalSplitPos(LifetimePosition start,
+                                       LifetimePosition end);
+
+  // Spill the given live range after position pos.
+  void SpillAfter(LiveRange* range, LifetimePosition pos);
+
+  // Spill the given live range after position [start] and up to position [end].
+  void SpillBetween(LiveRange* range,
+                    LifetimePosition start,
+                    LifetimePosition end);
+
+  // Spill the given live range after position [start] and up to position [end].
+  // Range is guaranteed to be spilled at least until position [until].
+  void SpillBetweenUntil(LiveRange* range,
+                         LifetimePosition start,
+                         LifetimePosition until,
+                         LifetimePosition end);
+
+  void SplitAndSpillIntersecting(LiveRange* range);
+
+  // If we are trying to spill a range inside a loop, try to
+  // hoist the spill position out to the point just before the loop.
+  LifetimePosition FindOptimalSpillingPos(LiveRange* range,
+                                          LifetimePosition pos);
+
+  void Spill(LiveRange* range);
+  bool IsBlockBoundary(LifetimePosition pos);
+
+  // Helper methods for resolving control flow.
+  void ResolveControlFlow(LiveRange* range,
+                          HBasicBlock* block,
+                          HBasicBlock* pred);
+
+  inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
+
+  // Return the parallel move that should be used to connect ranges split
+  // at the given position.
+  LParallelMove* GetConnectingParallelMove(LifetimePosition pos);
+
+  // Return the block which contains the given lifetime position.
+  HBasicBlock* GetBlock(LifetimePosition pos);
+
+  // Helper methods for the fixed registers.
+  int RegisterCount() const;
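+  // Fixed registers get negative live range ids: index 0 maps to -1,
+  // index 1 to -2, and so on (cf. LiveRange::IsFixed).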
+  static int FixedLiveRangeID(int index) { return -index - 1; }
+  static int FixedDoubleLiveRangeID(int index);
+  LiveRange* FixedLiveRangeFor(int index);
+  LiveRange* FixedDoubleLiveRangeFor(int index);
+  LiveRange* LiveRangeFor(int index);
+  HPhi* LookupPhi(LOperand* operand) const;
+  LGap* GetLastGap(HBasicBlock* block);
+
+  const char* RegisterName(int allocation_index);
+
+  inline bool IsGapAt(int index);
+
+  inline LInstruction* InstructionAt(int index);
+
+  inline LGap* GapAt(int index);
+
+  Zone zone_;
+
+  LPlatformChunk* chunk_;
+
+  // During liveness analysis keep a mapping from block id to live_in sets
+  // for blocks already analyzed.
+  ZoneList<BitVector*> live_in_sets_;
+
+  // Liveness analysis results.
+  ZoneList<LiveRange*> live_ranges_;
+
+  // Lists of live ranges.
+  EmbeddedVector<LiveRange*, Register::kNumRegisters> fixed_live_ranges_;
+  EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumRegisters>
+      fixed_double_live_ranges_;
+  ZoneList<LiveRange*> unhandled_live_ranges_;
+  ZoneList<LiveRange*> active_live_ranges_;
+  ZoneList<LiveRange*> inactive_live_ranges_;
+  ZoneList<LiveRange*> reusable_slots_;
+
+  // Next virtual register number to be assigned to temporaries.
+  int next_virtual_register_;
+  int first_artificial_register_;
+  GrowableBitVector double_artificial_registers_;
+
+  RegisterKind mode_;
+  int num_registers_;
+  const int* allocatable_register_codes_;
+
+  BitVector* assigned_registers_;
+  BitVector* assigned_double_registers_;
+
+  HGraph* graph_;
+
+  bool has_osr_entry_;
+
+  // Indicates success or failure during register allocation.
+  bool allocation_ok_;
+
+#ifdef DEBUG
+  LifetimePosition allocation_finger_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(LAllocator);
+};
+
+
+class LAllocatorPhase : public CompilationPhase {
+ public:
+  LAllocatorPhase(const char* name, LAllocator* allocator);
+  ~LAllocatorPhase();
+
+ private:
+  LAllocator* allocator_;
+  size_t allocator_zone_start_allocation_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(LAllocatorPhase);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_LITHIUM_ALLOCATOR_H_
diff --git a/src/crankshaft/lithium-codegen.cc b/src/crankshaft/lithium-codegen.cc
new file mode 100644
index 0000000..5bd1e6a
--- /dev/null
+++ b/src/crankshaft/lithium-codegen.cc
@@ -0,0 +1,358 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/lithium-codegen.h"
+
+#include <sstream>
+
+#if V8_TARGET_ARCH_IA32
+#include "src/crankshaft/ia32/lithium-ia32.h"  // NOLINT
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"  // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/crankshaft/x64/lithium-x64.h"  // NOLINT
+#include "src/crankshaft/x64/lithium-codegen-x64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM
+#include "src/crankshaft/arm/lithium-arm.h"  // NOLINT
+#include "src/crankshaft/arm/lithium-codegen-arm.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/crankshaft/arm64/lithium-arm64.h"  // NOLINT
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/crankshaft/mips/lithium-mips.h"  // NOLINT
+#include "src/crankshaft/mips/lithium-codegen-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/crankshaft/mips64/lithium-mips64.h"  // NOLINT
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_X87
+#include "src/crankshaft/x87/lithium-x87.h"  // NOLINT
+#include "src/crankshaft/x87/lithium-codegen-x87.h"  // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/crankshaft/ppc/lithium-ppc.h"          // NOLINT
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"  // NOLINT
+#else
+#error Unsupported target architecture.
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+HGraph* LCodeGenBase::graph() const {
+  return chunk()->graph();
+}
+
+
+LCodeGenBase::LCodeGenBase(LChunk* chunk, MacroAssembler* assembler,
+                           CompilationInfo* info)
+    : chunk_(static_cast<LPlatformChunk*>(chunk)),
+      masm_(assembler),
+      info_(info),
+      zone_(info->zone()),
+      status_(UNUSED),
+      current_block_(-1),
+      current_instruction_(-1),
+      instructions_(chunk->instructions()),
+      deoptimizations_(4, info->zone()),
+      deoptimization_literals_(8, info->zone()),
+      translations_(info->zone()),
+      inlined_function_count_(0),
+      last_lazy_deopt_pc_(0),
+      osr_pc_offset_(-1) {}
+
+
+bool LCodeGenBase::GenerateBody() {
+  DCHECK(is_generating());
+  bool emit_instructions = true;
+  LCodeGen* codegen = static_cast<LCodeGen*>(this);
+  for (current_instruction_ = 0;
+       !is_aborted() && current_instruction_ < instructions_->length();
+       current_instruction_++) {
+    LInstruction* instr = instructions_->at(current_instruction_);
+
+    // Don't emit code for basic blocks with a replacement.
+    if (instr->IsLabel()) {
+      emit_instructions = !LLabel::cast(instr)->HasReplacement() &&
+          (!FLAG_unreachable_code_elimination ||
+           instr->hydrogen_value()->block()->IsReachable());
+      if (FLAG_code_comments && !emit_instructions) {
+        Comment(
+            ";;; <@%d,#%d> -------------------- B%d (unreachable/replaced) "
+            "--------------------",
+            current_instruction_,
+            instr->hydrogen_value()->id(),
+            instr->hydrogen_value()->block()->block_id());
+      }
+    }
+    if (!emit_instructions) continue;
+
+    if (FLAG_code_comments && instr->HasInterestingComment(codegen)) {
+      Comment(";;; <@%d,#%d> %s",
+              current_instruction_,
+              instr->hydrogen_value()->id(),
+              instr->Mnemonic());
+    }
+
+    GenerateBodyInstructionPre(instr);
+
+    HValue* value = instr->hydrogen_value();
+    if (!value->position().IsUnknown()) {
+      RecordAndWritePosition(
+        chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+    }
+
+    instr->CompileToNative(codegen);
+
+    GenerateBodyInstructionPost(instr);
+  }
+  EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  return !is_aborted();
+}
+
+
+void LCodeGenBase::CheckEnvironmentUsage() {
+#ifdef DEBUG
+  bool dead_block = false;
+  for (int i = 0; i < instructions_->length(); i++) {
+    LInstruction* instr = instructions_->at(i);
+    HValue* hval = instr->hydrogen_value();
+    if (instr->IsLabel()) dead_block = LLabel::cast(instr)->HasReplacement();
+    if (dead_block || !hval->block()->IsReachable()) continue;
+
+    HInstruction* hinstr = HInstruction::cast(hval);
+    if (!hinstr->CanDeoptimize() && instr->HasEnvironment()) {
+      V8_Fatal(__FILE__, __LINE__, "CanDeoptimize is wrong for %s (%s)",
+               hinstr->Mnemonic(), instr->Mnemonic());
+    }
+
+    if (instr->HasEnvironment() && !instr->environment()->has_been_used()) {
+      V8_Fatal(__FILE__, __LINE__, "unused environment for %s (%s)",
+               hinstr->Mnemonic(), instr->Mnemonic());
+    }
+  }
+#endif
+}
+
+
+void LCodeGenBase::Comment(const char* format, ...) {
+  if (!FLAG_code_comments) return;
+  char buffer[4 * KB];
+  StringBuilder builder(buffer, arraysize(buffer));
+  va_list arguments;
+  va_start(arguments, format);
+  builder.AddFormattedList(format, arguments);
+  va_end(arguments);
+
+  // Copy the string before recording it in the assembler to avoid
+  // issues when the stack-allocated buffer goes out of scope.
+  size_t length = builder.position();
+  Vector<char> copy = Vector<char>::New(static_cast<int>(length) + 1);
+  MemCopy(copy.start(), builder.Finalize(), copy.length());
+  masm()->RecordComment(copy.start());
+}
+
+
+void LCodeGenBase::DeoptComment(const Deoptimizer::DeoptInfo& deopt_info) {
+  masm()->RecordDeoptReason(deopt_info.deopt_reason, deopt_info.position);
+}
+
+
+int LCodeGenBase::GetNextEmittedBlock() const {
+  for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
+    if (!graph()->blocks()->at(i)->IsReachable()) continue;
+    if (!chunk_->GetLabel(i)->HasReplacement()) return i;
+  }
+  return -1;
+}
+
+
+void LCodeGenBase::Abort(BailoutReason reason) {
+  info()->AbortOptimization(reason);
+  status_ = ABORTED;
+}
+
+
+void LCodeGenBase::Retry(BailoutReason reason) {
+  info()->RetryOptimization(reason);
+  status_ = ABORTED;
+}
+
+
+void LCodeGenBase::AddDeprecationDependency(Handle<Map> map) {
+  if (map->is_deprecated()) return Retry(kMapBecameDeprecated);
+  chunk_->AddDeprecationDependency(map);
+}
+
+
+void LCodeGenBase::AddStabilityDependency(Handle<Map> map) {
+  if (!map->is_stable()) return Retry(kMapBecameUnstable);
+  chunk_->AddStabilityDependency(map);
+}
+
+
+int LCodeGenBase::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal, zone());
+  return result;
+}
+
+
+void LCodeGenBase::WriteTranslationFrame(LEnvironment* environment,
+                                         Translation* translation) {
+  int translation_size = environment->translation_size();
+  // The output frame height does not include the parameters.
+  int height = translation_size - environment->parameter_count();
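+  // For example, with translation_size == 5 and 3 parameters, height == 2.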
+
+  switch (environment->frame_type()) {
+    case JS_FUNCTION: {
+      int shared_id = DefineDeoptimizationLiteral(
+          environment->entry() ? environment->entry()->shared()
+                               : info()->shared_info());
+      translation->BeginJSFrame(environment->ast_id(), shared_id, height);
+      if (info()->closure().is_identical_to(environment->closure())) {
+        translation->StoreJSFrameFunction();
+      } else {
+        int closure_id = DefineDeoptimizationLiteral(environment->closure());
+        translation->StoreLiteral(closure_id);
+      }
+      break;
+    }
+    case JS_CONSTRUCT: {
+      int shared_id = DefineDeoptimizationLiteral(
+          environment->entry() ? environment->entry()->shared()
+                               : info()->shared_info());
+      translation->BeginConstructStubFrame(shared_id, translation_size);
+      if (info()->closure().is_identical_to(environment->closure())) {
+        translation->StoreJSFrameFunction();
+      } else {
+        int closure_id = DefineDeoptimizationLiteral(environment->closure());
+        translation->StoreLiteral(closure_id);
+      }
+      break;
+    }
+    case JS_GETTER: {
+      DCHECK(translation_size == 1);
+      DCHECK(height == 0);
+      int shared_id = DefineDeoptimizationLiteral(
+          environment->entry() ? environment->entry()->shared()
+                               : info()->shared_info());
+      translation->BeginGetterStubFrame(shared_id);
+      if (info()->closure().is_identical_to(environment->closure())) {
+        translation->StoreJSFrameFunction();
+      } else {
+        int closure_id = DefineDeoptimizationLiteral(environment->closure());
+        translation->StoreLiteral(closure_id);
+      }
+      break;
+    }
+    case JS_SETTER: {
+      DCHECK(translation_size == 2);
+      DCHECK(height == 0);
+      int shared_id = DefineDeoptimizationLiteral(
+          environment->entry() ? environment->entry()->shared()
+                               : info()->shared_info());
+      translation->BeginSetterStubFrame(shared_id);
+      if (info()->closure().is_identical_to(environment->closure())) {
+        translation->StoreJSFrameFunction();
+      } else {
+        int closure_id = DefineDeoptimizationLiteral(environment->closure());
+        translation->StoreLiteral(closure_id);
+      }
+      break;
+    }
+    case ARGUMENTS_ADAPTOR: {
+      int shared_id = DefineDeoptimizationLiteral(
+          environment->entry() ? environment->entry()->shared()
+                               : info()->shared_info());
+      translation->BeginArgumentsAdaptorFrame(shared_id, translation_size);
+      if (info()->closure().is_identical_to(environment->closure())) {
+        translation->StoreJSFrameFunction();
+      } else {
+        int closure_id = DefineDeoptimizationLiteral(environment->closure());
+        translation->StoreLiteral(closure_id);
+      }
+      break;
+    }
+    case STUB:
+      translation->BeginCompiledStubFrame(translation_size);
+      break;
+  }
+}
+
+
+void LCodeGenBase::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  Handle<DeoptimizationInputData> data =
+      DeoptimizationInputData::New(isolate(), length, TENURED);
+
+  Handle<ByteArray> translations =
+      translations_.CreateByteArray(isolate()->factory());
+  data->SetTranslationByteArray(*translations);
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+  if (info_->IsOptimizing()) {
+    // Reference to shared function info does not change between phases.
+    AllowDeferredHandleDereference allow_handle_dereference;
+    data->SetSharedFunctionInfo(*info_->shared_info());
+  } else {
+    data->SetSharedFunctionInfo(Smi::FromInt(0));
+  }
+  data->SetWeakCellCache(Smi::FromInt(0));
+
+  Handle<FixedArray> literals =
+      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+  {
+    AllowDeferredHandleDereference copy_handles;
+    for (int i = 0; i < deoptimization_literals_.length(); i++) {
+      literals->set(i, *deoptimization_literals_[i]);
+    }
+    data->SetLiteralArray(*literals);
+  }
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, env->ast_id());
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+    data->SetPc(i, Smi::FromInt(env->pc_offset()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+void LCodeGenBase::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  DCHECK_EQ(0, deoptimization_literals_.length());
+  for (Handle<SharedFunctionInfo> function : chunk()->inlined_functions()) {
+    DefineDeoptimizationLiteral(function);
+  }
+  inlined_function_count_ = deoptimization_literals_.length();
+
+  // Define deoptimization literals for all unoptimized code objects of inlined
+  // functions. This ensures unoptimized code is kept alive by optimized code.
+  AllowDeferredHandleDereference allow_shared_function_info_dereference;
+  for (Handle<SharedFunctionInfo> function : chunk()->inlined_functions()) {
+    DefineDeoptimizationLiteral(handle(function->code()));
+  }
+}
+
+
+Deoptimizer::DeoptInfo LCodeGenBase::MakeDeoptInfo(
+    LInstruction* instr, Deoptimizer::DeoptReason deopt_reason) {
+  Deoptimizer::DeoptInfo deopt_info(instr->hydrogen_value()->position(),
+                                    instr->Mnemonic(), deopt_reason);
+  HEnterInlined* enter_inlined = instr->environment()->entry();
+  deopt_info.inlining_id = enter_inlined ? enter_inlined->inlining_id() : 0;
+  return deopt_info;
+}
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/lithium-codegen.h b/src/crankshaft/lithium-codegen.h
new file mode 100644
index 0000000..b1f7dac
--- /dev/null
+++ b/src/crankshaft/lithium-codegen.h
@@ -0,0 +1,104 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
+#define V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
+
+#include "src/bailout-reason.h"
+#include "src/compiler.h"
+#include "src/deoptimizer.h"
+
+namespace v8 {
+namespace internal {
+
+class LEnvironment;
+class LInstruction;
+class LPlatformChunk;
+
+class LCodeGenBase BASE_EMBEDDED {
+ public:
+  LCodeGenBase(LChunk* chunk,
+               MacroAssembler* assembler,
+               CompilationInfo* info);
+  virtual ~LCodeGenBase() {}
+
+  // Simple accessors.
+  MacroAssembler* masm() const { return masm_; }
+  CompilationInfo* info() const { return info_; }
+  Isolate* isolate() const { return info_->isolate(); }
+  Factory* factory() const { return isolate()->factory(); }
+  Heap* heap() const { return isolate()->heap(); }
+  Zone* zone() const { return zone_; }
+  LPlatformChunk* chunk() const { return chunk_; }
+  HGraph* graph() const;
+
+  void FPRINTF_CHECKING Comment(const char* format, ...);
+  void DeoptComment(const Deoptimizer::DeoptInfo& deopt_info);
+  static Deoptimizer::DeoptInfo MakeDeoptInfo(
+      LInstruction* instr, Deoptimizer::DeoptReason deopt_reason);
+
+  bool GenerateBody();
+  virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
+  virtual void GenerateBodyInstructionPost(LInstruction* instr) {}
+
+  virtual void EnsureSpaceForLazyDeopt(int space_needed) = 0;
+  virtual void RecordAndWritePosition(int position) = 0;
+
+  int GetNextEmittedBlock() const;
+
+  void RegisterWeakObjectsInOptimizedCode(Handle<Code> code);
+
+  void WriteTranslationFrame(LEnvironment* environment,
+                             Translation* translation);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationData(Handle<Code> code);
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  // Check that an environment assigned via AssignEnvironment is actually being
+  // used. Redundant assignments keep things alive longer than necessary, and
+  // consequently lead to worse code, so it's important to minimize this.
+  void CheckEnvironmentUsage();
+
+ protected:
+  enum Status {
+    UNUSED,
+    GENERATING,
+    DONE,
+    ABORTED
+  };
+
+  LPlatformChunk* const chunk_;
+  MacroAssembler* const masm_;
+  CompilationInfo* const info_;
+  Zone* zone_;
+  Status status_;
+  int current_block_;
+  int current_instruction_;
+  const ZoneList<LInstruction*>* instructions_;
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  TranslationBuffer translations_;
+  int inlined_function_count_;
+  int last_lazy_deopt_pc_;
+  int osr_pc_offset_;
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_generating() const { return status_ == GENERATING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  void Abort(BailoutReason reason);
+  void Retry(BailoutReason reason);
+
+  // Methods for code dependencies.
+  void AddDeprecationDependency(Handle<Map> map);
+  void AddStabilityDependency(Handle<Map> map);
+};
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_LITHIUM_CODEGEN_H_
diff --git a/src/crankshaft/lithium-inl.h b/src/crankshaft/lithium-inl.h
new file mode 100644
index 0000000..9044b4c
--- /dev/null
+++ b/src/crankshaft/lithium-inl.h
@@ -0,0 +1,114 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_LITHIUM_INL_H_
+#define V8_CRANKSHAFT_LITHIUM_INL_H_
+
+#include "src/crankshaft/lithium.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/crankshaft/ia32/lithium-ia32.h"  // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/crankshaft/x64/lithium-x64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/crankshaft/arm64/lithium-arm64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM
+#include "src/crankshaft/arm/lithium-arm.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/crankshaft/mips/lithium-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/crankshaft/mips64/lithium-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/crankshaft/ppc/lithium-ppc.h"  // NOLINT
+#elif V8_TARGET_ARCH_X87
+#include "src/crankshaft/x87/lithium-x87.h"  // NOLINT
+#else
+#error "Unknown architecture."
+#endif
+
+namespace v8 {
+namespace internal {
+
+TempIterator::TempIterator(LInstruction* instr)
+    : instr_(instr), limit_(instr->TempCount()), current_(0) {
+  SkipUninteresting();
+}
+
+
+bool TempIterator::Done() { return current_ >= limit_; }
+
+
+LOperand* TempIterator::Current() {
+  DCHECK(!Done());
+  return instr_->TempAt(current_);
+}
+
+
+void TempIterator::SkipUninteresting() {
+  while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_;
+}
+
+
+void TempIterator::Advance() {
+  ++current_;
+  SkipUninteresting();
+}
+
+
+InputIterator::InputIterator(LInstruction* instr)
+    : instr_(instr), limit_(instr->InputCount()), current_(0) {
+  SkipUninteresting();
+}
+
+
+bool InputIterator::Done() { return current_ >= limit_; }
+
+
+LOperand* InputIterator::Current() {
+  DCHECK(!Done());
+  DCHECK(instr_->InputAt(current_) != NULL);
+  return instr_->InputAt(current_);
+}
+
+
+void InputIterator::Advance() {
+  ++current_;
+  SkipUninteresting();
+}
+
+
+void InputIterator::SkipUninteresting() {
+  while (current_ < limit_) {
+    LOperand* current = instr_->InputAt(current_);
+    if (current != NULL && !current->IsConstantOperand()) break;
+    ++current_;
+  }
+}
+
+
+UseIterator::UseIterator(LInstruction* instr)
+    : input_iterator_(instr), env_iterator_(instr->environment()) {}
+
+
+bool UseIterator::Done() {
+  return input_iterator_.Done() && env_iterator_.Done();
+}
+
+
+LOperand* UseIterator::Current() {
+  DCHECK(!Done());
+  LOperand* result = input_iterator_.Done() ? env_iterator_.Current()
+                                            : input_iterator_.Current();
+  DCHECK(result != NULL);
+  return result;
+}
+
+
+void UseIterator::Advance() {
+  input_iterator_.Done() ? env_iterator_.Advance() : input_iterator_.Advance();
+}
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_LITHIUM_INL_H_
diff --git a/src/crankshaft/lithium.cc b/src/crankshaft/lithium.cc
new file mode 100644
index 0000000..82ad696
--- /dev/null
+++ b/src/crankshaft/lithium.cc
@@ -0,0 +1,666 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/lithium.h"
+
+#include "src/ast/scopes.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/crankshaft/ia32/lithium-ia32.h"  // NOLINT
+#include "src/crankshaft/ia32/lithium-codegen-ia32.h"  // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/crankshaft/x64/lithium-x64.h"  // NOLINT
+#include "src/crankshaft/x64/lithium-codegen-x64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM
+#include "src/crankshaft/arm/lithium-arm.h"  // NOLINT
+#include "src/crankshaft/arm/lithium-codegen-arm.h"  // NOLINT
+#elif V8_TARGET_ARCH_PPC
+#include "src/crankshaft/ppc/lithium-ppc.h"          // NOLINT
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/crankshaft/mips/lithium-mips.h"  // NOLINT
+#include "src/crankshaft/mips/lithium-codegen-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/crankshaft/arm64/lithium-arm64.h"  // NOLINT
+#include "src/crankshaft/arm64/lithium-codegen-arm64.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/crankshaft/mips64/lithium-mips64.h"  // NOLINT
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_X87
+#include "src/crankshaft/x87/lithium-x87.h"  // NOLINT
+#include "src/crankshaft/x87/lithium-codegen-x87.h"  // NOLINT
+#else
+#error "Unknown architecture."
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+void LOperand::PrintTo(StringStream* stream) {
+  LUnallocated* unalloc = NULL;
+  switch (kind()) {
+    case INVALID:
+      stream->Add("(0)");
+      break;
+    case UNALLOCATED:
+      unalloc = LUnallocated::cast(this);
+      stream->Add("v%d", unalloc->virtual_register());
+      if (unalloc->basic_policy() == LUnallocated::FIXED_SLOT) {
+        stream->Add("(=%dS)", unalloc->fixed_slot_index());
+        break;
+      }
+      switch (unalloc->extended_policy()) {
+        case LUnallocated::NONE:
+          break;
+        case LUnallocated::FIXED_REGISTER: {
+          int reg_index = unalloc->fixed_register_index();
+          if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
+            stream->Add("(=invalid_reg#%d)", reg_index);
+          } else {
+            const char* register_name =
+                Register::from_code(reg_index).ToString();
+            stream->Add("(=%s)", register_name);
+          }
+          break;
+        }
+        case LUnallocated::FIXED_DOUBLE_REGISTER: {
+          int reg_index = unalloc->fixed_register_index();
+          if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
+            stream->Add("(=invalid_double_reg#%d)", reg_index);
+          } else {
+            const char* double_register_name =
+                DoubleRegister::from_code(reg_index).ToString();
+            stream->Add("(=%s)", double_register_name);
+          }
+          break;
+        }
+        case LUnallocated::MUST_HAVE_REGISTER:
+          stream->Add("(R)");
+          break;
+        case LUnallocated::MUST_HAVE_DOUBLE_REGISTER:
+          stream->Add("(D)");
+          break;
+        case LUnallocated::WRITABLE_REGISTER:
+          stream->Add("(WR)");
+          break;
+        case LUnallocated::SAME_AS_FIRST_INPUT:
+          stream->Add("(1)");
+          break;
+        case LUnallocated::ANY:
+          stream->Add("(-)");
+          break;
+      }
+      break;
+    case CONSTANT_OPERAND:
+      stream->Add("[constant:%d]", index());
+      break;
+    case STACK_SLOT:
+      stream->Add("[stack:%d]", index());
+      break;
+    case DOUBLE_STACK_SLOT:
+      stream->Add("[double_stack:%d]", index());
+      break;
+    case REGISTER: {
+      int reg_index = index();
+      if (reg_index < 0 || reg_index >= Register::kNumRegisters) {
+        stream->Add("(=invalid_reg#%d|R)", reg_index);
+      } else {
+        stream->Add("[%s|R]", Register::from_code(reg_index).ToString());
+      }
+      break;
+    }
+    case DOUBLE_REGISTER: {
+      int reg_index = index();
+      if (reg_index < 0 || reg_index >= DoubleRegister::kMaxNumRegisters) {
+        stream->Add("(=invalid_double_reg#%d|R)", reg_index);
+      } else {
+        stream->Add("[%s|R]", DoubleRegister::from_code(reg_index).ToString());
+      }
+      break;
+    }
+  }
+}
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+LSubKindOperand<kOperandKind, kNumCachedOperands>*
+LSubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
+  if (cache) return;
+  cache = new LSubKindOperand[kNumCachedOperands];
+  for (int i = 0; i < kNumCachedOperands; i++) {
+    cache[i].ConvertTo(kOperandKind, i);
+  }
+}
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
+  delete[] cache;
+  cache = NULL;
+}
+
+
+void LOperand::SetUpCaches() {
+#define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache();
+  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
+#undef LITHIUM_OPERAND_SETUP
+}
+
+
+void LOperand::TearDownCaches() {
+#define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache();
+  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
+#undef LITHIUM_OPERAND_TEARDOWN
+}
+
+
+bool LParallelMove::IsRedundant() const {
+  for (int i = 0; i < move_operands_.length(); ++i) {
+    if (!move_operands_[i].IsRedundant()) return false;
+  }
+  return true;
+}
+
+
+void LParallelMove::PrintDataTo(StringStream* stream) const {
+  bool first = true;
+  for (int i = 0; i < move_operands_.length(); ++i) {
+    if (!move_operands_[i].IsEliminated()) {
+      LOperand* source = move_operands_[i].source();
+      LOperand* destination = move_operands_[i].destination();
+      if (!first) stream->Add(" ");
+      first = false;
+      if (source->Equals(destination)) {
+        destination->PrintTo(stream);
+      } else {
+        destination->PrintTo(stream);
+        stream->Add(" = ");
+        source->PrintTo(stream);
+      }
+      stream->Add(";");
+    }
+  }
+}
+
+
+void LEnvironment::PrintTo(StringStream* stream) {
+  stream->Add("[id=%d|", ast_id().ToInt());
+  if (deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
+    stream->Add("deopt_id=%d|", deoptimization_index());
+  }
+  stream->Add("parameters=%d|", parameter_count());
+  stream->Add("arguments_stack_height=%d|", arguments_stack_height());
+  for (int i = 0; i < values_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    if (values_[i] == NULL) {
+      stream->Add("[hole]");
+    } else {
+      values_[i]->PrintTo(stream);
+    }
+  }
+  stream->Add("]");
+}
+
+
+void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  pointer_operands_.Add(op, zone);
+}
+
+
+void LPointerMap::RemovePointer(LOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (pointer_operands_[i]->Equals(op)) {
+      pointer_operands_.Remove(i);
+      --i;
+    }
+  }
+}
+
+
+void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  untagged_operands_.Add(op, zone);
+}
+
+
+void LPointerMap::PrintTo(StringStream* stream) {
+  stream->Add("{");
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (i != 0) stream->Add(";");
+    pointer_operands_[i]->PrintTo(stream);
+  }
+  stream->Add("}");
+}
+
+
+int StackSlotOffset(int index) {
+  if (index >= 0) {
+    // Local or spill slot. Skip the frame pointer, function, and
+    // context in the fixed part of the frame.
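+    // For example, slot 0 is at offset
+    // -kPointerSize - StandardFrameConstants::kFixedFrameSizeFromFp.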
+    return -(index + 1) * kPointerSize -
+        StandardFrameConstants::kFixedFrameSizeFromFp;
+  } else {
+    // Incoming parameter. Skip the return address.
+    return -(index + 1) * kPointerSize + kFPOnStackSize + kPCOnStackSize;
+  }
+}
+
+
+LChunk::LChunk(CompilationInfo* info, HGraph* graph)
+    : spill_slot_count_(0),
+      info_(info),
+      graph_(graph),
+      instructions_(32, info->zone()),
+      pointer_maps_(8, info->zone()),
+      inlined_functions_(1, info->zone()),
+      deprecation_dependencies_(32, info->zone()),
+      stability_dependencies_(8, info->zone()) {}
+
+
+LLabel* LChunk::GetLabel(int block_id) const {
+  HBasicBlock* block = graph_->blocks()->at(block_id);
+  int first_instruction = block->first_instruction_index();
+  return LLabel::cast(instructions_[first_instruction]);
+}
+
+
+int LChunk::LookupDestination(int block_id) const {
+  LLabel* cur = GetLabel(block_id);
+  while (cur->replacement() != NULL) {
+    cur = cur->replacement();
+  }
+  return cur->block_id();
+}
+
+Label* LChunk::GetAssemblyLabel(int block_id) const {
+  LLabel* label = GetLabel(block_id);
+  DCHECK(!label->HasReplacement());
+  return label->label();
+}
+
+
+void LChunk::MarkEmptyBlocks() {
+  LPhase phase("L_Mark empty blocks", this);
+  for (int i = 0; i < graph()->blocks()->length(); ++i) {
+    HBasicBlock* block = graph()->blocks()->at(i);
+    int first = block->first_instruction_index();
+    int last = block->last_instruction_index();
+    LInstruction* first_instr = instructions()->at(first);
+    LInstruction* last_instr = instructions()->at(last);
+
+    LLabel* label = LLabel::cast(first_instr);
+    if (last_instr->IsGoto()) {
+      LGoto* goto_instr = LGoto::cast(last_instr);
+      if (label->IsRedundant() &&
+          !label->is_loop_header()) {
+        bool can_eliminate = true;
+        for (int i = first + 1; i < last && can_eliminate; ++i) {
+          LInstruction* cur = instructions()->at(i);
+          if (cur->IsGap()) {
+            LGap* gap = LGap::cast(cur);
+            if (!gap->IsRedundant()) {
+              can_eliminate = false;
+            }
+          } else {
+            can_eliminate = false;
+          }
+        }
+        if (can_eliminate) {
+          label->set_replacement(GetLabel(goto_instr->block_id()));
+        }
+      }
+    }
+  }
+}
+
+
+void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
+  LInstructionGap* gap = new (zone()) LInstructionGap(block);
+  gap->set_hydrogen_value(instr->hydrogen_value());
+  int index = -1;
+  if (instr->IsControl()) {
+    instructions_.Add(gap, zone());
+    index = instructions_.length();
+    instructions_.Add(instr, zone());
+  } else {
+    index = instructions_.length();
+    instructions_.Add(instr, zone());
+    instructions_.Add(gap, zone());
+  }
+  if (instr->HasPointerMap()) {
+    pointer_maps_.Add(instr->pointer_map(), zone());
+    instr->pointer_map()->set_lithium_position(index);
+  }
+}
+
+
+LConstantOperand* LChunk::DefineConstantOperand(HConstant* constant) {
+  return LConstantOperand::Create(constant->id(), zone());
+}
+
+
+int LChunk::GetParameterStackSlot(int index) const {
+  // The receiver is at index 0, the first parameter at index 1, so we
+  // shift all parameter indexes down by the number of parameters, and
+  // make sure they end up negative so they are distinguishable from
+  // spill slots.
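+  // For example, with two parameters the receiver (index 0) maps to
+  // slot -3 and the first parameter (index 1) to slot -2.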
+  int result = index - info()->num_parameters() - 1;
+
+  DCHECK(result < 0);
+  return result;
+}
+
+
+// A parameter relative to ebp in the arguments stub.
+int LChunk::ParameterAt(int index) {
+  DCHECK(-1 <= index);  // -1 is the receiver.
+  return (1 + info()->scope()->num_parameters() - index) *
+      kPointerSize;
+}
+
+
+LGap* LChunk::GetGapAt(int index) const {
+  return LGap::cast(instructions_[index]);
+}
+
+
+bool LChunk::IsGapAt(int index) const {
+  return instructions_[index]->IsGap();
+}
+
+
+int LChunk::NearestGapPos(int index) const {
+  while (!IsGapAt(index)) index--;
+  return index;
+}
+
+
+void LChunk::AddGapMove(int index, LOperand* from, LOperand* to) {
+  GetGapAt(index)->GetOrCreateParallelMove(
+      LGap::START, zone())->AddMove(from, to, zone());
+}
+
+
+HConstant* LChunk::LookupConstant(LConstantOperand* operand) const {
+  return HConstant::cast(graph_->LookupValue(operand->index()));
+}
+
+
+Representation LChunk::LookupLiteralRepresentation(
+    LConstantOperand* operand) const {
+  return graph_->LookupValue(operand->index())->representation();
+}
+
+
+void LChunk::CommitDependencies(Handle<Code> code) const {
+  if (!code->is_optimized_code()) return;
+  HandleScope scope(isolate());
+
+  for (Handle<Map> map : deprecation_dependencies_) {
+    DCHECK(!map->is_deprecated());
+    DCHECK(map->CanBeDeprecated());
+    Map::AddDependentCode(map, DependentCode::kTransitionGroup, code);
+  }
+
+  for (Handle<Map> map : stability_dependencies_) {
+    DCHECK(map->is_stable());
+    DCHECK(map->CanTransition());
+    Map::AddDependentCode(map, DependentCode::kPrototypeCheckGroup, code);
+  }
+
+  info_->dependencies()->Commit(code);
+}
+
+
+LChunk* LChunk::NewChunk(HGraph* graph) {
+  DisallowHandleAllocation no_handles;
+  DisallowHeapAllocation no_gc;
+  graph->DisallowAddingNewValues();
+  int values = graph->GetMaximumValueID();
+  CompilationInfo* info = graph->info();
+  if (values > LUnallocated::kMaxVirtualRegisters) {
+    info->AbortOptimization(kNotEnoughVirtualRegistersForValues);
+    return NULL;
+  }
+  LAllocator allocator(values, graph);
+  LChunkBuilder builder(info, graph, &allocator);
+  LChunk* chunk = builder.Build();
+  if (chunk == NULL) return NULL;
+
+  if (!allocator.Allocate(chunk)) {
+    info->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
+    return NULL;
+  }
+
+  chunk->set_allocated_double_registers(
+      allocator.assigned_double_registers());
+
+  return chunk;
+}
+
+
+Handle<Code> LChunk::Codegen() {
+  MacroAssembler assembler(info()->isolate(), NULL, 0,
+                           CodeObjectRequired::kYes);
+  LOG_CODE_EVENT(info()->isolate(),
+                 CodeStartLinePosInfoRecordEvent(
+                     assembler.positions_recorder()));
+  // Code serializer only takes unoptimized code.
+  DCHECK(!info()->will_serialize());
+  LCodeGen generator(this, &assembler, info());
+
+  MarkEmptyBlocks();
+
+  if (generator.GenerateCode()) {
+    generator.CheckEnvironmentUsage();
+    CodeGenerator::MakeCodePrologue(info(), "optimized");
+    Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&assembler, info());
+    generator.FinishCode(code);
+    CommitDependencies(code);
+    code->set_is_crankshafted(true);
+    void* jit_handler_data =
+        assembler.positions_recorder()->DetachJITHandlerData();
+    LOG_CODE_EVENT(info()->isolate(),
+                   CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
+
+    CodeGenerator::PrintCode(code, info());
+    DCHECK(!(info()->isolate()->serializer_enabled() &&
+             info()->GetMustNotHaveEagerFrame() &&
+             generator.NeedsEagerFrame()));
+    return code;
+  }
+  assembler.AbortedCodeGeneration();
+  return Handle<Code>::null();
+}
+
+
+void LChunk::set_allocated_double_registers(BitVector* allocated_registers) {
+  allocated_double_registers_ = allocated_registers;
+  BitVector* doubles = allocated_double_registers();
+  BitVector::Iterator iterator(doubles);
+  while (!iterator.Done()) {
+    if (info()->saves_caller_doubles()) {
+      if (kDoubleSize == kPointerSize * 2) {
+        spill_slot_count_ += 2;
+      } else {
+        spill_slot_count_++;
+      }
+    }
+    iterator.Advance();
+  }
+}
+
+
+void LChunkBuilderBase::Abort(BailoutReason reason) {
+  info()->AbortOptimization(reason);
+  status_ = ABORTED;
+}
+
+
+void LChunkBuilderBase::Retry(BailoutReason reason) {
+  info()->RetryOptimization(reason);
+  status_ = ABORTED;
+}
+
+
+LEnvironment* LChunkBuilderBase::CreateEnvironment(
+    HEnvironment* hydrogen_env, int* argument_index_accumulator,
+    ZoneList<HValue*>* objects_to_materialize) {
+  if (hydrogen_env == NULL) return NULL;
+
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator,
+                        objects_to_materialize);
+  BailoutId ast_id = hydrogen_env->ast_id();
+  DCHECK(!ast_id.IsNone() ||
+         hydrogen_env->frame_type() != JS_FUNCTION);
+
+  int omitted_count = (hydrogen_env->frame_type() == JS_FUNCTION)
+                          ? 0
+                          : hydrogen_env->specials_count();
+
+  int value_count = hydrogen_env->length() - omitted_count;
+  LEnvironment* result =
+      new(zone()) LEnvironment(hydrogen_env->closure(),
+                               hydrogen_env->frame_type(),
+                               ast_id,
+                               hydrogen_env->parameter_count(),
+                               argument_count_,
+                               value_count,
+                               outer,
+                               hydrogen_env->entry(),
+                               zone());
+  int argument_index = *argument_index_accumulator;
+
+  // Store the environment description into the environment
+  // (with holes for nested objects)
+  for (int i = 0; i < hydrogen_env->length(); ++i) {
+    if (hydrogen_env->is_special_index(i) &&
+        hydrogen_env->frame_type() != JS_FUNCTION) {
+      continue;
+    }
+    LOperand* op;
+    HValue* value = hydrogen_env->values()->at(i);
+    CHECK(!value->IsPushArguments());  // Do not deopt outgoing arguments
+    if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+      op = LEnvironment::materialization_marker();
+    } else {
+      op = UseAny(value);
+    }
+    result->AddValue(op,
+                     value->representation(),
+                     value->CheckFlag(HInstruction::kUint32));
+  }
+
+  // Recursively store the nested objects into the environment
+  for (int i = 0; i < hydrogen_env->length(); ++i) {
+    if (hydrogen_env->is_special_index(i)) continue;
+
+    HValue* value = hydrogen_env->values()->at(i);
+    if (value->IsArgumentsObject() || value->IsCapturedObject()) {
+      AddObjectToMaterialize(value, objects_to_materialize, result);
+    }
+  }
+
+  if (hydrogen_env->frame_type() == JS_FUNCTION) {
+    *argument_index_accumulator = argument_index;
+  }
+
+  return result;
+}
+
+
+// Add an object to the supplied environment and object materialization list.
+//
+// Notes:
+//
+// We are building three lists here:
+//
+// 1. In the result->object_mapping_ list (added to by the
+//    LEnvironment::Add*Object methods), we store the lengths (number
+//    of fields) of the captured objects in depth-first traversal order, or
+//    in case of duplicated objects, we store the index to the duplicate object
+//    (with a tag to differentiate between captured and duplicated objects).
+//
+// 2. The object fields are stored in the result->values_ list
+//    (added to by the LEnvironment::AddValue method) sequentially as lists
+//    of fields with holes for nested objects (the holes will be expanded
+//    later by LCodeGen::AddToTranslation according to the
+//    LEnvironment::object_mapping_ list).
+//
+// 3. The auxiliary objects_to_materialize array stores the hydrogen values
+//    in the same order as result->object_mapping_ list. This is used
+//    to detect duplicate values and calculate the corresponding object index.
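+//
+// For example, a captured object with two plain fields contributes one
+// length entry to result->object_mapping_ and two operands to
+// result->values_; a later duplicate of the same object contributes only
+// a duplicate-object index to object_mapping_, and its fields are not
+// stored again.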
+void LChunkBuilderBase::AddObjectToMaterialize(HValue* value,
+    ZoneList<HValue*>* objects_to_materialize, LEnvironment* result) {
+  int object_index = objects_to_materialize->length();
+  // Store the hydrogen value into the de-duplication array
+  objects_to_materialize->Add(value, zone());
+  // Find out whether we are storing a duplicated value
+  int previously_materialized_object = -1;
+  for (int prev = 0; prev < object_index; ++prev) {
+    if (objects_to_materialize->at(prev) == value) {
+      previously_materialized_object = prev;
+      break;
+    }
+  }
+  // Store the captured object length (or duplicated object index)
+  // into the environment. For duplicated objects, we stop here.
+  int length = value->OperandCount();
+  bool is_arguments = value->IsArgumentsObject();
+  if (previously_materialized_object >= 0) {
+    result->AddDuplicateObject(previously_materialized_object);
+    return;
+  } else {
+    result->AddNewObject(is_arguments ? length - 1 : length, is_arguments);
+  }
+  // Store the captured object's fields into the environment
+  for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+    LOperand* op;
+    HValue* arg_value = value->OperandAt(i);
+    if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
+      // Insert a hole for nested objects
+      op = LEnvironment::materialization_marker();
+    } else {
+      DCHECK(!arg_value->IsPushArguments());
+      // For ordinary values, tell the register allocator we need the value
+      // to be alive here
+      op = UseAny(arg_value);
+    }
+    result->AddValue(op,
+                     arg_value->representation(),
+                     arg_value->CheckFlag(HInstruction::kUint32));
+  }
+  // Recursively store all the nested captured objects into the environment
+  for (int i = is_arguments ? 1 : 0; i < length; ++i) {
+    HValue* arg_value = value->OperandAt(i);
+    if (arg_value->IsArgumentsObject() || arg_value->IsCapturedObject()) {
+      AddObjectToMaterialize(arg_value, objects_to_materialize, result);
+    }
+  }
+}
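+
+// Illustrative walk-through (exposition only, hypothetical names): suppose a
+// captured object c has two fields {x, c}, i.e. it references itself. Then
+// AddObjectToMaterialize(c, ...) leaves the three lists as
+//   objects_to_materialize:  [c, c]           (the second entry is the dupe)
+//   result->object_mapping_: [NewObject(2), DuplicateObject(0)]
+//   result->values_ gains:   [x, <hole>]      (the hole marks the nested c)
+// and LCodeGen::AddToTranslation later expands each hole from the mapping.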
+
+
+LPhase::~LPhase() {
+  if (ShouldProduceTraceOutput()) {
+    isolate()->GetHTracer()->TraceLithium(name(), chunk_);
+  }
+}
+
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/lithium.h b/src/crankshaft/lithium.h
new file mode 100644
index 0000000..10e980e
--- /dev/null
+++ b/src/crankshaft/lithium.h
@@ -0,0 +1,840 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_LITHIUM_H_
+#define V8_CRANKSHAFT_LITHIUM_H_
+
+#include <set>
+
+#include "src/allocation.h"
+#include "src/bailout-reason.h"
+#include "src/crankshaft/hydrogen.h"
+#include "src/safepoint-table.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+
+#define LITHIUM_OPERAND_LIST(V)               \
+  V(ConstantOperand, CONSTANT_OPERAND,  128)  \
+  V(StackSlot,       STACK_SLOT,        128)  \
+  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)  \
+  V(Register,        REGISTER,          16)   \
+  V(DoubleRegister,  DOUBLE_REGISTER,   16)
+
+class LOperand : public ZoneObject {
+ public:
+  enum Kind {
+    INVALID,
+    UNALLOCATED,
+    CONSTANT_OPERAND,
+    STACK_SLOT,
+    DOUBLE_STACK_SLOT,
+    REGISTER,
+    DOUBLE_REGISTER
+  };
+
+  LOperand() : value_(KindField::encode(INVALID)) { }
+
+  Kind kind() const { return KindField::decode(value_); }
+  int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
+#define LITHIUM_OPERAND_PREDICATE(name, type, number) \
+  bool Is##name() const { return kind() == type; }
+  LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE)
+  LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+  LITHIUM_OPERAND_PREDICATE(Ignored, INVALID, 0)
+#undef LITHIUM_OPERAND_PREDICATE
+  bool Equals(LOperand* other) const { return value_ == other->value_; }
+
+  void PrintTo(StringStream* stream);
+  void ConvertTo(Kind kind, int index) {
+    if (kind == REGISTER) DCHECK(index >= 0);
+    value_ = KindField::encode(kind);
+    value_ |= index << kKindFieldWidth;
+    DCHECK(this->index() == index);
+  }
+
+  // Calls SetUpCache()/TearDownCache() for each subclass.
+  static void SetUpCaches();
+  static void TearDownCaches();
+
+ protected:
+  static const int kKindFieldWidth = 3;
+  class KindField : public BitField<Kind, 0, kKindFieldWidth> { };
+
+  LOperand(Kind kind, int index) { ConvertTo(kind, index); }
+
+  unsigned value_;
+};
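+
+// Illustrative note (exposition only): with kKindFieldWidth == 3 the kind
+// lives in the low three bits of value_ and the index above them, e.g.
+//   LOperand(REGISTER, 5)  ==>  value_ == (5 << 3) | REGISTER
+// index() decodes with an arithmetic shift on a signed cast, so negative
+// indices (e.g. parameter stack slots) survive the round trip.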
+
+
+class LUnallocated : public LOperand {
+ public:
+  enum BasicPolicy {
+    FIXED_SLOT,
+    EXTENDED_POLICY
+  };
+
+  enum ExtendedPolicy {
+    NONE,
+    ANY,
+    FIXED_REGISTER,
+    FIXED_DOUBLE_REGISTER,
+    MUST_HAVE_REGISTER,
+    MUST_HAVE_DOUBLE_REGISTER,
+    WRITABLE_REGISTER,
+    SAME_AS_FIRST_INPUT
+  };
+
+  // Lifetime of an operand inside the instruction.
+  enum Lifetime {
+    // A USED_AT_START operand is guaranteed to be live only at the start of
+    // the instruction. The register allocator is free to assign the same
+    // register to some other operand used inside the instruction (i.e. a
+    // temporary or the output).
+    USED_AT_START,
+
+    // A USED_AT_END operand is treated as live until the end of the
+    // instruction. This means that the register allocator will not reuse its
+    // register for any other operand inside the instruction.
+    USED_AT_END
+  };
+
+  explicit LUnallocated(ExtendedPolicy policy) : LOperand(UNALLOCATED, 0) {
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(USED_AT_END);
+  }
+
+  LUnallocated(BasicPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
+    DCHECK(policy == FIXED_SLOT);
+    value_ |= BasicPolicyField::encode(policy);
+    value_ |= index << FixedSlotIndexField::kShift;
+    DCHECK(this->fixed_slot_index() == index);
+  }
+
+  LUnallocated(ExtendedPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
+    DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(USED_AT_END);
+    value_ |= FixedRegisterField::encode(index);
+  }
+
+  LUnallocated(ExtendedPolicy policy, Lifetime lifetime)
+      : LOperand(UNALLOCATED, 0) {
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(lifetime);
+  }
+
+  LUnallocated* CopyUnconstrained(Zone* zone) {
+    LUnallocated* result = new(zone) LUnallocated(ANY);
+    result->set_virtual_register(virtual_register());
+    return result;
+  }
+
+  static LUnallocated* cast(LOperand* op) {
+    DCHECK(op->IsUnallocated());
+    return reinterpret_cast<LUnallocated*>(op);
+  }
+
+  // The encoding used for LUnallocated operands depends on the policy that is
+  // stored within the operand. The FIXED_SLOT policy uses a compact encoding
+  // because it accommodates a larger payload.
+  //
+  // For FIXED_SLOT policy:
+  //     +------------------------------------------+
+  //     |       slot_index      |  vreg  | 0 | 001 |
+  //     +------------------------------------------+
+  //
+  // For all other (extended) policies:
+  //     +------------------------------------------+
+  //     |  reg_index  | L | PPP |  vreg  | 1 | 001 |    L ... Lifetime
+  //     +------------------------------------------+    P ... Policy
+  //
+  // The slot index is a signed value which requires us to decode it manually
+  // instead of using the BitField utility class.
+
+  // The superclass has a KindField.
+  STATIC_ASSERT(kKindFieldWidth == 3);
+
+  // BitFields for all unallocated operands.
+  class BasicPolicyField     : public BitField<BasicPolicy,     3,  1> {};
+  class VirtualRegisterField : public BitField<unsigned,        4, 18> {};
+
+  // BitFields specific to BasicPolicy::FIXED_SLOT.
+  class FixedSlotIndexField  : public BitField<int,            22, 10> {};
+
+  // BitFields specific to BasicPolicy::EXTENDED_POLICY.
+  class ExtendedPolicyField  : public BitField<ExtendedPolicy, 22,  3> {};
+  class LifetimeField        : public BitField<Lifetime,       25,  1> {};
+  class FixedRegisterField   : public BitField<int,            26,  6> {};
+
+  static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+  static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
+  static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
+  static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
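+
+  // Worked numbers (illustrative): FixedSlotIndexField is 10 bits wide, so
+  // kMaxFixedSlotIndex == 511 and kMinFixedSlotIndex == -512; the signed slot
+  // index occupies the top bits of value_ and is sign-extended on decode.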
+
+  // Predicates for the operand policy.
+  bool HasAnyPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+        extended_policy() == ANY;
+  }
+  bool HasFixedPolicy() const {
+    return basic_policy() == FIXED_SLOT ||
+        extended_policy() == FIXED_REGISTER ||
+        extended_policy() == FIXED_DOUBLE_REGISTER;
+  }
+  bool HasRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY && (
+        extended_policy() == WRITABLE_REGISTER ||
+        extended_policy() == MUST_HAVE_REGISTER);
+  }
+  bool HasDoubleRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+        extended_policy() == MUST_HAVE_DOUBLE_REGISTER;
+  }
+  bool HasSameAsInputPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+        extended_policy() == SAME_AS_FIRST_INPUT;
+  }
+  bool HasFixedSlotPolicy() const {
+    return basic_policy() == FIXED_SLOT;
+  }
+  bool HasFixedRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+        extended_policy() == FIXED_REGISTER;
+  }
+  bool HasFixedDoubleRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+        extended_policy() == FIXED_DOUBLE_REGISTER;
+  }
+  bool HasWritableRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+        extended_policy() == WRITABLE_REGISTER;
+  }
+
+  // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
+  BasicPolicy basic_policy() const {
+    return BasicPolicyField::decode(value_);
+  }
+
+  // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
+  ExtendedPolicy extended_policy() const {
+    DCHECK(basic_policy() == EXTENDED_POLICY);
+    return ExtendedPolicyField::decode(value_);
+  }
+
+  // [fixed_slot_index]: Only for FIXED_SLOT.
+  int fixed_slot_index() const {
+    DCHECK(HasFixedSlotPolicy());
+    return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
+  }
+
+  // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+  int fixed_register_index() const {
+    DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+    return FixedRegisterField::decode(value_);
+  }
+
+  // [virtual_register]: The virtual register ID for this operand.
+  int virtual_register() const {
+    return VirtualRegisterField::decode(value_);
+  }
+  void set_virtual_register(unsigned id) {
+    value_ = VirtualRegisterField::update(value_, id);
+  }
+
+  // [lifetime]: Only for non-FIXED_SLOT.
+  bool IsUsedAtStart() {
+    DCHECK(basic_policy() == EXTENDED_POLICY);
+    return LifetimeField::decode(value_) == USED_AT_START;
+  }
+
+  static bool TooManyParameters(int num_parameters) {
+    const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
+    return num_parameters + 1 > parameter_limit;
+  }
+
+  static bool TooManyParametersOrStackSlots(int num_parameters,
+                                            int num_stack_slots) {
+    const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
+    return num_parameters + 1 + num_stack_slots > locals_limit;
+  }
+};
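+
+// Usage sketch (hypothetical, for illustration only): typical operand
+// requests a chunk builder might hand to the register allocator:
+//   new (zone) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);  // any reg
+//   new (zone) LUnallocated(LUnallocated::FIXED_REGISTER, 2);   // reg #2
+//   new (zone) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+//                           LUnallocated::USED_AT_START);       // short-lived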
+
+
+class LMoveOperands final BASE_EMBEDDED {
+ public:
+  LMoveOperands(LOperand* source, LOperand* destination)
+      : source_(source), destination_(destination) {
+  }
+
+  LOperand* source() const { return source_; }
+  void set_source(LOperand* operand) { source_ = operand; }
+
+  LOperand* destination() const { return destination_; }
+  void set_destination(LOperand* operand) { destination_ = operand; }
+
+  // The gap resolver marks moves as "in-progress" by clearing the
+  // destination (but not the source).
+  bool IsPending() const {
+    return destination_ == NULL && source_ != NULL;
+  }
+
+  // True if this move's source is the given operand, i.e. this move blocks
+  // a move into that operand.
+  bool Blocks(LOperand* operand) const {
+    return !IsEliminated() && source()->Equals(operand);
+  }
+
+  // A move is redundant if it's been eliminated, if its source and
+  // destination are the same, or if its destination is unneeded or constant.
+  bool IsRedundant() const {
+    return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
+           (destination_ != NULL && destination_->IsConstantOperand());
+  }
+
+  bool IsIgnored() const {
+    return destination_ != NULL && destination_->IsIgnored();
+  }
+
+  // We clear both operands to indicate a move that has been eliminated.
+  void Eliminate() { source_ = destination_ = NULL; }
+  bool IsEliminated() const {
+    DCHECK(source_ != NULL || destination_ == NULL);
+    return source_ == NULL;
+  }
+
+ private:
+  LOperand* source_;
+  LOperand* destination_;
+};
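+
+// Illustrative scenario (exposition only): for the parallel move {A->B, B->A}
+// the gap resolver marks one move pending (destination cleared) while it
+// processes the other, detects the cycle via Blocks(), and typically breaks
+// it with a scratch location; moves for which IsRedundant() holds (e.g. A->A)
+// are skipped outright.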
+
+
+template <LOperand::Kind kOperandKind, int kNumCachedOperands>
+class LSubKindOperand final : public LOperand {
+ public:
+  static LSubKindOperand* Create(int index, Zone* zone) {
+    DCHECK(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new(zone) LSubKindOperand(index);
+  }
+
+  static LSubKindOperand* cast(LOperand* op) {
+    DCHECK(op->kind() == kOperandKind);
+    return reinterpret_cast<LSubKindOperand*>(op);
+  }
+
+  static void SetUpCache();
+  static void TearDownCache();
+
+ private:
+  static LSubKindOperand* cache;
+
+  LSubKindOperand() : LOperand() { }
+  explicit LSubKindOperand(int index) : LOperand(kOperandKind, index) { }
+};
+
+
+#define LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number)   \
+typedef LSubKindOperand<LOperand::type, number> L##name;
+LITHIUM_OPERAND_LIST(LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS)
+#undef LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS
+
+
+class LParallelMove final : public ZoneObject {
+ public:
+  explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { }
+
+  void AddMove(LOperand* from, LOperand* to, Zone* zone) {
+    move_operands_.Add(LMoveOperands(from, to), zone);
+  }
+
+  bool IsRedundant() const;
+
+  ZoneList<LMoveOperands>* move_operands() { return &move_operands_; }
+
+  void PrintDataTo(StringStream* stream) const;
+
+ private:
+  ZoneList<LMoveOperands> move_operands_;
+};
+
+
+class LPointerMap final : public ZoneObject {
+ public:
+  explicit LPointerMap(Zone* zone)
+      : pointer_operands_(8, zone),
+        untagged_operands_(0, zone),
+        lithium_position_(-1) { }
+
+  const ZoneList<LOperand*>* GetNormalizedOperands() {
+    for (int i = 0; i < untagged_operands_.length(); ++i) {
+      RemovePointer(untagged_operands_[i]);
+    }
+    untagged_operands_.Clear();
+    return &pointer_operands_;
+  }
+  int lithium_position() const { return lithium_position_; }
+
+  void set_lithium_position(int pos) {
+    DCHECK(lithium_position_ == -1);
+    lithium_position_ = pos;
+  }
+
+  void RecordPointer(LOperand* op, Zone* zone);
+  void RemovePointer(LOperand* op);
+  void RecordUntagged(LOperand* op, Zone* zone);
+  void PrintTo(StringStream* stream);
+
+ private:
+  ZoneList<LOperand*> pointer_operands_;
+  ZoneList<LOperand*> untagged_operands_;
+  int lithium_position_;
+};
+
+
+class LEnvironment final : public ZoneObject {
+ public:
+  LEnvironment(Handle<JSFunction> closure,
+               FrameType frame_type,
+               BailoutId ast_id,
+               int parameter_count,
+               int argument_count,
+               int value_count,
+               LEnvironment* outer,
+               HEnterInlined* entry,
+               Zone* zone)
+      : closure_(closure),
+        frame_type_(frame_type),
+        arguments_stack_height_(argument_count),
+        deoptimization_index_(Safepoint::kNoDeoptimizationIndex),
+        translation_index_(-1),
+        ast_id_(ast_id),
+        translation_size_(value_count),
+        parameter_count_(parameter_count),
+        pc_offset_(-1),
+        values_(value_count, zone),
+        is_tagged_(value_count, zone),
+        is_uint32_(value_count, zone),
+        object_mapping_(0, zone),
+        outer_(outer),
+        entry_(entry),
+        zone_(zone),
+        has_been_used_(false) { }
+
+  Handle<JSFunction> closure() const { return closure_; }
+  FrameType frame_type() const { return frame_type_; }
+  int arguments_stack_height() const { return arguments_stack_height_; }
+  int deoptimization_index() const { return deoptimization_index_; }
+  int translation_index() const { return translation_index_; }
+  BailoutId ast_id() const { return ast_id_; }
+  int translation_size() const { return translation_size_; }
+  int parameter_count() const { return parameter_count_; }
+  int pc_offset() const { return pc_offset_; }
+  const ZoneList<LOperand*>* values() const { return &values_; }
+  LEnvironment* outer() const { return outer_; }
+  HEnterInlined* entry() { return entry_; }
+  Zone* zone() const { return zone_; }
+
+  bool has_been_used() const { return has_been_used_; }
+  void set_has_been_used() { has_been_used_ = true; }
+
+  void AddValue(LOperand* operand,
+                Representation representation,
+                bool is_uint32) {
+    values_.Add(operand, zone());
+    if (representation.IsSmiOrTagged()) {
+      DCHECK(!is_uint32);
+      is_tagged_.Add(values_.length() - 1, zone());
+    }
+
+    if (is_uint32) {
+      is_uint32_.Add(values_.length() - 1, zone());
+    }
+  }
+
+  bool HasTaggedValueAt(int index) const {
+    return is_tagged_.Contains(index);
+  }
+
+  bool HasUint32ValueAt(int index) const {
+    return is_uint32_.Contains(index);
+  }
+
+  void AddNewObject(int length, bool is_arguments) {
+    uint32_t encoded = LengthOrDupeField::encode(length) |
+                       IsArgumentsField::encode(is_arguments) |
+                       IsDuplicateField::encode(false);
+    object_mapping_.Add(encoded, zone());
+  }
+
+  void AddDuplicateObject(int dupe_of) {
+    uint32_t encoded = LengthOrDupeField::encode(dupe_of) |
+                       IsDuplicateField::encode(true);
+    object_mapping_.Add(encoded, zone());
+  }
+
+  int ObjectDuplicateOfAt(int index) {
+    DCHECK(ObjectIsDuplicateAt(index));
+    return LengthOrDupeField::decode(object_mapping_[index]);
+  }
+
+  int ObjectLengthAt(int index) {
+    DCHECK(!ObjectIsDuplicateAt(index));
+    return LengthOrDupeField::decode(object_mapping_[index]);
+  }
+
+  bool ObjectIsArgumentsAt(int index) {
+    DCHECK(!ObjectIsDuplicateAt(index));
+    return IsArgumentsField::decode(object_mapping_[index]);
+  }
+
+  bool ObjectIsDuplicateAt(int index) {
+    return IsDuplicateField::decode(object_mapping_[index]);
+  }
+
+  void Register(int deoptimization_index,
+                int translation_index,
+                int pc_offset) {
+    DCHECK(!HasBeenRegistered());
+    deoptimization_index_ = deoptimization_index;
+    translation_index_ = translation_index;
+    pc_offset_ = pc_offset;
+  }
+  bool HasBeenRegistered() const {
+    return deoptimization_index_ != Safepoint::kNoDeoptimizationIndex;
+  }
+
+  void PrintTo(StringStream* stream);
+
+  // Marker value indicating a de-materialized object.
+  static LOperand* materialization_marker() { return NULL; }
+
+  // Encoding used for the object_mapping map below.
+  class LengthOrDupeField : public BitField<int,   0, 30> { };
+  class IsArgumentsField  : public BitField<bool, 30,  1> { };
+  class IsDuplicateField  : public BitField<bool, 31,  1> { };
+
+ private:
+  Handle<JSFunction> closure_;
+  FrameType frame_type_;
+  int arguments_stack_height_;
+  int deoptimization_index_;
+  int translation_index_;
+  BailoutId ast_id_;
+  int translation_size_;
+  int parameter_count_;
+  int pc_offset_;
+
+  // Value array: [parameters] [locals] [expression stack] [de-materialized].
+  //              |>--------- translation_size ---------<|
+  ZoneList<LOperand*> values_;
+  GrowableBitVector is_tagged_;
+  GrowableBitVector is_uint32_;
+
+  // Map with encoded information about materialization_marker operands.
+  ZoneList<uint32_t> object_mapping_;
+
+  LEnvironment* outer_;
+  HEnterInlined* entry_;
+  Zone* zone_;
+  bool has_been_used_;
+};
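+
+// Illustrative note (exposition only): AddValue keeps the two bit vectors in
+// step with values_, e.g. adding a tagged value as element 3 sets bit 3 of
+// is_tagged_, and adding a uint32 value sets the matching bit of is_uint32_;
+// HasTaggedValueAt/HasUint32ValueAt simply read those bits back.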
+
+
+// Iterates over the non-null, non-constant operands in an environment.
+class ShallowIterator final BASE_EMBEDDED {
+ public:
+  explicit ShallowIterator(LEnvironment* env)
+      : env_(env),
+        limit_(env != NULL ? env->values()->length() : 0),
+        current_(0) {
+    SkipUninteresting();
+  }
+
+  bool Done() { return current_ >= limit_; }
+
+  LOperand* Current() {
+    DCHECK(!Done());
+    DCHECK(env_->values()->at(current_) != NULL);
+    return env_->values()->at(current_);
+  }
+
+  void Advance() {
+    DCHECK(!Done());
+    ++current_;
+    SkipUninteresting();
+  }
+
+  LEnvironment* env() { return env_; }
+
+ private:
+  bool ShouldSkip(LOperand* op) {
+    return op == NULL || op->IsConstantOperand();
+  }
+
+  // Skip until something interesting, beginning with and including current_.
+  void SkipUninteresting() {
+    while (current_ < limit_ && ShouldSkip(env_->values()->at(current_))) {
+      ++current_;
+    }
+  }
+
+  LEnvironment* env_;
+  int limit_;
+  int current_;
+};
+
+
+// Iterator for non-null, non-constant operands, including those of outer
+// environments.
+class DeepIterator final BASE_EMBEDDED {
+ public:
+  explicit DeepIterator(LEnvironment* env)
+      : current_iterator_(env) {
+    SkipUninteresting();
+  }
+
+  bool Done() { return current_iterator_.Done(); }
+
+  LOperand* Current() {
+    DCHECK(!current_iterator_.Done());
+    DCHECK(current_iterator_.Current() != NULL);
+    return current_iterator_.Current();
+  }
+
+  void Advance() {
+    current_iterator_.Advance();
+    SkipUninteresting();
+  }
+
+ private:
+  void SkipUninteresting() {
+    while (current_iterator_.env() != NULL && current_iterator_.Done()) {
+      current_iterator_ = ShallowIterator(current_iterator_.env()->outer());
+    }
+  }
+
+  ShallowIterator current_iterator_;
+};
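+
+// Usage sketch (hypothetical): visiting every interesting operand of an
+// environment chain, innermost first:
+//   for (DeepIterator it(env); !it.Done(); it.Advance()) {
+//     LOperand* op = it.Current();
+//     // ... inspect op ...
+//   }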
+
+
+class LPlatformChunk;
+class LGap;
+class LLabel;
+class LInstruction;
+class LCodeGen;
+
+// Superclass providing data and behavior common to all the
+// arch-specific LPlatformChunk classes.
+class LChunk : public ZoneObject {
+ public:
+  static LChunk* NewChunk(HGraph* graph);
+
+  void AddInstruction(LInstruction* instruction, HBasicBlock* block);
+  LConstantOperand* DefineConstantOperand(HConstant* constant);
+  HConstant* LookupConstant(LConstantOperand* operand) const;
+  Representation LookupLiteralRepresentation(LConstantOperand* operand) const;
+
+  int ParameterAt(int index);
+  int GetParameterStackSlot(int index) const;
+  int spill_slot_count() const { return spill_slot_count_; }
+  CompilationInfo* info() const { return info_; }
+  HGraph* graph() const { return graph_; }
+  Isolate* isolate() const { return graph_->isolate(); }
+  const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
+  void AddGapMove(int index, LOperand* from, LOperand* to);
+  LGap* GetGapAt(int index) const;
+  bool IsGapAt(int index) const;
+  int NearestGapPos(int index) const;
+  void MarkEmptyBlocks();
+  const ZoneList<LPointerMap*>* pointer_maps() const { return &pointer_maps_; }
+  LLabel* GetLabel(int block_id) const;
+  int LookupDestination(int block_id) const;
+  Label* GetAssemblyLabel(int block_id) const;
+
+  const ZoneList<Handle<SharedFunctionInfo>>& inlined_functions() const {
+    return inlined_functions_;
+  }
+
+  void AddInlinedFunction(Handle<SharedFunctionInfo> closure) {
+    inlined_functions_.Add(closure, zone());
+  }
+
+  void AddDeprecationDependency(Handle<Map> map) {
+    DCHECK(!map->is_deprecated());
+    if (!map->CanBeDeprecated()) return;
+    DCHECK(!info_->IsStub());
+    deprecation_dependencies_.Add(map, zone());
+  }
+
+  void AddStabilityDependency(Handle<Map> map) {
+    DCHECK(map->is_stable());
+    if (!map->CanTransition()) return;
+    DCHECK(!info_->IsStub());
+    stability_dependencies_.Add(map, zone());
+  }
+
+  Zone* zone() const { return info_->zone(); }
+
+  Handle<Code> Codegen();
+
+  void set_allocated_double_registers(BitVector* allocated_registers);
+  BitVector* allocated_double_registers() {
+    return allocated_double_registers_;
+  }
+
+ protected:
+  LChunk(CompilationInfo* info, HGraph* graph);
+
+  int spill_slot_count_;
+
+ private:
+  void CommitDependencies(Handle<Code> code) const;
+
+  CompilationInfo* info_;
+  HGraph* const graph_;
+  BitVector* allocated_double_registers_;
+  ZoneList<LInstruction*> instructions_;
+  ZoneList<LPointerMap*> pointer_maps_;
+  ZoneList<Handle<SharedFunctionInfo>> inlined_functions_;
+  ZoneList<Handle<Map>> deprecation_dependencies_;
+  ZoneList<Handle<Map>> stability_dependencies_;
+};
+
+
+class LChunkBuilderBase BASE_EMBEDDED {
+ public:
+  explicit LChunkBuilderBase(CompilationInfo* info, HGraph* graph)
+      : argument_count_(0),
+        chunk_(NULL),
+        info_(info),
+        graph_(graph),
+        status_(UNUSED),
+        zone_(graph->zone()) {}
+
+  virtual ~LChunkBuilderBase() { }
+
+  void Abort(BailoutReason reason);
+  void Retry(BailoutReason reason);
+
+ protected:
+  enum Status { UNUSED, BUILDING, DONE, ABORTED };
+
+  LPlatformChunk* chunk() const { return chunk_; }
+  CompilationInfo* info() const { return info_; }
+  HGraph* graph() const { return graph_; }
+  int argument_count() const { return argument_count_; }
+  Isolate* isolate() const { return graph_->isolate(); }
+  Heap* heap() const { return isolate()->heap(); }
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_building() const { return status_ == BUILDING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
+  // An input operand in a register, a stack slot, or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0;
+
+  LEnvironment* CreateEnvironment(HEnvironment* hydrogen_env,
+                                  int* argument_index_accumulator,
+                                  ZoneList<HValue*>* objects_to_materialize);
+  void AddObjectToMaterialize(HValue* value,
+                              ZoneList<HValue*>* objects_to_materialize,
+                              LEnvironment* result);
+
+  Zone* zone() const { return zone_; }
+
+  int argument_count_;
+  LPlatformChunk* chunk_;
+  CompilationInfo* info_;
+  HGraph* const graph_;
+  Status status_;
+
+ private:
+  Zone* zone_;
+};
+
+
+int StackSlotOffset(int index);
+
+enum NumberUntagDMode {
+  NUMBER_CANDIDATE_IS_SMI,
+  NUMBER_CANDIDATE_IS_ANY_TAGGED
+};
+
+
+class LPhase : public CompilationPhase {
+ public:
+  LPhase(const char* name, LChunk* chunk)
+      : CompilationPhase(name, chunk->info()),
+        chunk_(chunk) { }
+  ~LPhase();
+
+ private:
+  LChunk* chunk_;
+
+  DISALLOW_COPY_AND_ASSIGN(LPhase);
+};
+
+
+// A register-allocator view of a Lithium instruction. It contains the id of
+// the output operand and a list of input operand uses.
+
+enum RegisterKind {
+  UNALLOCATED_REGISTERS,
+  GENERAL_REGISTERS,
+  DOUBLE_REGISTERS
+};
+
+// Iterator for non-null temp operands.
+class TempIterator BASE_EMBEDDED {
+ public:
+  inline explicit TempIterator(LInstruction* instr);
+  inline bool Done();
+  inline LOperand* Current();
+  inline void Advance();
+
+ private:
+  inline void SkipUninteresting();
+  LInstruction* instr_;
+  int limit_;
+  int current_;
+};
+
+
+// Iterator for non-constant input operands.
+class InputIterator BASE_EMBEDDED {
+ public:
+  inline explicit InputIterator(LInstruction* instr);
+  inline bool Done();
+  inline LOperand* Current();
+  inline void Advance();
+
+ private:
+  inline void SkipUninteresting();
+  LInstruction* instr_;
+  int limit_;
+  int current_;
+};
+
+
+class UseIterator BASE_EMBEDDED {
+ public:
+  inline explicit UseIterator(LInstruction* instr);
+  inline bool Done();
+  inline LOperand* Current();
+  inline void Advance();
+
+ private:
+  InputIterator input_iterator_;
+  DeepIterator env_iterator_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_LITHIUM_H_
diff --git a/src/crankshaft/mips/OWNERS b/src/crankshaft/mips/OWNERS
new file mode 100644
index 0000000..89455a4
--- /dev/null
+++ b/src/crankshaft/mips/OWNERS
@@ -0,0 +1,6 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/src/crankshaft/mips/lithium-codegen-mips.cc b/src/crankshaft/mips/lithium-codegen-mips.cc
new file mode 100644
index 0000000..2414f0d
--- /dev/null
+++ b/src/crankshaft/mips/lithium-codegen-mips.cc
@@ -0,0 +1,5637 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/crankshaft/mips/lithium-codegen-mips.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator final : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deopt_mode_(mode) { }
+  virtual ~SafepointGenerator() {}
+
+  void BeforeCall(int call_size) const override {}
+
+  void AfterCall() const override {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  DCHECK(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+         GenerateJumpTable() && GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  DCHECK(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  DCHECK(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+      __ stop("stop_at");
+    }
+#endif
+
+    // a1: Callee's JS function.
+    // cp: Callee's context.
+    // fp: Caller's frame pointer.
+    // lr: Caller's pc.
+  }
+
+  info()->set_prologue_offset(masm_->pc_offset());
+  if (NeedsEagerFrame()) {
+    if (info()->IsStub()) {
+      __ StubPrologue();
+    } else {
+      __ Prologue(info()->GeneratePreagedPrologue());
+    }
+    frame_is_built_ = true;
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ Subu(sp,  sp, Operand(slots * kPointerSize));
+      __ Push(a0, a1);
+      __ Addu(a0, sp, Operand(slots *  kPointerSize));
+      __ li(a1, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ Subu(a0, a0, Operand(kPointerSize));
+      __ sw(a1, MemOperand(a0, 2 * kPointerSize));
+      __ Branch(&loop, ne, a0, Operand(sp));
+      __ Pop(a0, a1);
+    } else {
+      __ Subu(sp, sp, Operand(slots * kPointerSize));
+    }
+  }
+
+  if (info()->saves_caller_doubles()) {
+    SaveCallerDoubles();
+  }
+  return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+  Comment(";;; Prologue begin");
+
+  // Possibly allocate a local context.
+  if (info()->scope()->num_heap_slots() > 0) {
+    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
+    // Argument to NewContext is the function, which is in a1.
+    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+    if (info()->scope()->is_script_scope()) {
+      __ push(a1);
+      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ CallRuntime(Runtime::kNewScriptContext);
+      deopt_mode = Safepoint::kLazyDeopt;
+    } else if (slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ push(a1);
+      __ CallRuntime(Runtime::kNewFunctionContext);
+    }
+    RecordSafepoint(deopt_mode);
+
+    // Context is returned in both v0 and cp. It replaces the context passed
+    // to us. It's saved in the stack and kept live in cp.
+    __ mov(cp, v0);
+    __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ lw(a0, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextMemOperand(cp, var->index());
+        __ sw(a0, target);
+        // Update the write barrier. This clobbers a3 and a0.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(
+              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, a0, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  Comment(";;; Prologue end");
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or if there
+  // are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  DCHECK(slots >= 0);
+  __ Subu(sp, sp, Operand(slots * kPointerSize));
+}
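+
+// Worked example (illustrative): if the optimized code needs
+// GetStackSlotCount() == 10 slots and the unoptimized frame already occupies
+// 6 of them, the prologue above grows the frame by the remaining
+// 4 * kPointerSize bytes.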
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (instr->IsCall()) {
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  }
+  if (!instr->IsLazyBailout() && !instr->IsGap()) {
+    safepoints_.BumpLastLazySafepointIndex();
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  DCHECK(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(
+          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
+        frame_is_built_ = true;
+        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ push(scratch0());
+        __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        Comment(";;; Deferred code");
+      }
+      code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Destroy frame");
+        DCHECK(frame_is_built_);
+        __ pop(at);
+        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+        frame_is_built_ = false;
+      }
+      __ jmp(code->exit());
+    }
+  }
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+  if (jump_table_.length() > 0) {
+    Label needs_frame, call_deopt_entry;
+
+    Comment(";;; -------------------- Jump table --------------------");
+    Address base = jump_table_[0].address;
+
+    Register entry_offset = t9;
+
+    int length = jump_table_.length();
+    for (int i = 0; i < length; i++) {
+      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+      __ bind(&table_entry->label);
+
+      DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
+      Address entry = table_entry->address;
+      DeoptComment(table_entry->deopt_info);
+
+      // Second-level deopt table entries are contiguous and small, so instead
+      // of loading the full, absolute address of each one, load an immediate
+      // offset which will be added to the base address later.
+      __ li(entry_offset, Operand(entry - base));
+
+      if (table_entry->needs_frame) {
+        DCHECK(!info()->saves_caller_doubles());
+        Comment(";;; call deopt with frame");
+        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+        __ Call(&needs_frame);
+      } else {
+        __ Call(&call_deopt_entry);
+      }
+      info()->LogDeoptCallPosition(masm()->pc_offset(),
+                                   table_entry->deopt_info.inlining_id);
+    }
+
+    if (needs_frame.is_linked()) {
+      __ bind(&needs_frame);
+      // This variant of deopt can only be used with stubs. Since we don't
+      // have a function pointer to install in the stack frame that we're
+      // building, install a special marker there instead.
+      DCHECK(info()->IsStub());
+      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
+      __ push(at);
+      __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+    }
+
+    Comment(";;; call deopt");
+    __ bind(&call_deopt_entry);
+
+    if (info()->saves_caller_doubles()) {
+      DCHECK(info()->IsStub());
+      RestoreCallerDoubles();
+    }
+
+    // Add the base address to the offset previously loaded in entry_offset.
+    __ Addu(entry_offset, entry_offset,
+            Operand(ExternalReference::ForDeoptEntry(base)));
+    __ Jump(entry_offset);
+  }
+  __ RecordComment("]");
+
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
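+
+// Note on the table above (illustrative): second-level deopt entries are laid
+// out contiguously, so `entry - base` is a small immediate; each table row
+// therefore costs only a `li` plus a call, and the full 32-bit base address
+// is materialized once, at call_deopt_entry.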
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  DCHECK(is_done());
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::from_code(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DoubleRegister::from_code(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  DCHECK(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk_->LookupConstant(const_op);
+    Handle<Object> literal = constant->handle(isolate());
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      AllowDeferredHandleDereference get_number;
+      DCHECK(literal->IsNumber());
+      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
+    } else if (r.IsSmi()) {
+      DCHECK(constant->HasSmiValue());
+      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
+    } else if (r.IsDouble()) {
+      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+    } else {
+      DCHECK(r.IsSmiOrTagged());
+      __ li(scratch, literal);
+    }
+    return scratch;
+  } else if (op->IsStackSlot()) {
+    __ lw(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  DCHECK(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                                FloatRegister flt_scratch,
+                                                DoubleRegister dbl_scratch) {
+  if (op->IsDoubleRegister()) {
+    return ToDoubleRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk_->LookupConstant(const_op);
+    Handle<Object> literal = constant->handle(isolate());
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      DCHECK(literal->IsNumber());
+      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
+      __ mtc1(at, flt_scratch);
+      __ cvt_d_w(dbl_scratch, flt_scratch);
+      return dbl_scratch;
+    } else if (r.IsDouble()) {
+      Abort(kUnsupportedDoubleImmediate);
+    } else if (r.IsTagged()) {
+      Abort(kUnsupportedTaggedImmediate);
+    }
+  } else if (op->IsStackSlot()) {
+    MemOperand mem_op = ToMemOperand(op);
+    __ ldc1(dbl_scratch, mem_op);
+    return dbl_scratch;
+  }
+  UNREACHABLE();
+  return dbl_scratch;
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+                                   const Representation& r) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  DCHECK(r.IsSmiOrTagged());
+  return reinterpret_cast<int32_t>(Smi::FromInt(value));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return Smi::FromInt(constant->Integer32Value());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk()->LookupConstant(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsSmi()) {
+      DCHECK(constant->HasSmiValue());
+      return Operand(Smi::FromInt(constant->Integer32Value()));
+    } else if (r.IsInteger32()) {
+      DCHECK(constant->HasInteger32Value());
+      return Operand(constant->Integer32Value());
+    } else if (r.IsDouble()) {
+      Abort(kToOperandUnsupportedDoubleImmediate);
+    }
+    DCHECK(r.IsTagged());
+    return Operand(constant->handle(isolate()));
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort(kToOperandIsDoubleRegisterUnimplemented);
+    return Operand(0);
+  }
+  // Stack slots are not supported here; use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand(0);
+}
+
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
+  return -(index + 1) * kPointerSize;
+}
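+
+// Worked values (illustrative): with -(index + 1) * kPointerSize, index -1
+// maps to offset 0 from sp, index -2 to kPointerSize, and so on.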
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  DCHECK(!op->IsRegister());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()));
+  } else {
+    // With no eager frame, retrieve the parameter relative to the stack
+    // pointer.
+    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  DCHECK(op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+  } else {
+    // With no eager frame, retrieve the parameter relative to the stack
+    // pointer.
+    return MemOperand(
+        sp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+
+  WriteTranslation(environment->outer(), translation);
+  WriteTranslationFrame(environment, translation);
+
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    AddToTranslation(
+        environment, translation, value, environment->HasTaggedValueAt(i),
+        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+  }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation,
+                                LOperand* op,
+                                bool is_tagged,
+                                bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment,
+                       translation,
+                       value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer,
+                       dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    if (is_tagged) {
+      translation->StoreStackSlot(index);
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(index);
+    } else {
+      translation->StoreInt32StackSlot(index);
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    translation->StoreDoubleStackSlot(index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
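+
+// Illustrative trace (exposition only): for the self-referencing captured
+// object sketched in lithium.cc, the first materialization marker emits
+// BeginCapturedObject(2), reads its two fields from the de-materialized tail
+// at env_offset == translation_size() + 0, and the nested marker among them
+// emits DuplicateObject(0) rather than recursing forever.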
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  DCHECK(instr != NULL);
+  __ Call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr,
+                           SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+
+  __ CallRuntime(function, num_arguments, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    __ Move(cp, ToRegister(context));
+  } else if (context->IsStackSlot()) {
+    __ lw(cp, ToMemOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, zone());
+  }
+}
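+
+// Illustrative count (exposition only): at a deopt point inside a function g
+// inlined into f, the environment chain has two JS_FUNCTION entries, giving
+// frame_count == 2 and jsframe_count == 2; an interposed arguments-adaptor
+// frame would raise frame_count but not jsframe_count.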
+
+
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            Deoptimizer::BailoutType bailout_type,
+                            Register src1, const Operand& src2) {
+  LEnvironment* environment = instr->environment();
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  DCHECK(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+    return;
+  }
+
+  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+    Register scratch = scratch0();
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+    Label no_deopt;
+    __ Push(a1, scratch);
+    __ li(scratch, Operand(count));
+    __ lw(a1, MemOperand(scratch));
+    __ Subu(a1, a1, Operand(1));
+    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
+    __ li(a1, Operand(FLAG_deopt_every_n_times));
+    __ sw(a1, MemOperand(scratch));
+    __ Pop(a1, scratch);
+
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    __ bind(&no_deopt);
+    __ sw(a1, MemOperand(scratch));
+    __ Pop(a1, scratch);
+  }
+
+  if (info()->ShouldTrapOnDeopt()) {
+    Label skip;
+    if (condition != al) {
+      __ Branch(&skip, NegateCondition(condition), src1, src2);
+    }
+    __ stop("trap_on_deopt");
+    __ bind(&skip);
+  }
+
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
+  DCHECK(info()->IsStub() || frame_is_built_);
+  // Go through the jump table if we need to handle a condition, build a
+  // frame, or restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
+    DeoptComment(deopt_info);
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
+    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
+  } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
+                                            !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+        jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
+    }
+    __ Branch(&jump_table_.last().label, condition, src1, src2);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            Register src1, const Operand& src2) {
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
+  DCHECK(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+      kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
+          label->block_id(),
+          LabelType(label));
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->result()).is(v0));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
+  HMod* hmod = instr->hydrogen();
+  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
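+  // For divisor == ±2^k this yields 2^k - 1, so the 'and' below extracts
+  // |dividend| mod 2^k (e.g. divisor == -8 -> mask == 7).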
+  Label dividend_is_not_negative, done;
+
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
+    // Note: The code below even works for extreme inputs: negating a kMinInt
+    // dividend overflows back to kMinInt, but the 'and' with the mask still
+    // extracts the correct remainder bits.
+    __ subu(dividend, zero_reg, dividend);
+    __ And(dividend, dividend, Operand(mask));
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+                   Operand(zero_reg));
+    }
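+    // The second negation executes in the branch delay slot, restoring the
+    // sign so that a negative dividend yields a non-positive remainder.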
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ subu(dividend, zero_reg, dividend);
+  }
+
+  __ bind(&dividend_is_not_negative);
+  __ And(dividend, dividend, Operand(mask));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr);
+    return;
+  }
+
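+  // result = dividend - trunc(dividend / |divisor|) * |divisor|, which is
+  // dividend mod divisor and carries the dividend's sign (e.g. -7 mod 3
+  // yields -1).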
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  __ Mul(result, result, Operand(Abs(divisor)));
+  __ Subu(result, dividend, Operand(result));
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
+                 Operand(zero_reg));
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  HMod* hmod = instr->hydrogen();
+  const Register left_reg = ToRegister(instr->left());
+  const Register right_reg = ToRegister(instr->right());
+  const Register result_reg = ToRegister(instr->result());
+
+  // div runs in the background while we check for special cases.
+  __ Mod(result_reg, left_reg, right_reg);
+
+  Label done;
+  // Check for x % 0; we have to deopt in this case because we can't return a
+  // NaN.
+  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
+                 Operand(zero_reg));
+  }
+
+  // Check for kMinInt % -1; div will return kMinInt, which is not what we
+  // want. We have to deopt if we care about -0, because we can't return that.
+  if (hmod->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
+    } else {
+      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
+      __ Branch(USE_DELAY_SLOT, &done);
+      __ mov(result_reg, zero_reg);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  // If we care about -0, test if the dividend is < 0 and the result is 0.
+  __ Branch(&done, ge, left_reg, Operand(zero_reg));
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
+                 Operand(zero_reg));
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+                 Operand(zero_reg));
+  }
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
+  }
+  // Deoptimize if remainder will not be 0.
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+      divisor != 1 && divisor != -1) {
+    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+    __ And(at, dividend, Operand(mask));
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
+  }
+
+  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
+    __ Subu(result, zero_reg, dividend);
+    return;
+  }
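+  // An arithmetic shift alone rounds toward -infinity for negative dividends.
+  // The sequence below first adds 2^shift - 1 to negative dividends (sra by
+  // 31 yields 0 or -1; srl by 32 - shift turns -1 into 2^shift - 1) so that
+  // the final 'sra' rounds toward zero. E.g. -7 / 4: -7 + 3 == -4, and
+  // -4 >> 2 == -1 == trunc(-7 / 4).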
+  uint16_t shift = WhichPowerOf2Abs(divisor);
+  if (shift == 0) {
+    __ Move(result, dividend);
+  } else if (shift == 1) {
+    __ srl(result, dividend, 31);
+    __ Addu(result, dividend, Operand(result));
+  } else {
+    __ sra(result, dividend, 31);
+    __ srl(result, result, 32 - shift);
+    __ Addu(result, dividend, Operand(result));
+  }
+  if (shift > 0) __ sra(result, result, shift);
+  if (divisor < 0) __ Subu(result, zero_reg, result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+                 Operand(zero_reg));
+  }
+
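+  // TruncatingDiv computes trunc(dividend / |divisor|) without a hardware
+  // divide, using a multiply-by-magic-number sequence; a negative divisor
+  // then only needs a final negation of the quotient.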
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ Subu(result, zero_reg, result);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    __ Mul(scratch0(), result, Operand(divisor));
+    __ Subu(scratch0(), scratch0(), dividend);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
+                 Operand(zero_reg));
+  }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  const Register result = ToRegister(instr->result());
+  Register remainder = ToRegister(instr->temp());
+
+  // On MIPS, div is asynchronous: it runs in the background while we
+  // check for special cases.
+  __ Div(remainder, result, dividend, divisor);
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+                 Operand(zero_reg));
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label left_not_zero;
+    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+                 Operand(zero_reg));
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    Label left_not_min_int;
+    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+    __ bind(&left_not_min_int);
+  }
+
+  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
+                 Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+  DoubleRegister addend = ToDoubleRegister(instr->addend());
+  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+  // This is computed in-place.
+  DCHECK(addend.is(ToDoubleRegister(instr->result())));
+
+  __ madd_d(addend, addend, multiplier, multiplicand);
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  Register result = ToRegister(instr->result());
+  int32_t divisor = instr->divisor();
+  Register scratch = result.is(dividend) ? scratch0() : dividend;
+  DCHECK(!result.is(dividend) || !scratch.is(dividend));
+
+  // If the divisor is 1, return the dividend.
+  if (divisor == 1) {
+    __ Move(result, dividend);
+    return;
+  }
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
+  uint16_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 1) {
+    __ sra(result, dividend, shift);
+    return;
+  }
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+
+  // The dividend can be the same register as the result, so save its value
+  // here for the overflow check below.
+  __ Move(scratch, dividend);
+
+  __ Subu(result, zero_reg, dividend);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+  }
+
+  // Dividing by -1 is basically negation, unless we overflow.
+  __ Xor(scratch, scratch, result);
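+  // scratch now holds dividend ^ -dividend: negative in the normal case
+  // (the sign bits differ), and zero exactly when the dividend was 0 or
+  // kMinInt, i.e. when the negation overflowed.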
+  if (divisor == -1) {
+    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+      DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
+                   Operand(zero_reg));
+    }
+    return;
+  }
+
+  // If the negation could not overflow, simply shifting is OK.
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ sra(result, result, shift);
+    return;
+  }
+
+  Label no_overflow, done;
+  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
+  __ li(result, Operand(kMinInt / divisor));
+  __ Branch(&done);
+  __ bind(&no_overflow);
+  __ sra(result, result, shift);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+                 Operand(zero_reg));
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(result, dividend, Abs(divisor));
+    if (divisor < 0) __ Subu(result, zero_reg, result);
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
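+  // E.g. floor(-7 / 3): adjust the dividend toward zero by one first, so
+  // trunc((-7 + 1) / 3) - 1 == -2 - 1 == -3 == floor(-7 / 3).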
+  Register temp = ToRegister(instr->temp());
+  DCHECK(!temp.is(dividend) && !temp.is(result));
+  Label needs_adjustment, done;
+  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
+            dividend, Operand(zero_reg));
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ Subu(result, zero_reg, result);
+  __ jmp(&done);
+  __ bind(&needs_adjustment);
+  __ Addu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(result, temp, Abs(divisor));
+  if (divisor < 0) __ Subu(result, zero_reg, result);
+  __ Subu(result, result, Operand(1));
+  __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  const Register result = ToRegister(instr->result());
+  Register remainder = scratch0();
+  // On MIPS, div is asynchronous: it runs in the background while we
+  // check for special cases.
+  __ Div(remainder, result, dividend, divisor);
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+                 Operand(zero_reg));
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label left_not_zero;
+    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+                 Operand(zero_reg));
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    Label left_not_min_int;
+    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+    __ bind(&left_not_min_int);
+  }
+
+  // We performed a truncating division. Correct the result if necessary.
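+  // The quotient needs to be decremented exactly when the remainder is
+  // non-zero and its sign differs from the divisor's (i.e. the operands had
+  // opposite signs). E.g. -7 / 2: trunc gives -3 remainder -1, so the
+  // floored result is -3 - 1 == -4.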
+  Label done;
+  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+  __ Xor(remainder, remainder, Operand(divisor));
+  __ Branch(&done, ge, remainder, Operand(zero_reg));
+  __ Subu(result, result, Operand(1));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
+  Register left = ToRegister(instr->left());
+  LOperand* right_op = instr->right();
+
+  bool bailout_on_minus_zero =
+    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (right_op->IsConstantOperand()) {
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below (case 0).
+      // If the constant is negative and left is zero, the result should be -0.
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
+    }
+
+    switch (constant) {
+      case -1:
+        if (overflow) {
+          Label no_overflow;
+          __ SubBranchNoOvf(result, zero_reg, Operand(left), &no_overflow);
+          DeoptimizeIf(al, instr);
+          __ bind(&no_overflow);
+        } else {
+          __ Subu(result, zero_reg, left);
+        }
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+          // If left is strictly negative and the constant is zero, the
+          // result is -0. Deoptimize if required, otherwise return 0.
+          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
+                       Operand(zero_reg));
+        }
+        __ mov(result, zero_reg);
+        break;
+      case 1:
+        // Nothing to do.
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
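+        // Branchless absolute value: mask is 0 for non-negative constants
+        // and -1 for negative ones, so (constant + mask) ^ mask == |constant|
+        // (e.g. constant == -6: (-6 + (-1)) ^ -1 == -7 ^ -1 == 6).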
+
+        if (base::bits::IsPowerOfTwo32(constant_abs)) {
+          int32_t shift = WhichPowerOf2(constant_abs);
+          __ sll(result, left, shift);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Subu(result, zero_reg, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs - 1);
+          __ sll(scratch, left, shift);
+          __ Addu(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Subu(result, zero_reg, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs + 1);
+          __ sll(scratch, left, shift);
+          __ Subu(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Subu(result, zero_reg, result);
+        } else {
+          // Generate standard code.
+          __ li(at, constant);
+          __ Mul(result, left, at);
+        }
+    }
+
+  } else {
+    DCHECK(right_op->IsRegister());
+    Register right = ToRegister(right_op);
+
+    if (overflow) {
+      // hi:lo = left * right.
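+      // The 32x32->64-bit multiply overflows int32 iff the high word differs
+      // from the sign extension of the low word, which is checked below.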
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ Mul(scratch, result, result, right);
+      } else {
+        __ Mul(scratch, result, left, right);
+      }
+      __ sra(at, result, 31);
+      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
+    } else {
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ Mul(result, result, right);
+      } else {
+        __ Mul(result, left, right);
+      }
+    }
+
+    if (bailout_on_minus_zero) {
+      Label done;
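+      // left ^ right is non-negative iff the operands have the same sign, in
+      // which case the product cannot be -0.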
+      __ Xor(at, left, right);
+      __ Branch(&done, ge, at, Operand(zero_reg));
+      // Bail out if the result is minus zero.
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
+                   Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left_op = instr->left();
+  LOperand* right_op = instr->right();
+  DCHECK(left_op->IsRegister());
+  Register left = ToRegister(left_op);
+  Register result = ToRegister(instr->result());
+  Operand right(no_reg);
+
+  if (right_op->IsStackSlot()) {
+    right = Operand(EmitLoadRegister(right_op, at));
+  } else {
+    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
+    right = ToOperand(right_op);
+  }
+
+  switch (instr->op()) {
+    case Token::BIT_AND:
+      __ And(result, left, right);
+      break;
+    case Token::BIT_OR:
+      __ Or(result, left, right);
+      break;
+    case Token::BIT_XOR:
+      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+        __ Nor(result, zero_reg, left);
+      } else {
+        __ Xor(result, left, right);
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+  // result may alias either of them.
+  LOperand* right_op = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  if (right_op->IsRegister()) {
+    // No need to mask the right operand on MIPS; the masking is built into
+    // the variable shift instructions.
+    switch (instr->op()) {
+      case Token::ROR:
+        __ Ror(result, left, Operand(ToRegister(right_op)));
+        break;
+      case Token::SAR:
+        __ srav(result, left, ToRegister(right_op));
+        break;
+      case Token::SHR:
+        __ srlv(result, left, ToRegister(right_op));
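+        // A logical right shift produces an unsigned result; if the sign bit
+        // is still set, the value is not representable as an int32, hence the
+        // deopt below.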
+        if (instr->can_deopt()) {
+          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
+                       Operand(zero_reg));
+        }
+        break;
+      case Token::SHL:
+        __ sllv(result, left, ToRegister(right_op));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    // Mask the constant right operand down to the valid shift-count range.
+    int value = ToInteger32(LConstantOperand::cast(right_op));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::ROR:
+        if (shift_count != 0) {
+          __ Ror(result, left, Operand(shift_count));
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sra(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ srl(result, left, shift_count);
+        } else {
+          if (instr->can_deopt()) {
+            __ And(at, left, Operand(0x80000000));
+            DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
+                         Operand(zero_reg));
+          }
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          if (instr->hydrogen_value()->representation().IsSmi() &&
+              instr->can_deopt()) {
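+            // The operand is a tagged smi, so shift it by shift_count - 1 and
+            // let SmiTagCheckOverflow supply the final shift while flagging
+            // overflow in 'scratch' (negative on overflow), which the deopt
+            // below tests.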
+            if (shift_count != 1) {
+              __ sll(result, left, shift_count - 1);
+              __ SmiTagCheckOverflow(result, result, scratch);
+            } else {
+              __ SmiTagCheckOverflow(result, left, scratch);
+            }
+            DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
+                         Operand(zero_reg));
+          } else {
+            __ sll(result, left, shift_count);
+          }
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    if (right->IsStackSlot()) {
+      Register right_reg = EmitLoadRegister(right, at);
+      __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
+    } else {
+      DCHECK(right->IsRegister() || right->IsConstantOperand());
+      __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
+    }
+  } else {  // can_overflow.
+    Register scratch = scratch0();
+    Label no_overflow_label;
+    if (right->IsStackSlot()) {
+      Register right_reg = EmitLoadRegister(right, scratch);
+      __ SubBranchNoOvf(ToRegister(result), ToRegister(left),
+                        Operand(right_reg), &no_overflow_label);
+    } else {
+      DCHECK(right->IsRegister() || right->IsConstantOperand());
+      __ SubBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+                        &no_overflow_label, scratch);
+    }
+    DeoptimizeIf(al, instr);
+    __ bind(&no_overflow_label);
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  DCHECK(instr->result()->IsDoubleRegister());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  double v = instr->value();
+  __ Move(result, v);
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Handle<Object> object = instr->value(isolate());
+  AllowDeferredHandleDereference smi_check;
+  __ li(ToRegister(instr->result()), object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->value());
+  __ EnumLength(result, map);
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+                                           LOperand* index,
+                                           String::Encoding encoding) {
+  if (index->IsConstantOperand()) {
+    int offset = ToInteger32(LConstantOperand::cast(index));
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset *= kUC16Size;
+    }
+    STATIC_ASSERT(kCharSize == 1);
+    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+  }
+  Register scratch = scratch0();
+  DCHECK(!scratch.is(string));
+  DCHECK(!scratch.is(ToRegister(index)));
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ Addu(scratch, string, ToRegister(index));
+  } else {
+    STATIC_ASSERT(kUC16Size == 2);
+    __ sll(scratch, ToRegister(index), 1);
+    __ Addu(scratch, string, scratch);
+  }
+  return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  if (FLAG_debug_code) {
+    Register scratch = scratch0();
+    __ lw(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+    __ And(scratch, scratch,
+           Operand(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ Subu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+                                ? one_byte_seq_type : two_byte_seq_type));
+    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ lbu(result, operand);
+  } else {
+    __ lhu(result, operand);
+  }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register value = ToRegister(instr->value());
+
+  if (FLAG_debug_code) {
+    Register scratch = scratch0();
+    Register index = ToRegister(instr->index());
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    int encoding_mask =
+        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+        ? one_byte_seq_type : two_byte_seq_type;
+    __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ sb(value, operand);
+  } else {
+    __ sh(value, operand);
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    if (right->IsStackSlot()) {
+      Register right_reg = EmitLoadRegister(right, at);
+      __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
+    } else {
+      DCHECK(right->IsRegister() || right->IsConstantOperand());
+      __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
+    }
+  } else {  // can_overflow.
+    Register scratch = scratch1();
+    Label no_overflow_label;
+    if (right->IsStackSlot()) {
+      Register right_reg = EmitLoadRegister(right, scratch);
+      __ AddBranchNoOvf(ToRegister(result), ToRegister(left),
+                        Operand(right_reg), &no_overflow_label);
+    } else {
+      DCHECK(right->IsRegister() || right->IsConstantOperand());
+      __ AddBranchNoOvf(ToRegister(result), ToRegister(left), ToOperand(right),
+                        &no_overflow_label, scratch);
+    }
+    DeoptimizeIf(al, instr);
+    __ bind(&no_overflow_label);
+  }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  HMathMinMax::Operation operation = instr->hydrogen()->operation();
+  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Register left_reg = ToRegister(left);
+    Register right_reg = EmitLoadRegister(right, scratch0());
+    Register result_reg = ToRegister(instr->result());
+    Label return_right, done;
+    Register scratch = scratch1();
+    __ Slt(scratch, left_reg, Operand(right_reg));
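+    // scratch is 1 when left < right and 0 otherwise; Movz copies on zero,
+    // Movn on non-zero, selecting the min or max accordingly.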
+    if (condition == ge) {
+      __ Movz(result_reg, left_reg, scratch);
+      __ Movn(result_reg, right_reg, scratch);
+    } else {
+      DCHECK(condition == le);
+      __ Movn(result_reg, left_reg, scratch);
+      __ Movz(result_reg, right_reg, scratch);
+    }
+  } else {
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    FPURegister left_reg = ToDoubleRegister(left);
+    FPURegister right_reg = ToDoubleRegister(right);
+    FPURegister result_reg = ToDoubleRegister(instr->result());
+    Label check_nan_left, check_zero, return_left, return_right, done;
+    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
+    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
+    __ Branch(&return_right);
+
+    __ bind(&check_zero);
+    // left == right != 0.
+    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
+    // At this point, both left and right are either 0 or -0.
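+    // With signed zeros, -((-left) - right) is -0 iff either input is -0,
+    // while left + right is -0 only if both inputs are, giving the correct
+    // IEEE min and max of ±0.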
+    if (operation == HMathMinMax::kMathMin) {
+      __ neg_d(left_reg, left_reg);
+      __ sub_d(result_reg, left_reg, right_reg);
+      __ neg_d(result_reg, result_reg);
+    } else {
+      __ add_d(result_reg, left_reg, right_reg);
+    }
+    __ Branch(&done);
+
+    __ bind(&check_nan_left);
+    // left == NaN.
+    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
+    __ bind(&return_right);
+    if (!right_reg.is(result_reg)) {
+      __ mov_d(result_reg, right_reg);
+    }
+    __ Branch(&done);
+
+    __ bind(&return_left);
+    if (!left_reg.is(result_reg)) {
+      __ mov_d(result_reg, left_reg);
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->left());
+  DoubleRegister right = ToDoubleRegister(instr->right());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ add_d(result, left, right);
+      break;
+    case Token::SUB:
+      __ sub_d(result, left, right);
+      break;
+    case Token::MUL:
+      __ mul_d(result, left, right);
+      break;
+    case Token::DIV:
+      __ div_d(result, left, right);
+      break;
+    case Token::MOD: {
+      // Save a0-a3 on the stack.
+      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+      __ MultiPush(saved_regs);
+
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ MovToFloatParameters(left, right);
+      __ CallCFunction(
+          ExternalReference::mod_two_doubles_operation(isolate()),
+          0, 2);
+      // Move the result in the double result register.
+      __ MovFromFloatResult(result);
+
+      // Restore saved register.
+      __ MultiPop(saved_regs);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(a1));
+  DCHECK(ToRegister(instr->right()).is(a0));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+  // Other architectures use a nop here to signal that there is no inlined
+  // patchable code. MIPS does not need the nop, since our marker
+  // instruction (andi zero_reg) will never be used in normal code.
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr,
+                          Condition condition,
+                          Register src1,
+                          const Operand& src2) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
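+  // Pick the cheapest branch shape: fall through when a successor is the
+  // next emitted block, and emit an unconditional branch only when neither
+  // successor immediately follows.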
+  if (right_block == left_block || condition == al) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(right_block),
+              NegateCondition(condition), src1, src2);
+  } else if (right_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
+  } else {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchF(InstrType instr,
+                           Condition condition,
+                           FPURegister src1,
+                           FPURegister src2) {
+  int right_block = instr->FalseDestination(chunk_);
+  int left_block = instr->TrueDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
+               NegateFpuCondition(condition), src1, src2);
+  } else if (right_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+               condition, src1, src2);
+  } else {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+               condition, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
+                              Register src1, const Operand& src2) {
+  int true_block = instr->TrueDestination(chunk_);
+  __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
+                               Register src1, const Operand& src2) {
+  int false_block = instr->FalseDestination(chunk_);
+  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitFalseBranchF(InstrType instr,
+                                Condition condition,
+                                FPURegister src1,
+                                FPURegister src2) {
+  int false_block = instr->FalseDestination(chunk_);
+  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
+             condition, src1, src2);
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+  __ stop("LDebugBreak");
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsInteger32() || r.IsSmi()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    EmitBranch(instr, ne, reg, Operand(zero_reg));
+  } else if (r.IsDouble()) {
+    DCHECK(!info()->IsStub());
+    DoubleRegister reg = ToDoubleRegister(instr->value());
+    // Test the double value. Zero and NaN are false.
+    EmitBranchF(instr, ogl, reg, kDoubleRegZero);
+  } else {
+    DCHECK(r.IsTagged());
+    Register reg = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      DCHECK(!info()->IsStub());
+      __ LoadRoot(at, Heap::kTrueValueRootIndex);
+      EmitBranch(instr, eq, reg, Operand(at));
+    } else if (type.IsSmi()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, ne, reg, Operand(zero_reg));
+    } else if (type.IsJSArray()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
+    } else if (type.IsHeapNumber()) {
+      DCHECK(!info()->IsStub());
+      DoubleRegister dbl_scratch = double_scratch0();
+      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+      // Test the double value. Zero and NaN are false.
+      EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
+    } else if (type.IsString()) {
+      DCHECK(!info()->IsStub());
+      __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
+      EmitBranch(instr, ne, at, Operand(zero_reg));
+    } else {
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ LoadRoot(at, Heap::kTrueValueRootIndex);
+        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
+        __ LoadRoot(at, Heap::kFalseValueRootIndex);
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ LoadRoot(at, Heap::kNullValueRootIndex);
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all others -> true.
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ SmiTst(reg, at);
+        DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+      }
+
+      const Register map = scratch0();
+      if (expected.NeedsMap()) {
+        __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ And(at, at, Operand(1 << Map::kIsUndetectable));
+          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(instr->TrueLabel(chunk_),
+                  ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
+        __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
+        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
+        __ Branch(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        const Register scratch = scratch1();
+        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
+      }
+
+      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+        // SIMD value -> true.
+        const Register scratch = scratch1();
+        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(instr->TrueLabel(chunk_), eq, scratch,
+                  Operand(SIMD128_VALUE_TYPE));
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        DoubleRegister dbl_scratch = double_scratch0();
+        Label not_heap_number;
+        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+        __ Branch(&not_heap_number, ne, map, Operand(at));
+        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+                   ne, dbl_scratch, kDoubleRegZero);
+        // Falls through if dbl_scratch == 0.
+        __ Branch(instr->FalseLabel(chunk_));
+        __ bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
+                     Operand(zero_reg));
+      }
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  if (!IsNextEmittedBlock(block)) {
+    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = kNoCondition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? lo : lt;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? hi : gt;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? ls : le;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? hs : ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  bool is_unsigned =
+      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+  Condition cond = TokenToCondition(instr->op(), is_unsigned);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Compare left and right as doubles and load the
+      // resulting flags into the normal status register.
+      FPURegister left_reg = ToDoubleRegister(left);
+      FPURegister right_reg = ToDoubleRegister(right);
+
+      // If a NaN is involved, i.e. the result is unordered,
+      // jump to false block label.
+      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
+                 left_reg, right_reg);
+
+      EmitBranchF(instr, cond, left_reg, right_reg);
+    } else {
+      Register cmp_left;
+      Operand cmp_right = Operand(0);
+
+      if (right->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(right));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          cmp_left = ToRegister(left);
+          cmp_right = Operand(Smi::FromInt(value));
+        } else {
+          cmp_left = ToRegister(left);
+          cmp_right = Operand(value);
+        }
+      } else if (left->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(left));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+           cmp_left = ToRegister(right);
+           cmp_right = Operand(Smi::FromInt(value));
+        } else {
+          cmp_left = ToRegister(right);
+          cmp_right = Operand(value);
+        }
+        // We commuted the operands, so commute the condition.
+        cond = CommuteCondition(cond);
+      } else {
+        cmp_left = ToRegister(left);
+        cmp_right = Operand(ToRegister(right));
+      }
+
+      EmitBranch(instr, cond, cmp_left, cmp_right);
+    }
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+
+  EmitBranch(instr, eq, left, Operand(right));
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+  if (instr->hydrogen()->representation().IsTagged()) {
+    Register input_reg = ToRegister(instr->object());
+    __ li(at, Operand(factory()->the_hole_value()));
+    EmitBranch(instr, eq, input_reg, Operand(at));
+    return;
+  }
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->object());
+  EmitFalseBranchF(instr, eq, input_reg, input_reg);
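+  // Only a NaN reaches this point (any self-equal value took the false
+  // branch above); the hole NaN is distinguished by its upper 32 bits.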
+
+  Register scratch = scratch0();
+  __ FmoveHigh(scratch, input_reg);
+  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+  Representation rep = instr->hydrogen()->value()->representation();
+  DCHECK(!rep.IsInteger32());
+  Register scratch = ToRegister(instr->temp());
+
+  if (rep.IsDouble()) {
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
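+    // value is +0 or -0 here; they differ only in the sign bit, i.e. a high
+    // word of 0x80000000 for -0.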
+    __ FmoveHigh(scratch, value);
+    __ li(at, 0x80000000);
+  } else {
+    Register value = ToRegister(instr->value());
+    __ CheckMap(value,
+                scratch,
+                Heap::kHeapNumberMapRootIndex,
+                instr->FalseLabel(chunk()),
+                DO_SMI_CHECK);
+    __ lw(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+    EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
+    __ lw(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+    __ mov(at, zero_reg);
+  }
+  EmitBranch(instr, eq, scratch, Operand(at));
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+  __ GetObjectType(input, temp1, temp1);
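+  // temp1 now holds the instance type; string types are all below
+  // FIRST_NONSTRING_TYPE, so the caller branches on 'lt'.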
+
+  return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp1 = ToRegister(instr->temp());
+
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->type().IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+  Condition true_cond =
+      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
+
+  EmitBranch(instr, true_cond, temp1,
+             Operand(FIRST_NONSTRING_TYPE));
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Register input_reg = EmitLoadRegister(instr->value(), at);
+  __ And(at, input_reg, kSmiTagMask);
+  EmitBranch(instr, eq, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ lw(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
+  EmitBranch(instr, ne, at, Operand(zero_reg));
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(a1));
+  DCHECK(ToRegister(instr->right()).is(a0));
+
+  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+
+  EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
+             Operand(zero_reg));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  DCHECK(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return hs;
+  if (from == FIRST_TYPE) return ls;
+  UNREACHABLE();
+  return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+
+  __ GetObjectType(input, scratch, scratch);
+  EmitBranch(instr,
+             BranchCondition(instr->hydrogen()),
+             scratch,
+             Operand(TestType(instr->hydrogen())));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ AssertString(input);
+
+  __ lw(result, FieldMemOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ lw(scratch,
+         FieldMemOperand(input, String::kHashFieldOffset));
+  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
+  EmitBranch(instr, eq, at, Operand(zero_reg));
+}
+
+
+// Branches to a label or falls through with the instance class name left in
+// the temp register for the caller to compare.  Trashes the temp registers,
+// but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
+
+  __ JumpIfSmi(input, is_false);
+  __ GetObjectType(input, temp, temp2);
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+    __ Branch(is_true, eq, temp2, Operand(JS_FUNCTION_TYPE));
+  } else {
+    __ Branch(is_false, eq, temp2, Operand(JS_FUNCTION_TYPE));
+  }
+
+  // Check if the constructor in the map is a function.
+  Register instance_type = scratch1();
+  DCHECK(!instance_type.is(temp));
+  __ GetMapConstructor(temp, temp, temp2, instance_type);
+
+  // Objects with a non-function constructor have class 'Object'.
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
+    __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
+  } else {
+    __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ lw(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(temp, FieldMemOperand(temp,
+                               SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are internalized it is sufficient to use an
+  // identity comparison.
+
+  // End with the address of this class_name instance in temp register.
+  // On MIPS, the caller must do the comparison with
+  // Handle<String> class_name.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = scratch0();
+  Register temp2 = ToRegister(instr->temp());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+                  class_name, input, temp, temp2);
+
+  EmitBranch(instr, eq, temp, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  __ lw(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+  EmitBranch(instr, eq, temp, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+  DCHECK(ToRegister(instr->result()).is(v0));
+  InstanceOfStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+    LHasInPrototypeChainAndBranch* instr) {
+  Register const object = ToRegister(instr->object());
+  Register const object_map = scratch0();
+  Register const object_instance_type = scratch1();
+  Register const object_prototype = object_map;
+  Register const prototype = ToRegister(instr->prototype());
+
+  // The {object} must be a spec object.  It's sufficient to know that {object}
+  // is not a smi, since all other non-spec objects have {null} prototypes and
+  // will be ruled out below.
+  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+    __ SmiTst(object, at);
+    EmitFalseBranch(instr, eq, at, Operand(zero_reg));
+  }
+
+  // Loop through the {object}'s prototype chain looking for the {prototype}.
+  __ lw(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+  Label loop;
+  __ bind(&loop);
+
+  // Deoptimize if the object needs to be access checked.
+  __ lbu(object_instance_type,
+         FieldMemOperand(object_map, Map::kBitFieldOffset));
+  __ And(object_instance_type, object_instance_type,
+         Operand(1 << Map::kIsAccessCheckNeeded));
+  DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
+               Operand(zero_reg));
+  // Deoptimize for proxies.
+  __ lbu(object_instance_type,
+         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+  DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
+               Operand(JS_PROXY_TYPE));
+
+  __ lw(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+  EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
+  __ LoadRoot(at, Heap::kNullValueRootIndex);
+  EmitFalseBranch(instr, eq, object_prototype, Operand(at));
+  __ Branch(USE_DELAY_SLOT, &loop);
+  __ lw(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Token::Value op = instr->op();
+
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // On MIPS there is no need for a "no inlined smi code" marker (nop).
+
+  Condition condition = ComputeCompareCondition(op);
+  // A minor optimization that relies on LoadRoot always emitting one
+  // instruction.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+  Label done, check;
+  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
+  __ bind(&check);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in v0. We're leaving the code
+    // managed by the register allocator and tearing down the frame, so it's
+    // safe to write to the context register.
+    __ push(v0);
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntime(Runtime::kTraceExit);
+  }
+  if (info()->saves_caller_doubles()) {
+    RestoreCallerDoubles();
+  }
+  if (NeedsEagerFrame()) {
+    __ mov(sp, fp);
+    __ Pop(ra, fp);
+  }
+  if (instr->has_constant_parameter_count()) {
+    int parameter_count = ToInteger32(instr->constant_parameter_count());
+    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+    if (sp_delta != 0) {
+      __ Addu(sp, sp, Operand(sp_delta));
+    }
+  } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
+    Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi; untag it before use.
+    __ SmiUntag(reg);
+    __ sll(at, reg, kPointerSizeLog2);
+    __ Addu(sp, sp, at);
+  }
+
+  __ Jump(ra);
+}
+
+
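+// Loads the type feedback vector and slot index into the fixed registers
+// expected by the vector-based load IC calling convention.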
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(a0));
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ li(vector_register, vector);
+  // No need to allocate the slot register: it is fixed by the descriptor.
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ li(slot_register, Operand(Smi::FromInt(index)));
+}
+
+
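+// Same as above, but for store ICs, where both the vector and the slot
+// live in allocated temp registers.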
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = ToRegister(instr->temp_slot());
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ li(vector_register, vector);
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ li(slot_register, Operand(Smi::FromInt(index)));
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+                                         SLOPPY, PREMONOMORPHIC).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+
+  __ lw(result, ContextMemOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+    } else {
+      Label is_not_hole;
+      __ Branch(&is_not_hole, ne, result, Operand(at));
+      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+      __ bind(&is_not_hole);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
+  MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ lw(scratch, target);
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
+    } else {
+      __ Branch(&skip_assignment, ne, scratch, Operand(at));
+    }
+  }
+
+  __ sw(value, target);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context,
+                              target.offset(),
+                              value,
+                              scratch0(),
+                              GetRAState(),
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
+  }
+
+  __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+  Register object = ToRegister(instr->object());
+
+  if (access.IsExternalMemory()) {
+    Register result = ToRegister(instr->result());
+    MemOperand operand = MemOperand(object, offset);
+    __ Load(result, operand, access.representation());
+    return;
+  }
+
+  if (instr->hydrogen()->representation().IsDouble()) {
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ ldc1(result, FieldMemOperand(object, offset));
+    return;
+  }
+
+  Register result = ToRegister(instr->result());
+  if (!access.IsInobject()) {
+    __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    object = result;
+  }
+  MemOperand operand = FieldMemOperand(object, offset);
+  __ Load(result, operand, access.representation());
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  // Name is always in a2.
+  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(
+          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
+          instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register scratch = scratch0();
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // Get the prototype or initial map from the function.
+  __ lw(result,
+         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ GetObjectType(result, scratch, scratch);
+  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+  // Get the prototype from the initial map.
+  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+  // There are two words between the frame pointer and the last argument.
+  // Subtracting the index from the length accounts for one of them; adding
+  // one accounts for the other.
+  if (instr->length()->IsConstantOperand()) {
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    if (instr->index()->IsConstantOperand()) {
+      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+      int index = (const_length - const_index) + 1;
+      __ lw(result, MemOperand(arguments, index * kPointerSize));
+    } else {
+      Register index = ToRegister(instr->index());
+      __ li(at, Operand(const_length + 1));
+      __ Subu(result, at, index);
+      __ sll(at, result, kPointerSizeLog2);
+      __ Addu(at, arguments, at);
+      __ lw(result, MemOperand(at));
+    }
+  } else if (instr->index()->IsConstantOperand()) {
+    Register length = ToRegister(instr->length());
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int loc = const_index - 1;
+    if (loc != 0) {
+      __ Subu(result, length, Operand(loc));
+      __ sll(at, result, kPointerSizeLog2);
+      __ Addu(at, arguments, at);
+      __ lw(result, MemOperand(at));
+    } else {
+      __ sll(at, length, kPointerSizeLog2);
+      __ Addu(at, arguments, at);
+      __ lw(result, MemOperand(at));
+    }
+  } else {
+    Register length = ToRegister(instr->length());
+    Register index = ToRegister(instr->index());
+    __ Subu(result, length, index);
+    __ Addu(result, result, 1);
+    __ sll(at, result, kPointerSizeLog2);
+    __ Addu(at, arguments, at);
+    __ lw(result, MemOperand(at));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+      ? (element_size_shift - kSmiTagSize) : element_size_shift;
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+    FPURegister result = ToDoubleRegister(instr->result());
+    if (key_is_constant) {
+      __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
+    } else {
+      __ sll(scratch0(), key, shift_size);
+      __ Addu(scratch0(), scratch0(), external_pointer);
+    }
+    if (elements_kind == FLOAT32_ELEMENTS) {
+      __ lwc1(result, MemOperand(scratch0(), base_offset));
+      __ cvt_d_s(result, result);
+    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
+      __ ldc1(result, MemOperand(scratch0(), base_offset));
+    }
+  } else {
+    Register result = ToRegister(instr->result());
+    MemOperand mem_operand = PrepareKeyedOperand(
+        key, external_pointer, key_is_constant, constant_key,
+        element_size_shift, shift_size, base_offset);
+    switch (elements_kind) {
+      case INT8_ELEMENTS:
+        __ lb(result, mem_operand);
+        break;
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ lbu(result, mem_operand);
+        break;
+      case INT16_ELEMENTS:
+        __ lh(result, mem_operand);
+        break;
+      case UINT16_ELEMENTS:
+        __ lhu(result, mem_operand);
+        break;
+      case INT32_ELEMENTS:
+        __ lw(result, mem_operand);
+        break;
+      case UINT32_ELEMENTS:
+        __ lw(result, mem_operand);
+        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+          DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
+                       result, Operand(0x80000000));
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+  Register elements = ToRegister(instr->elements());
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+
+  int base_offset = instr->base_offset();
+  if (key_is_constant) {
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    base_offset += constant_key * kDoubleSize;
+  }
+  __ Addu(scratch, elements, Operand(base_offset));
+
+  if (!key_is_constant) {
+    key = ToRegister(instr->key());
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - kSmiTagSize) : element_size_shift;
+    __ sll(at, key, shift_size);
+    __ Addu(scratch, scratch, at);
+  }
+
+  __ ldc1(result, MemOperand(scratch));
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
+    DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
+                 Operand(kHoleNanUpper32));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  if (instr->key()->IsConstantOperand()) {
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    Register key = ToRegister(instr->key());
+    // Even though the HLoadKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bound check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (instr->hydrogen()->key()->representation().IsSmi()) {
+      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+      __ addu(scratch, elements, scratch);
+    } else {
+      __ sll(scratch, key, kPointerSizeLog2);
+      __ addu(scratch, elements, scratch);
+    }
+  }
+  __ lw(result, MemOperand(store_base, offset));
+
+  // Check for the hole value.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+      __ SmiTst(result, scratch);
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
+                   Operand(zero_reg));
+    } else {
+      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
+    }
+  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
+    Label done;
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    __ Branch(&done, ne, result, Operand(scratch));
+    if (info()->IsStub()) {
+      // A stub can safely convert the hole to undefined only if the array
+      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // it needs to bail out.
+      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
+      __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
+      DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
+                   Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+    }
+    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+  if (instr->is_fixed_typed_array()) {
+    DoLoadKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->representation().IsDouble()) {
+    DoLoadKeyedFixedDoubleArray(instr);
+  } else {
+    DoLoadKeyedFixedArray(instr);
+  }
+}
+
+
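+// Computes the memory operand for a keyed access. A shift_size of -1 means
+// the key is a smi-tagged index into byte-sized elements, so it is shifted
+// right by one to untag it.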
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+                                         Register base,
+                                         bool key_is_constant,
+                                         int constant_key,
+                                         int element_size,
+                                         int shift_size,
+                                         int base_offset) {
+  if (key_is_constant) {
+    return MemOperand(base, (constant_key << element_size) + base_offset);
+  }
+
+  if (base_offset == 0) {
+    if (shift_size >= 0) {
+      __ sll(scratch0(), key, shift_size);
+      __ Addu(scratch0(), base, scratch0());
+      return MemOperand(scratch0());
+    } else {
+      DCHECK_EQ(-1, shift_size);
+      __ srl(scratch0(), key, 1);
+      __ Addu(scratch0(), base, scratch0());
+      return MemOperand(scratch0());
+    }
+  }
+
+  if (shift_size >= 0) {
+    __ sll(scratch0(), key, shift_size);
+    __ Addu(scratch0(), base, scratch0());
+    return MemOperand(scratch0(), base_offset);
+  } else {
+    DCHECK_EQ(-1, shift_size);
+    __ sra(scratch0(), key, 1);
+    __ Addu(scratch0(), base, scratch0());
+    return MemOperand(scratch0(), base_offset);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+                        isolate(), instr->hydrogen()->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register scratch = scratch0();
+  Register temp = scratch1();
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
+    __ Subu(result, sp, 2 * kPointerSize);
+  } else {
+    // Check if the calling frame is an arguments adaptor frame.
+    Label done, adapted;
+    __ lw(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ lw(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+    // Result is the frame pointer for the frame if not adapted and for the real
+    // frame below the adaptor frame if adapted.
+    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
+    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
+  }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elem = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  __ Addu(result, zero_reg, Operand(scope()->num_parameters()));
+  __ Branch(&done, eq, fp, Operand(elem));
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ lw(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ lw(result,
+        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, result_in_receiver;
+
+  if (!instr->hydrogen()->known_function()) {
+    // Do not transform the receiver to object for strict mode
+    // functions.
+    __ lw(scratch,
+           FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    __ lw(scratch,
+           FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+
+    // Do not transform the receiver to object for builtins.
+    int32_t strict_mode_function_mask =
+        1 << (SharedFunctionInfo::kStrictModeFunction + kSmiTagSize);
+    int32_t native_mask = 1 << (SharedFunctionInfo::kNative + kSmiTagSize);
+    __ And(scratch, scratch, Operand(strict_mode_function_mask | native_mask));
+    __ Branch(&result_in_receiver, ne, scratch, Operand(zero_reg));
+  }
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+
+  // Deoptimize if the receiver is not a JS object.
+  __ SmiTst(receiver, scratch);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
+
+  __ GetObjectType(receiver, scratch, scratch);
+  DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
+               Operand(FIRST_JS_RECEIVER_TYPE));
+
+  __ Branch(&result_in_receiver);
+  __ bind(&global_object);
+  __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
+  __ lw(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+  __ lw(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+
+  if (result.is(receiver)) {
+    __ bind(&result_in_receiver);
+  } else {
+    Label result_ok;
+    __ Branch(&result_ok);
+    __ bind(&result_in_receiver);
+    __ mov(result, receiver);
+    __ bind(&result_ok);
+  }
+}
+
+
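+// Pushes the arguments held in |elements| onto the stack (their number is
+// bounded by kArgumentsLimit) and invokes |function| with the dynamic
+// argument count kept in the receiver register.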
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  DCHECK(receiver.is(a0));  // Used for parameter count.
+  DCHECK(function.is(a1));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
+               Operand(kArgumentsLimit));
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ push(receiver);
+  __ Move(receiver, length);
+  // The arguments start at a one-pointer-size offset from elements.
+  __ Addu(elements, elements, Operand(1 * kPointerSize));
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
+  __ sll(scratch, length, 2);
+  __ bind(&loop);
+  __ Addu(scratch, elements, scratch);
+  __ lw(scratch, MemOperand(scratch));
+  __ push(scratch);
+  __ Subu(length, length, Operand(1));
+  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
+  __ sll(scratch, length, 2);
+
+  __ bind(&invoke);
+  DCHECK(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  // The number of arguments is stored in receiver which is a0, as expected
+  // by InvokeFunction.
+  ParameterCount actual(receiver);
+  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+                    safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->value();
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort(kDoPushArgumentNotImplementedForDoubleType);
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, at);
+    __ push(argument_reg);
+  }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ lw(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
+  Register result = ToRegister(instr->result());
+  if (info()->IsOptimizing()) {
+    __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in cp.
+    DCHECK(result.is(cp));
+  }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  __ li(scratch0(), instr->hydrogen()->pairs());
+  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
+  __ Push(scratch0(), scratch1());
+  CallRuntime(Runtime::kDeclareGlobals, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count, int arity,
+                                 LInstruction* instr) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+
+  Register function_reg = a1;
+  LPointerMap* pointers = instr->pointer_map();
+
+  if (can_invoke_directly) {
+    // Change context.
+    __ lw(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+    // Always initialize new target and number of actual arguments.
+    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+    __ li(a0, Operand(arity));
+
+    // Invoke function.
+    __ lw(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+    __ Call(at);
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+  }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  DCHECK(instr->context() != NULL);
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Deoptimize if not a heap number.
+  __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
+
+  Label done;
+  Register exponent = scratch0();
+  scratch = no_reg;
+  __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it.
+  __ Move(result, input);
+  __ And(at, exponent, Operand(HeapNumber::kSignMask));
+  __ Branch(&done, eq, at, Operand(zero_reg));
+
+  // Input is negative. Reverse its sign.
+  // Preserve the value of all registers.
+  {
+    PushSafepointRegistersScope scope(this);
+
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(a1) ? a0 : a1;
+    Register tmp2 = input.is(a2) ? a0 : a2;
+    Register tmp3 = input.is(a3) ? a0 : a3;
+    Register tmp4 = input.is(t0) ? a0 : t0;
+
+    // exponent: floating point exponent value.
+
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ Branch(&allocated);
+
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
+
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+                            instr->context());
+    // Set the pointer to the new heap number in tmp1.
+    if (!tmp1.is(v0)) __ mov(tmp1, v0);
+    // Restore input_reg after call to runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
+    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
+    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ lw(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+    __ StoreToSafepointRegisterSlot(tmp1, result);
+  }
+
+  __ bind(&done);
+}
+
+
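+// Integer Math.abs: the mov in the branch delay slot handles non-negative
+// inputs; negative inputs are negated, deoptimizing on kMinInt, whose
+// negation overflows back to a negative value.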
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  Label done;
+  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
+  __ mov(result, input);
+  __ subu(result, zero_reg, input);
+  // Overflow if result is still negative, i.e. 0x80000000.
+  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMathAbs* instr_;
+  };
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsDouble()) {
+    FPURegister input = ToDoubleRegister(instr->value());
+    FPURegister result = ToDoubleRegister(instr->result());
+    __ abs_d(result, input);
+  } else if (r.IsSmiOrInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else {
+    // Representation is tagged.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input = ToRegister(instr->value());
+    // Smi check.
+    __ JumpIfNotSmi(input, deferred->entry());
+    // If smi, handle it directly.
+    EmitIntegerMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  Register except_flag = ToRegister(instr->temp());
+
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     result,
+                     input,
+                     scratch1,
+                     double_scratch0(),
+                     except_flag);
+
+  // Deopt if the operation did not succeed.
+  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+               Operand(zero_reg));
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    Label done;
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ Mfhc1(scratch1, input);
+    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+                 Operand(zero_reg));
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
+  Register scratch = scratch0();
+  Label done, check_sign_on_zero;
+
+  // Extract exponent bits.
+  __ Mfhc1(result, input);
+  __ Ext(scratch,
+         result,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+
+  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+  Label skip1;
+  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
+  __ mov(result, zero_reg);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ Branch(&check_sign_on_zero);
+  } else {
+    __ Branch(&done);
+  }
+  __ bind(&skip1);
+
+  // The following conversion will not work with numbers
+  // outside of ]-2^32, 2^32[.
+  DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
+               Operand(HeapNumber::kExponentBias + 32));
+
+  // Save the original sign for later comparison.
+  __ And(scratch, result, Operand(HeapNumber::kSignMask));
+
+  __ Move(double_scratch0(), 0.5);
+  __ add_d(double_scratch0(), input, double_scratch0());
+
+  // Check sign of the result: if the sign changed, the input
+  // value was in ]-0.5, 0[ and the result should be -0.
+  __ Mfhc1(result, double_scratch0());
+  __ Xor(result, result, Operand(scratch));
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // ARM uses 'mi' here, which is 'lt'
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+  } else {
+    Label skip2;
+    // ARM uses 'mi' here, which is 'lt'
+    // Negating it results in 'ge'
+    __ Branch(&skip2, ge, result, Operand(zero_reg));
+    __ mov(result, zero_reg);
+    __ Branch(&done);
+    __ bind(&skip2);
+  }
+
+  Register except_flag = scratch;
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     result,
+                     double_scratch0(),
+                     at,
+                     double_scratch1,
+                     except_flag);
+
+  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+               Operand(zero_reg));
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ bind(&check_sign_on_zero);
+    __ Mfhc1(scratch, input);
+    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
+                 Operand(zero_reg));
+  }
+  __ bind(&done);
+}
+
+
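+// Math.fround: rounds to float32 precision by converting the value to
+// single precision and back to double.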
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ cvt_s_d(result.low(), input);
+  __ cvt_d_s(result, result.low());
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ sqrt_d(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister temp = ToDoubleRegister(instr->temp());
+
+  DCHECK(!input.is(result));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done;
+  __ Move(temp, static_cast<double>(-V8_INFINITY));
+  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
+  // Set up Infinity in the delay slot.
+  // result is overwritten if the branch is not taken.
+  __ neg_d(result, temp);
+
+  // Add +0 to convert -0 to +0.
+  __ add_d(result, input, kDoubleRegZero);
+  __ sqrt_d(result, result);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
+         ToDoubleRegister(instr->right()).is(f4));
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(f2));
+  DCHECK(ToDoubleRegister(instr->result()).is(f0));
+
+  if (exponent_type.IsSmi()) {
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    DCHECK(!t3.is(tagged_exponent));
+    __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, t3, Operand(at));
+    __ bind(&no_deopt);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(isolate(), MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+    __ CallStub(&stub);
+  }
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+  DoubleRegister double_scratch2 = double_scratch0();
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  MathExpGenerator::EmitMathExp(
+      masm(), input, result, double_scratch1, double_scratch2,
+      temp1, temp2, scratch0());
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+                   0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  __ Clz(result, input);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(instr->HasPointerMap());
+
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
+  } else {
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
+                      instr->arity(), instr);
+  }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ Jump(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ Jump(target);
+    }
+  } else {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      __ Call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ Call(target);
+    }
+    generator.AfterCall();
+  }
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  // Change context.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Always initialize new target and number of actual arguments.
+  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+  __ li(a0, Operand(instr->arity()));
+
+  // Load the code entry address
+  __ lw(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ Call(at);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    Register slot_register = ToRegister(instr->temp_slot());
+    Register vector_register = ToRegister(instr->temp_vector());
+    DCHECK(slot_register.is(a3));
+    DCHECK(vector_register.is(a2));
+
+    AllowDeferredHandleDereference vector_structure_check;
+    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+    int index = vector->GetIndex(instr->hydrogen()->slot());
+
+    __ li(vector_register, vector);
+    __ li(slot_register, Operand(Smi::FromInt(index)));
+
+    Handle<Code> ic =
+        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ li(a0, Operand(arity));
+    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  __ li(a0, Operand(instr->arity()));
+  if (instr->arity() == 1) {
+    // We only need the allocation site for the case where we have a length
+    // argument. The case may bail out to the runtime, which will determine
+    // the correct elements kind with the site.
+    __ li(a2, instr->hydrogen()->site());
+  } else {
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+  }
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+      // We might need the holey variant: look at the first argument (the
+      // length); a non-zero length creates an array with holes.
+      __ lw(t1, MemOperand(sp, 0));
+      __ Branch(&packed_case, eq, t1, Operand(zero_reg));
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(),
+                                              holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      __ jmp(&done);
+      __ bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    __ bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
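+// Stores the entry address of |code_object| (just past the Code header)
+// into the function's code entry field.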
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  __ Addu(code_object, code_object,
+          Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sw(code_object,
+        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+    __ Addu(result, base, Operand(ToInteger32(offset)));
+  } else {
+    Register offset = ToRegister(instr->offset());
+    __ Addu(result, base, offset);
+  }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Representation representation = instr->representation();
+
+  Register object = ToRegister(instr->object());
+  Register scratch = scratch0();
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    Register value = ToRegister(instr->value());
+    MemOperand operand = MemOperand(object, offset);
+    __ Store(value, operand, representation);
+    return;
+  }
+
+  __ AssertNotSmi(object);
+
+  DCHECK(!representation.IsSmi() ||
+         !instr->value()->IsConstantOperand() ||
+         IsSmi(LConstantOperand::cast(instr->value())));
+  if (representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    __ sdc1(value, FieldMemOperand(object, offset));
+    return;
+  }
+
+  if (instr->hydrogen()->has_transition()) {
+    Handle<Map> transition = instr->hydrogen()->transition_map();
+    AddDeprecationDependency(transition);
+    __ li(scratch, Operand(transition));
+    __ sw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+      Register temp = ToRegister(instr->temp());
+      // Update the write barrier for the map field.
+      __ RecordWriteForMap(object,
+                           scratch,
+                           temp,
+                           GetRAState(),
+                           kSaveFPRegs);
+    }
+  }
+
+  // Do the store.
+  Register value = ToRegister(instr->value());
+  if (access.IsInobject()) {
+    MemOperand operand = FieldMemOperand(object, offset);
+    __ Store(value, operand, representation);
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      // Update the write barrier for the object for in-object properties.
+      __ RecordWriteField(object,
+                          offset,
+                          value,
+                          scratch,
+                          GetRAState(),
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          instr->hydrogen()->SmiCheckForWriteBarrier(),
+                          instr->hydrogen()->PointersToHereCheckForValue());
+    }
+  } else {
+    __ lw(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    MemOperand operand = FieldMemOperand(scratch, offset);
+    __ Store(value, operand, representation);
+    if (instr->hydrogen()->NeedsWriteBarrier()) {
+      // Update the write barrier for the properties array.
+      // object is used as a scratch register.
+      __ RecordWriteField(scratch,
+                          offset,
+                          value,
+                          object,
+                          GetRAState(),
+                          kSaveFPRegs,
+                          EMIT_REMEMBERED_SET,
+                          instr->hydrogen()->SmiCheckForWriteBarrier(),
+                          instr->hydrogen()->PointersToHereCheckForValue());
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+  }
+
+  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
+  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
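+// Emits the bounds check as an unsigned comparison. When the index is a
+// constant it becomes the right-hand operand, so the condition is commuted.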
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
+  Operand operand(0);
+  Register reg;
+  if (instr->index()->IsConstantOperand()) {
+    operand = ToOperand(instr->index());
+    reg = ToRegister(instr->length());
+    cc = CommuteCondition(cc);
+  } else {
+    reg = ToRegister(instr->index());
+    operand = ToOperand(instr->length());
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+    Label done;
+    __ Branch(&done, NegateCondition(cc), reg, operand);
+    __ stop("eliminated bounds check failed");
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+      ? (element_size_shift - kSmiTagSize) : element_size_shift;
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+    Register address = scratch0();
+    FPURegister value(ToDoubleRegister(instr->value()));
+    if (key_is_constant) {
+      if (constant_key != 0) {
+        __ Addu(address, external_pointer,
+                Operand(constant_key << element_size_shift));
+      } else {
+        address = external_pointer;
+      }
+    } else {
+      __ sll(address, key, shift_size);
+      __ Addu(address, external_pointer, address);
+    }
+
+    if (elements_kind == FLOAT32_ELEMENTS) {
+      __ cvt_s_d(double_scratch0(), value);
+      __ swc1(double_scratch0(), MemOperand(address, base_offset));
+    } else {  // Storing doubles, not floats.
+      __ sdc1(value, MemOperand(address, base_offset));
+    }
+  } else {
+    Register value(ToRegister(instr->value()));
+    MemOperand mem_operand = PrepareKeyedOperand(
+        key, external_pointer, key_is_constant, constant_key,
+        element_size_shift, shift_size,
+        base_offset);
+    switch (elements_kind) {
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+      case INT8_ELEMENTS:
+        __ sb(value, mem_operand);
+        break;
+      case INT16_ELEMENTS:
+      case UINT16_ELEMENTS:
+        __ sh(value, mem_operand);
+        break;
+      case INT32_ELEMENTS:
+      case UINT32_ELEMENTS:
+        __ sw(value, mem_operand);
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  DoubleRegister value = ToDoubleRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  Register scratch_1 = scratch1();
+  DoubleRegister double_scratch = double_scratch0();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int base_offset = instr->base_offset();
+  Label not_nan, done;
+
+  // Calculate the effective address of the slot in the array to store the
+  // double value.
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  if (key_is_constant) {
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    __ Addu(scratch, elements,
+           Operand((constant_key << element_size_shift) + base_offset));
+  } else {
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - kSmiTagSize) : element_size_shift;
+    __ Addu(scratch, elements, Operand(base_offset));
+    __ sll(at, ToRegister(instr->key()), shift_size);
+    __ Addu(scratch, scratch, at);
+  }
+
+  if (instr->NeedsCanonicalization()) {
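+    // Canonicalizing stored NaNs keeps computed NaN bit patterns from
+    // colliding with the hole NaN used by FixedDoubleArray.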
+    Label is_nan;
+    // Check for NaN. All NaNs must be canonicalized.
+    __ BranchF(NULL, &is_nan, eq, value, value);
+    __ Branch(&not_nan);
+
+    // Only load the canonical NaN if the comparison above detected a NaN.
+    __ bind(&is_nan);
+    __ LoadRoot(scratch_1, Heap::kNanValueRootIndex);
+    __ ldc1(double_scratch,
+            FieldMemOperand(scratch_1, HeapNumber::kValueOffset));
+    __ sdc1(double_scratch, MemOperand(scratch, 0));
+    __ Branch(&done);
+  }
+
+  __ bind(&not_nan);
+  __ sdc1(value, MemOperand(scratch, 0));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
+      : no_reg;
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    // Even though the HStoreKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (instr->hydrogen()->key()->representation().IsSmi()) {
+      __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+      __ addu(scratch, elements, scratch);
+    } else {
+      __ sll(scratch, key, kPointerSizeLog2);
+      __ addu(scratch, elements, scratch);
+    }
+  }
+  __ sw(value, MemOperand(store_base, offset));
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute address of modified element and store it into key register.
+    __ Addu(key, store_base, Operand(offset));
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   GetRAState(),
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed,
+                   instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // Dispatch by case: typed array, fast double array, or fast (tagged) array.
+  if (instr->is_fixed_typed_array()) {
+    DoStoreKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+    DoStoreKeyedFixedDoubleArray(instr);
+  } else {
+    DoStoreKeyedFixedArray(instr);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
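+  // If this store collects type feedback, load the feedback vector and slot
+  // into the registers the vector store IC expects.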
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+  class DeferredMaybeGrowElements final : public LDeferredCode {
+   public:
+    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMaybeGrowElements* instr_;
+  };
+
+  Register result = v0;
+  DeferredMaybeGrowElements* deferred =
+      new (zone()) DeferredMaybeGrowElements(this, instr);
+  LOperand* key = instr->key();
+  LOperand* current_capacity = instr->current_capacity();
+
+  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+  DCHECK(key->IsConstantOperand() || key->IsRegister());
+  DCHECK(current_capacity->IsConstantOperand() ||
+         current_capacity->IsRegister());
+
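+  // Take the deferred grow path whenever key >= current_capacity; the branch
+  // form depends on which of the two operands are compile-time constants.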
+  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    if (constant_key >= constant_capacity) {
+      // Deferred case.
+      __ jmp(deferred->entry());
+    }
+  } else if (key->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    __ Branch(deferred->entry(), le, ToRegister(current_capacity),
+              Operand(constant_key));
+  } else if (current_capacity->IsConstantOperand()) {
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    __ Branch(deferred->entry(), ge, ToRegister(key),
+              Operand(constant_capacity));
+  } else {
+    __ Branch(deferred->entry(), ge, ToRegister(key),
+              Operand(ToRegister(current_capacity)));
+  }
+
+  if (instr->elements()->IsRegister()) {
+    __ mov(result, ToRegister(instr->elements()));
+  } else {
+    __ lw(result, ToMemOperand(instr->elements()));
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = v0;
+  __ mov(result, zero_reg);
+
+  // We have to call a stub.
+  {
+    PushSafepointRegistersScope scope(this);
+    if (instr->object()->IsRegister()) {
+      __ mov(result, ToRegister(instr->object()));
+    } else {
+      __ lw(result, ToMemOperand(instr->object()));
+    }
+
+    LOperand* key = instr->key();
+    if (key->IsConstantOperand()) {
+      __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
+    } else {
+      __ mov(a3, ToRegister(key));
+      __ SmiTag(a3);
+    }
+
+    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+                               instr->hydrogen()->kind());
+    __ mov(a0, result);
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(
+        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    __ StoreToSafepointRegisterSlot(result, result);
+  }
+
+  // Deopt on smi, which means the elements array changed to dictionary mode.
+  __ SmiTst(result, at);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = instr->from_kind();
+  ElementsKind to_kind = instr->to_kind();
+
+  Label not_applicable;
+  __ lw(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
+
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ li(new_map_reg, Operand(to_map));
+    __ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    // Write barrier.
+    __ RecordWriteForMap(object_reg,
+                         new_map_reg,
+                         scratch,
+                         GetRAState(),
+                         kDontSaveFPRegs);
+  } else {
+    DCHECK(object_reg.is(a0));
+    DCHECK(ToRegister(instr->context()).is(cp));
+    PushSafepointRegistersScope scope(this);
+    __ li(a1, Operand(to_map));
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
+                                     ne, &no_memento_found);
+  DeoptimizeIf(al, instr);
+  __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(a1));
+  DCHECK(ToRegister(instr->right()).is(a0));
+  StringAddStub stub(isolate(),
+                     instr->hydrogen()->flags(),
+                     instr->hydrogen()->pretenure_flag());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt final : public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new(zone()) DeferredStringCharCodeAt(this, instr);
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  if (instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    __ Addu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
+    __ push(scratch);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+                          instr->context());
+  __ AssertSmi(v0);
+  __ SmiUntag(v0);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode final : public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredStringCharFromCode(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new(zone()) DeferredStringCharFromCode(this, instr);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  DCHECK(!char_code.is(result));
+
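+  // Fast path: look the character up in the single-character string cache.
+  // Codes above the one-byte range and cache misses (undefined entries) are
+  // handled in deferred code.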
+  __ Branch(deferred->entry(), hi,
+            char_code, Operand(String::kMaxOneByteCharCode));
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+  __ sll(scratch, char_code, kPointerSizeLog2);
+  __ Addu(result, result, scratch);
+  __ lw(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(deferred->entry(), eq, result, Operand(scratch));
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  DCHECK(output->IsDoubleRegister());
+  FPURegister single_scratch = double_scratch0().low();
+  if (input->IsStackSlot()) {
+    Register scratch = scratch0();
+    __ lw(scratch, ToMemOperand(input));
+    __ mtc1(scratch, single_scratch);
+  } else {
+    __ mtc1(ToRegister(input), single_scratch);
+  }
+  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+
+  __ Cvt_d_uw(ToDoubleRegister(output), ToRegister(input), f22);
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI final : public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_,
+                                       instr_->value(),
+                                       instr_->temp1(),
+                                       instr_->temp2(),
+                                       SIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagI* instr_;
+  };
+
+  Register src = ToRegister(instr->value());
+  Register dst = ToRegister(instr->result());
+  Register overflow = scratch0();
+
+  DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
+  __ SmiTagCheckOverflow(dst, src, overflow);
+  __ BranchOnOverflow(deferred->entry(), overflow);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+  class DeferredNumberTagU final : public LDeferredCode {
+   public:
+    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_,
+                                       instr_->value(),
+                                       instr_->temp1(),
+                                       instr_->temp2(),
+                                       UNSIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagU* instr_;
+  };
+
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
+  __ SmiTag(result, input);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+                                     LOperand* value,
+                                     LOperand* temp1,
+                                     LOperand* temp2,
+                                     IntegerSignedness signedness) {
+  Label done, slow;
+  Register src = ToRegister(value);
+  Register dst = ToRegister(instr->result());
+  Register tmp1 = scratch0();
+  Register tmp2 = ToRegister(temp1);
+  Register tmp3 = ToRegister(temp2);
+  DoubleRegister dbl_scratch = double_scratch0();
+
+  if (signedness == SIGNED_INT32) {
+    // There was overflow, so bits 30 and 31 of the original integer
+    // disagree. Try to allocate a heap number in new space and store
+    // the value in there. If that fails, call the runtime system.
+    if (dst.is(src)) {
+      __ SmiUntag(src, dst);
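+      // Untagging an overflowed smi leaves the sign bit flipped; xor it back
+      // to recover the original value.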
+      __ Xor(src, src, Operand(0x80000000));
+    }
+    __ mtc1(src, dbl_scratch);
+    __ cvt_d_w(dbl_scratch, dbl_scratch);
+  } else {
+    __ Cvt_d_uw(dbl_scratch, src, f22);
+  }
+
+  if (FLAG_inline_new) {
+    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, DONT_TAG_RESULT);
+    __ Branch(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ mov(dst, zero_reg);
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    // NumberTagI and NumberTagD use the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // They only call Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
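+    // The runtime result is tagged; strip the tag so it matches the untagged
+    // address produced by the DONT_TAG_RESULT fast path above.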
+    __ Subu(v0, v0, kHeapObjectTag);
+    __ StoreToSafepointRegisterSlot(v0, dst);
+  }
+
+  // Done. Store the value in dbl_scratch into the value field of the
+  // allocated heap number.
+  __ bind(&done);
+  __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
+  __ Addu(dst, dst, kHeapObjectTag);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD final : public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+
+  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    // We want the untagged address first, for performance.
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+                          DONT_TAG_RESULT);
+  } else {
+    __ Branch(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
+  // Now that we have finished with the object's real address, tag it.
+  __ Addu(reg, reg, kHeapObjectTag);
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ mov(reg, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+  __ Subu(v0, v0, kHeapObjectTag);
+  __ StoreToSafepointRegisterSlot(v0, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  HChange* hchange = instr->hydrogen();
+  Register input = ToRegister(instr->value());
+  Register output = ToRegister(instr->result());
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      hchange->value()->CheckFlag(HValue::kUint32)) {
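+    // A uint32 value fits in a smi only if its two topmost bits are clear,
+    // i.e. the value is less than 2^30.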
+    __ And(at, input, Operand(0xc0000000));
+    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+  }
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      !hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ SmiTagCheckOverflow(output, input, at);
+    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+  } else {
+    __ SmiTag(output, input);
+  }
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  if (instr->needs_check()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    // If the input is a HeapObject, the value of scratch won't be zero.
+    __ And(scratch, input, Operand(kHeapObjectTag));
+    __ SmiUntag(result, input);
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
+  } else {
+    __ SmiUntag(result, input);
+  }
+}
+
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+                                DoubleRegister result_reg,
+                                NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+  Register scratch = scratch0();
+  Label convert, load_smi, done;
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+    // Heap number map check.
+    __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    if (can_convert_undefined_to_nan) {
+      __ Branch(&convert, ne, scratch, Operand(at));
+    } else {
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
+                   Operand(at));
+    }
+    // Load heap number.
+    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
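+      // -0.0 has a zero low word and only the sign bit set in the high word,
+      // so a nonzero low word rules it out immediately.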
+      __ mfc1(at, result_reg.low());
+      __ Branch(&done, ne, at, Operand(zero_reg));
+      __ Mfhc1(scratch, result_reg);
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
+                   Operand(HeapNumber::kSignMask));
+    }
+    __ Branch(&done);
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+      // Convert undefined (and hole) to NaN.
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
+                   Operand(at));
+      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+      __ Branch(&done);
+    }
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+  }
+  // Smi to double register conversion
+  __ bind(&load_smi);
+  // scratch: untagged value of input_reg
+  __ mtc1(scratch, result_reg);
+  __ cvt_d_w(result_reg, result_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Register input_reg = ToRegister(instr->value());
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->temp());
+  DoubleRegister double_scratch = double_scratch0();
+  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
+
+  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+  Label done;
+
+  // The input is a tagged HeapObject.
+  // Heap number map check.
+  __ lw(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  // The 'at' register (heap number map root) and the map value in scratch1
+  // are used for the tests in both clauses of the if below.
+
+  if (instr->truncating()) {
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    Label no_heap_number, check_bools, check_false;
+    // Check HeapNumber map.
+    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
+    __ mov(scratch2, input_reg);  // In delay slot.
+    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ Branch(&done);
+
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
+    __ bind(&no_heap_number);
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(&check_bools, ne, input_reg, Operand(at));
+    DCHECK(ToRegister(instr->result()).is(input_reg));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ mov(input_reg, zero_reg);  // In delay slot.
+
+    __ bind(&check_bools);
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(&check_false, ne, scratch2, Operand(at));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ li(input_reg, Operand(1));  // In delay slot.
+
+    __ bind(&check_false);
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
+                 scratch2, Operand(at));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ mov(input_reg, zero_reg);  // In delay slot.
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
+                 Operand(at));
+
+    // Load the double value.
+    __ ldc1(double_scratch,
+            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       input_reg,
+                       double_scratch,
+                       scratch1,
+                       double_scratch2,
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+                 Operand(zero_reg));
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ Branch(&done, ne, input_reg, Operand(zero_reg));
+
+      __ Mfhc1(scratch1, double_scratch);
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+                   Operand(zero_reg));
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI final : public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  DCHECK(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+
+    // Let the deferred code handle the HeapObject case.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+
+    // Smi to int32 conversion.
+    __ SmiUntag(input_reg);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DoubleRegister result_reg = ToDoubleRegister(result);
+
+  HValue* value = instr->hydrogen()->value();
+  NumberUntagDMode mode = value->representation().IsSmi()
+      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+  EmitNumberUntagD(instr, input_reg, result_reg, mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    Register except_flag = LCodeGen::scratch1();
+
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       result_reg,
+                       double_input,
+                       scratch1,
+                       double_scratch0(),
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed (except_flag != 0).
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+                 Operand(zero_reg));
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ Branch(&done, ne, result_reg, Operand(zero_reg));
+      __ Mfhc1(scratch1, double_input);
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+                   Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = LCodeGen::scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    Register except_flag = LCodeGen::scratch1();
+
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       result_reg,
+                       double_input,
+                       scratch1,
+                       double_scratch0(),
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed (except_flag != 0).
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+                 Operand(zero_reg));
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ Branch(&done, ne, result_reg, Operand(zero_reg));
+      __ Mfhc1(scratch1, double_input);
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+                   Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
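+  // Tag the int32 result as a smi and deopt if it does not fit in smi range.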
+  __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
+  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch1, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->value();
+  __ SmiTst(ToRegister(input), at);
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ SmiTst(ToRegister(input), at);
+    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoCheckArrayBufferNotNeutered(
+    LCheckArrayBufferNotNeutered* instr) {
+  Register view = ToRegister(instr->view());
+  Register scratch = scratch0();
+
+  __ lw(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
+  __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+  __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
+  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ GetObjectType(input, scratch, scratch);
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    // If there is only one type in the interval check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+                   Operand(first));
+    } else {
+      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
+                   Operand(first));
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
+                     Operand(last));
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
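+    // A single-bit mask can be tested directly: with a zero tag the bit must
+    // be clear, otherwise it must be set.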
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
+      __ And(at, scratch, mask);
+      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
+                   at, Operand(zero_reg));
+    } else {
+      __ And(scratch, scratch, Operand(mask));
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+                   Operand(tag));
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+  Register reg = ToRegister(instr->value());
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
+  AllowDeferredHandleDereference smi_check;
+  if (isolate()->heap()->InNewSpace(*object)) {
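+    // The object may be moved by the GC while in new space, so compare
+    // against a cell that the GC keeps up to date rather than embedding the
+    // object directly in the code.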
+    Handle<Cell> cell = isolate()->factory()->NewCell(object);
+    __ li(at, Operand(cell));
+    __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
+    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
+  }
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  {
+    PushSafepointRegistersScope scope(this);
+    __ push(object);
+    __ mov(cp, zero_reg);
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(v0, scratch0());
+  }
+  __ SmiTst(scratch0(), at);
+  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
+               Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  class DeferredCheckMaps final : public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+        : LDeferredCode(codegen), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    void Generate() override {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->IsStabilityCheck()) {
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }
+
+  Register map_reg = scratch0();
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  Register reg = ToRegister(input);
+  __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+    __ bind(deferred->check_maps());
+  }
+
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+  Label success;
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
+    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
+  }
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
+  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
+  }
+
+  __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  Label is_smi, done, heap_number;
+
+  // Both smi and heap number cases are handled.
+  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
+
+  // Check for heap number
+  __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
+               Operand(factory()->undefined_value()));
+  __ mov(result_reg, zero_reg);
+  __ jmp(&done);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
+                                             HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+  __ jmp(&done);
+
+  __ bind(&is_smi);
+  __ ClampUint8(result_reg, scratch);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->value());
+  Register result_reg = ToRegister(instr->result());
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ FmoveHigh(result_reg, value_reg);
+  } else {
+    __ FmoveLow(result_reg, value_reg);
+  }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
+  __ Move(result_reg, lo_reg, hi_reg);
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate final : public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  // Allocate memory for the object.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
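+    // Fill the allocated object with one-word fillers from the top of the
+    // object down to its (untagged) base, so the heap stays iterable.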
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ li(scratch, Operand(size - kHeapObjectTag));
+    } else {
+      __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
+    }
+    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    Label loop;
+    __ bind(&loop);
+    __ Subu(scratch, scratch, Operand(kPointerSize));
+    __ Addu(at, result, Operand(scratch));
+    __ sw(scratch2, MemOperand(at));
+    __ Branch(&loop, ge, scratch, Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  if (instr->size()->IsRegister()) {
+    Register size = ToRegister(instr->size());
+    DCHECK(!size.is(result));
+    __ SmiTag(size);
+    __ push(size);
+  } else {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    if (size >= 0 && size <= Smi::kMaxValue) {
+      __ Push(Smi::FromInt(size));
+    } else {
+      // We should never get here at runtime => abort
+      __ stop("invalid allocation size");
+      return;
+    }
+  }
+
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ Push(Smi::FromInt(flags));
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  DCHECK(ToRegister(instr->value()).is(a0));
+  DCHECK(ToRegister(instr->result()).is(v0));
+  __ push(a0);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  DCHECK(ToRegister(instr->value()).is(a3));
+  DCHECK(ToRegister(instr->result()).is(v0));
+  Label end, do_call;
+  Register value_register = ToRegister(instr->value());
+  __ JumpIfNotSmi(value_register, &do_call);
+  __ li(v0, Operand(isolate()->factory()->number_string()));
+  __ jmp(&end);
+  __ bind(&do_call);
+  TypeofStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&end);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+
+  Register cmp1 = no_reg;
+  Operand cmp2 = Operand(no_reg);
+
+  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+                                                  instr->FalseLabel(chunk_),
+                                                  input,
+                                                  instr->type_literal(),
+                                                  &cmp1,
+                                                  &cmp2);
+
+  DCHECK(cmp1.is_valid());
+  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
+
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name,
+                                 Register* cmp1,
+                                 Operand* cmp2) {
+  // This function makes heavy use of branch delay slots, using them to load
+  // values that are usable regardless of the type of the input register.
+  Condition final_branch_condition = kNoCondition;
+  Register scratch = scratch0();
+  Factory* factory = isolate()->factory();
+  if (String::Equals(type_name, factory->number_string())) {
+    __ JumpIfSmi(input, true_label);
+    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    *cmp1 = input;
+    *cmp2 = Operand(at);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->string_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, input, scratch);
+    *cmp1 = scratch;
+    *cmp2 = Operand(FIRST_NONSTRING_TYPE);
+    final_branch_condition = lt;
+
+  } else if (String::Equals(type_name, factory->symbol_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, input, scratch);
+    *cmp1 = scratch;
+    *cmp2 = Operand(SYMBOL_TYPE);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->boolean_string())) {
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    *cmp1 = at;
+    *cmp2 = Operand(input);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->undefined_string())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    // The first instruction of JumpIfSmi is an And - it is safe in the delay
+    // slot.
+    __ JumpIfSmi(input, false_label);
+    // Check for undetectable objects => true.
+    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    *cmp1 = at;
+    *cmp2 = Operand(zero_reg);
+    final_branch_condition = ne;
+
+  } else if (String::Equals(type_name, factory->function_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ And(scratch, scratch,
+           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    *cmp1 = scratch;
+    *cmp2 = Operand(1 << Map::kIsCallable);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->object_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    __ GetObjectType(input, scratch, scratch1());
+    __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
+    // Check for callable or undetectable objects => false.
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ And(at, scratch,
+           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    *cmp1 = at;
+    *cmp2 = Operand(zero_reg);
+    final_branch_condition = eq;
+
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
+  } else if (String::Equals(type_name, factory->type##_string())) {  \
+    __ JumpIfSmi(input, false_label);                                \
+    __ lw(input, FieldMemOperand(input, HeapObject::kMapOffset));    \
+    __ LoadRoot(at, Heap::k##Type##MapRootIndex);                    \
+    *cmp1 = input;                                                   \
+    *cmp2 = Operand(at);                                             \
+    final_branch_condition = eq;
+  SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+    // clang-format on
+
+  } else {
+    *cmp1 = at;
+    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
+    __ Branch(false_label);
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= Assembler::kInstrSize;
+      }
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
+  }
+
+  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
+               Operand(zero_reg));
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  LoadContextFromDeferred(instr->context());
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck final : public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStackCheck* instr_;
+  };
+
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(&done, hs, sp, Operand(at));
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(cp));
+    CallCode(isolate()->builtins()->StackCheck(),
+             RelocInfo::CODE_TARGET,
+             instr);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new(zone()) DeferredStackCheck(this, instr);
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting call and the safepoint in
+    // the deferred code.
+  }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  DCHECK(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  Register result = ToRegister(instr->result());
+  Register object = ToRegister(instr->object());
+  __ And(at, object, kSmiTagMask);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+
+  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+  __ GetObjectType(object, a1, a1);
+  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
+               Operand(JS_PROXY_TYPE));
+
+  Label use_cache, call_runtime;
+  DCHECK(object.is(a0));
+  Register null_value = t1;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ CheckEnumCache(null_value, &call_runtime);
+
+  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ Branch(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(object);
+  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
+
+  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  DCHECK(result.is(v0));
+  __ LoadRoot(at, Heap::kMetaMapRootIndex);
+  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  Label load_cache, done;
+  __ EnumLength(result, map);
+  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
+  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
+  __ jmp(&done);
+
+  __ bind(&load_cache);
+  __ LoadInstanceDescriptors(map, result);
+  __ lw(result,
+        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+  __ lw(result,
+        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  Register map = ToRegister(instr->map());
+  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ Push(object, index);
+  __ mov(cp, zero_reg);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+     instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble final : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register result,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {
+    }
+    void Generate() override {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, result, object, index);
+
+  Label out_of_object, done;
+
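+  // The incoming index is a smi with extra information folded in: bit 0
+  // of the untagged value flags a mutable heap number (handled in the
+  // deferred code), and the sign distinguishes in-object fields (>= 0)
+  // from properties in the out-of-object backing store (< 0).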
+  __ And(scratch, index, Operand(Smi::FromInt(1)));
+  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
+  __ sra(index, index, 1);
+
+  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
+  __ sll(scratch, index, kPointerSizeLog2 - kSmiTagSize);  // In delay slot.
+
+  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
+  __ Addu(scratch, object, scratch);
+  __ lw(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+  __ Branch(&done);
+
+  __ bind(&out_of_object);
+  __ lw(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // The index is equal to the negated out-of-object property index plus 1.
+  __ Subu(scratch, result, scratch);
+  __ lw(result, FieldMemOperand(scratch,
+                                FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+  Register context = ToRegister(instr->context());
+  __ sw(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+  Handle<ScopeInfo> scope_info = instr->scope_info();
+  __ li(at, scope_info);
+  __ Push(at, ToRegister(instr->function()));
+  CallRuntime(Runtime::kPushBlockContext, instr);
+  RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/mips/lithium-codegen-mips.h b/src/crankshaft/mips/lithium-codegen-mips.h
new file mode 100644
index 0000000..160ab9a
--- /dev/null
+++ b/src/crankshaft/mips/lithium-codegen-mips.h
@@ -0,0 +1,422 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
+#define V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
+
+#include "src/ast/scopes.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
+#include "src/crankshaft/mips/lithium-mips.h"
+#include "src/deoptimizer.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : LCodeGenBase(chunk, assembler, info),
+        jump_table_(4, info->zone()),
+        scope_(info->scope()),
+        deferred_(8, info->zone()),
+        frame_is_built_(false),
+        safepoints_(info->zone()),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
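+  // A frame is built eagerly in the prologue unless this is a stub that
+  // neither spills, makes non-deferred calls, nor otherwise requires a
+  // frame; such stubs may still build one lazily from deferred code.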
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub() ||
+        info()->requires_frame();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
+  RAStatus GetRAState() const {
+    return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
+  }
+
+  // Support for converting LOperands to assembler types.
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LOperand must be a double register.
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  // LOperand is loaded into dbl_scratch, unless already a double register.
+  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+                                        FloatRegister flt_scratch,
+                                        DoubleRegister dbl_scratch);
+  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+  int32_t ToInteger32(LConstantOperand* op) const;
+  Smi* ToSmi(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+  MemOperand ToHighMemOperand(LOperand* op) const;
+
+  bool IsInteger32(LConstantOperand* op) const;
+  bool IsSmi(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  void DoDeferredNumberTagIU(LInstruction* instr,
+                             LOperand* value,
+                             LOperand* temp1,
+                             LOperand* temp2,
+                             IntegerSignedness signedness);
+
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocate(LAllocate* instr);
+  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register result,
+                                   Register object,
+                                   Register index);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  MemOperand PrepareKeyedOperand(Register key,
+                                 Register base,
+                                 bool key_is_constant,
+                                 int constant_key,
+                                 int element_size,
+                                 int shift_size,
+                                 int base_offset);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  LanguageMode language_mode() const { return info()->language_mode(); }
+
+  Scope* scope() const { return scope_; }
+
+  Register scratch0() { return kLithiumScratchReg; }
+  Register scratch1() { return kLithiumScratchReg2; }
+  DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
+
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  void GenerateBodyInstructionPre(LInstruction* instr) override;
+  bool GeneratePrologue();
+  bool GenerateDeferredCode();
+  bool GenerateJumpTable();
+  bool GenerateSafepointTable();
+
+  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+  void GenerateOsrPrologue();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, function->nargs, instr);
+  }
+
+  void LoadContextFromDeferred(LOperand* context);
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr,
+                               LOperand* context);
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in a1.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count, int arity,
+                         LInstruction* instr);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
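+  // MIPS branches compare two operands instead of testing condition
+  // flags, so the deoptimization condition is expressed as a triple
+  // (condition, src1, src2).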
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason,
+                    Deoptimizer::BailoutType bailout_type,
+                    Register src1 = zero_reg,
+                    const Operand& src2 = Operand(zero_reg));
+  void DeoptimizeIf(
+      Condition condition, LInstruction* instr,
+      Deoptimizer::DeoptReason deopt_reason = Deoptimizer::kNoReason,
+      Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg));
+
+  void AddToTranslation(LEnvironment* environment,
+                        Translation* translation,
+                        LOperand* op,
+                        bool is_tagged,
+                        bool is_uint32,
+                        int* object_index_pointer,
+                        int* dematerialized_index_pointer);
+
+  Register ToRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
+
+  MemOperand BuildSeqStringOperand(Register string,
+                                   LOperand* index,
+                                   String::Encoding encoding);
+
+  void EmitIntegerMathAbs(LMathAbs* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    Safepoint::DeoptMode mode);
+
+  void RecordAndWritePosition(int position) override;
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block);
+
+  // EmitBranch expects to be the last instruction of a block.
+  template<class InstrType>
+  void EmitBranch(InstrType instr,
+                  Condition condition,
+                  Register src1,
+                  const Operand& src2);
+  template<class InstrType>
+  void EmitBranchF(InstrType instr,
+                   Condition condition,
+                   FPURegister src1,
+                   FPURegister src2);
+  template <class InstrType>
+  void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
+                      const Operand& src2);
+  template <class InstrType>
+  void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
+                       const Operand& src2);
+  template<class InstrType>
+  void EmitFalseBranchF(InstrType instr,
+                        Condition condition,
+                        FPURegister src1,
+                        FPURegister src2);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+                        DoubleRegister result, NumberUntagDMode mode);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  // Returns two registers in cmp1 and cmp2 that can be used in the
+  // Branch instruction after EmitTypeofIs.
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name,
+                         Register* cmp1,
+                         Operand* cmp2);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string,
+                         SmiCheck check_needed);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset,
+                    AllocationSiteMode mode);
+  // Emit optimized code for integer division.
+  // Inputs are signed.
+  // All registers are clobbered.
+  // If 'remainder' is no_reg, it is not computed.
+  void EmitSignedIntegerDivisionByConstant(Register result,
+                                           Register dividend,
+                                           int32_t divisor,
+                                           Register remainder,
+                                           Register scratch,
+                                           LEnvironment* environment);
+
+
+  void EnsureSpaceForLazyDeopt(int space_needed) override;
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+  template <class T>
+  void EmitVectorStoreICRegisters(T* instr);
+
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+  Scope* const scope_;
+  ZoneList<LDeferredCode*> deferred_;
+  bool frame_is_built_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
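+  // While a scope of this kind is active, safepoints must be recorded
+  // with registers: the constructor saves the register state through
+  // StoreRegistersStateStub and the destructor restores it.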
+  class PushSafepointRegistersScope final BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+
+      StoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->push(ra);
+      codegen_->masm_->CallStub(&stub);
+    }
+
+    ~PushSafepointRegistersScope() {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      RestoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->push(ra);
+      codegen_->masm_->CallStub(&stub);
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
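+// A piece of out-of-line code emitted after the main instruction stream.
+// Main code jumps to entry(); deferred code returns through exit(), which
+// SetExit() can redirect to an external label.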
+class LDeferredCode : public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() {}
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  int instruction_index_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_MIPS_LITHIUM_CODEGEN_MIPS_H_
diff --git a/src/crankshaft/mips/lithium-gap-resolver-mips.cc b/src/crankshaft/mips/lithium-gap-resolver-mips.cc
new file mode 100644
index 0000000..e25a32d
--- /dev/null
+++ b/src/crankshaft/mips/lithium-gap-resolver-mips.cc
@@ -0,0 +1,299 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/mips/lithium-gap-resolver-mips.h"
+
+#include "src/crankshaft/mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner),
+      moves_(32, owner->zone()),
+      root_index_(0),
+      in_cycle_(false),
+      saved_destination_(NULL) {}
+
+
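+// Resolves a parallel move in two passes: first the moves with
+// non-constant sources (breaking any cycles through a scratch register),
+// then the constant-source moves, which can never block another move.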
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  DCHECK(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+      PerformMove(i);
+      if (in_cycle_) {
+        RestoreValue();
+      }
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      DCHECK(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+
+  // We can only find a cycle, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+  // the starting move, we break the cycle.  All moves are then unblocked,
+  // and the starting move is completed by writing the spilled value to
+  // its destination.  All other moves from the spilled source have been
+  // completed prior to breaking the cycle.
+  // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill the saved value to
+  // the stack, to free up the register.
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack allocated local.  Multiple moves can
+  // be pending because this function is recursive.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move, it must be moves_[root_index_],
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    DCHECK(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+  // We save in a register the value that should end up in the source of
+  // moves_[root_index].  After performing all moves in the tree rooted
+  // in that move, we save the value to that source.
+  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  DCHECK(!in_cycle_);
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+  if (source->IsRegister()) {
+    __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    __ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+  // This move will be done by restoring the saved value to the destination.
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+  DCHECK(in_cycle_);
+  DCHECK(saved_destination_ != NULL);
+
+  // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
+  if (saved_destination_->IsRegister()) {
+    __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
+  } else if (saved_destination_->IsStackSlot()) {
+    __ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
+  } else if (saved_destination_->IsDoubleRegister()) {
+    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
+            kLithiumScratchDouble);
+  } else if (saved_destination_->IsDoubleStackSlot()) {
+    __ sdc1(kLithiumScratchDouble,
+            cgen_->ToMemOperand(saved_destination_));
+  } else {
+    UNREACHABLE();
+  }
+
+  in_cycle_ = false;
+  saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+
+  if (source->IsRegister()) {
+    Register source_register = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(cgen_->ToRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      __ sw(source_register, cgen_->ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ lw(cgen_->ToRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        if (!destination_operand.OffsetIsInt16Encodable()) {
+          // 'at' is overwritten while saving the value to the destination.
+          // Therefore we can't use 'at'.  It is OK if the read from the source
+          // destroys 'at', since that happens before the value is saved.
+          // This uses only a single reg of the double reg-pair.
+          __ lwc1(kLithiumScratchDouble, source_operand);
+          __ swc1(kLithiumScratchDouble, destination_operand);
+        } else {
+          __ lw(at, source_operand);
+          __ sw(at, destination_operand);
+        }
+      } else {
+        __ lw(kLithiumScratchReg, source_operand);
+        __ sw(kLithiumScratchReg, destination_operand);
+      }
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      Representation r = cgen_->IsSmi(constant_source)
+          ? Representation::Smi() : Representation::Integer32();
+      if (cgen_->IsInteger32(constant_source)) {
+        __ li(dst, Operand(cgen_->ToRepresentation(constant_source, r)));
+      } else {
+        __ li(dst, cgen_->ToHandle(constant_source));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      DoubleRegister result = cgen_->ToDoubleRegister(destination);
+      double v = cgen_->ToDouble(constant_source);
+      __ Move(result, v);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      Representation r = cgen_->IsSmi(constant_source)
+          ? Representation::Smi() : Representation::Integer32();
+      if (cgen_->IsInteger32(constant_source)) {
+        __ li(kLithiumScratchReg,
+              Operand(cgen_->ToRepresentation(constant_source, r)));
+      } else {
+        __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
+      }
+      __ sw(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      __ sdc1(source_register, destination_operand);
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        // kLithiumScratchDouble was used to break the cycle,
+        // but kLithiumScratchReg is free.
+        MemOperand source_high_operand =
+            cgen_->ToHighMemOperand(source);
+        MemOperand destination_high_operand =
+            cgen_->ToHighMemOperand(destination);
+        __ lw(kLithiumScratchReg, source_operand);
+        __ sw(kLithiumScratchReg, destination_operand);
+        __ lw(kLithiumScratchReg, source_high_operand);
+        __ sw(kLithiumScratchReg, destination_high_operand);
+      } else {
+        __ ldc1(kLithiumScratchDouble, source_operand);
+        __ sdc1(kLithiumScratchDouble, destination_operand);
+      }
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/mips/lithium-gap-resolver-mips.h b/src/crankshaft/mips/lithium-gap-resolver-mips.h
new file mode 100644
index 0000000..6c5fd03
--- /dev/null
+++ b/src/crankshaft/mips/lithium-gap-resolver-mips.h
@@ -0,0 +1,59 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+#define V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
+
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver final BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_MIPS_LITHIUM_GAP_RESOLVER_MIPS_H_
diff --git a/src/crankshaft/mips/lithium-mips.cc b/src/crankshaft/mips/lithium-mips.cc
new file mode 100644
index 0000000..a9978e1
--- /dev/null
+++ b/src/crankshaft/mips/lithium-mips.cc
@@ -0,0 +1,2586 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/mips/lithium-mips.h"
+
+#include <sstream>
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/mips/lithium-codegen-mips.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
+  DCHECK(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::ROR: return "ror-t";
+    case Token::SHL: return "sll-t";
+    case Token::SAR: return "sra-t";
+    case Token::SHR: return "srl-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new(zone()) LDebugBreak();
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(),
+              true_block_id(), false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+
+void LCallFunction::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  function()->PrintTo(stream);
+  if (hydrogen()->HasVectorAndSlot()) {
+    stream->Add(" (type-feedback-vector ");
+    temp_vector()->PrintTo(stream);
+    stream->Add(" ");
+    temp_slot()->PrintTo(stream);
+    stream->Add(")");
+  }
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  std::ostringstream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.str().c_str());
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d]", base_offset());
+  } else {
+    stream->Add("]");
+  }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d] <-", base_offset());
+  } else {
+    stream->Add("] <- ");
+  }
+
+  if (value() == NULL) {
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
+           hydrogen()->value()->representation().IsDouble());
+    stream->Add("<the hole(nan)>");
+  } else {
+    value()->PrintTo(stream);
+  }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Reserve an extra slot for a double-width spill slot.
+  if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
+  return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  int index = GetNextSpillIndex(kind);
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    DCHECK(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+  DCHECK(is_unused());
+  chunk_ = new(zone()) LPlatformChunk(info(), graph());
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new (zone())
+      LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
+}
+
+
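+// Each Use* helper below wraps an HValue in an LUnallocated operand that
+// carries a register-allocation policy: a fixed register, any register,
+// register-or-constant, use-at-start, and so on.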
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr, int index) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(
+    LTemplateResultInstruction<1>* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator,
+                                           &objects_to_materialize));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  // If the instruction does not have observable side effects, lazy
+  // deoptimization after the call will try to deoptimize to the point
+  // before the call. Thus we still need to attach an environment to this
+  // call even if the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  DCHECK(!instr->HasPointerMap());
+  instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+
+    HValue* right_value = instr->right();
+    LOperand* right = NULL;
+    int constant_value = 0;
+    bool does_deopt = false;
+    if (right_value->IsConstant()) {
+      HConstant* constant = HConstant::cast(right_value);
+      right = chunk_->DefineConstantOperand(constant);
+      constant_value = constant->Integer32Value() & 0x1f;
+      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+      // truncated to smi.
+      if (instr->representation().IsSmi() && constant_value > 0) {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
+    } else {
+      right = UseRegisterAtStart(right_value);
+    }
+
+    // Shift operations can only deoptimize if we do a logical shift
+    // by 0 and the result cannot be truncated to int32.
+    if (op == Token::SHR && constant_value == 0) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    }
+
+    LInstruction* result =
+        DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+    return does_deopt ? AssignEnvironment(result) : result;
+  } else {
+    return DoArithmeticT(op, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
+  if (op == Token::MOD) {
+    LOperand* left = UseFixedDouble(instr->left(), f2);
+    LOperand* right = UseFixedDouble(instr->right(), f4);
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    // We call a C function for double modulo. It can't trigger a GC. We need
+    // to use fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    return MarkAsCall(DefineFixedDouble(result, f2), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return DefineAsRegister(result);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
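+  // Tagged (generic) arithmetic goes through a code stub with a fixed
+  // register signature: left in a1, right in a0, context in cp, and the
+  // result in v0.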
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left_operand = UseFixed(left, a1);
+  LOperand* right_operand = UseFixed(right, a0);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  DCHECK(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    DCHECK(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    DCHECK(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      DCHECK(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    DCHECK(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment, it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
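+
+// Editor's illustration (not part of the original change): when a
+// predecessor ends in a two-way branch and either successor has a higher
+// block id (i.e. is processed later), the predecessor's environment is
+// still needed there, so this block must work on a copy rather than
+// mutating it in place.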
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new(zone()) LDummy());
+    } else {
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new(zone())
+          LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    HBasicBlock* successor;
+    if (current->IsControlInstruction() &&
+        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+        successor != NULL) {
+      instr = new(zone()) LGoto(successor);
+    } else {
+      instr = current->CompileToLithium(this);
+    }
+  }
+
+  argument_count_ += current->argument_delta();
+  DCHECK(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    AddInstruction(instr, current);
+  }
+
+  current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+                                   HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+  instr->set_hydrogen_value(hydrogen_val);
+
+#if DEBUG
+  // Make sure that the lithium instruction has either no fixed register
+  // constraints in temps or the result OR no uses that are only used at
+  // start. If this invariant doesn't hold, the register allocator can decide
+  // to insert a split of a range immediately before the instruction due to an
+  // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+  // the same register that is used for the use-at-start.
+  // See https://code.google.com/p/chromium/issues/detail?id=201590
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
+    int fixed = 0;
+    int used_at_start = 0;
+    for (UseIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->IsUsedAtStart()) ++used_at_start;
+    }
+    if (instr->Output() != NULL) {
+      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+    }
+    for (TempIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->HasFixedPolicy()) ++fixed;
+    }
+    DCHECK(fixed == 0 || used_at_start == 0);
+  }
+#endif
+
+  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+    instr = AssignPointerMap(instr);
+  }
+  if (FLAG_stress_environments && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+  chunk_->AddInstruction(instr, current_block_);
+
+  if (instr->IsCall() || instr->IsPrologue()) {
+    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+    if (hydrogen_val->HasObservableSideEffects()) {
+      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+      sim->ReplayEnvironment(current_block_->last_environment());
+      hydrogen_value_for_lazy_bailout = sim;
+    }
+    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+    chunk_->AddInstruction(bailout, current_block_);
+  }
+}
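+
+// Editor's sketch (not part of the original change) of the instruction
+// stream AddInstruction produces for a call with observable side effects:
+//
+//   LCallRuntime   ; hydrogen value: the HCallRuntime
+//   LLazyBailout   ; hydrogen value: the following HSimulate, whose
+//                  ; environment was replayed above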
+
+
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+  return new (zone()) LPrologue();
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+      type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+  LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
+  }
+  return branch;
+}
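+
+// Editor's note (illustrative): the "easy" cases are values whose truthiness
+// can be decided without deopting: untagged representations, or tagged
+// values with a statically known type. Otherwise, if the recorded ToBoolean
+// type feedback is incomplete, the branch may see a type it has no code for
+// and needs the environment above to deoptimize.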
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(
+      new(zone()) LArgumentsLength(UseRegister(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new(zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* left =
+      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+  LOperand* right =
+      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+    HHasInPrototypeChainAndBranch* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* prototype = UseRegister(instr->prototype());
+  LHasInPrototypeChainAndBranch* result =
+      new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegisterAtStart(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+  LOperand* receiver = UseFixed(instr->receiver(), a0);
+  LOperand* length = UseFixed(instr->length(), a2);
+  LOperand* elements = UseFixed(instr->elements(), a3);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
+  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+  int argc = instr->OperandCount();
+  for (int i = 0; i < argc; ++i) {
+    LOperand* argument = Use(instr->argument(i));
+    AddInstruction(new(zone()) LPushArgument(argument), instr);
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+    HStoreCodeEntry* store_code_entry) {
+  LOperand* function = UseRegister(store_code_entry->function());
+  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+  return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(
+      new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new(zone()) LContext, cp);
+  }
+
+  return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+    HCallJSFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+
+  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+    HCallWithDescriptor* instr) {
+  CallInterfaceDescriptor descriptor = instr->descriptor();
+
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  // Target
+  ops.Add(target, zone());
+  // Context
+  LOperand* op = UseFixed(instr->OperandAt(1), cp);
+  ops.Add(op, zone());
+  // Other register parameters
+  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+       i < instr->OperandCount(); i++) {
+    op =
+        UseFixed(instr->OperandAt(i),
+                 descriptor.GetRegisterParameter(
+                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+      descriptor, ops, zone());
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
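+
+// Editor's illustration: for a descriptor with two register parameters the
+// ops list built above is (target, context in cp, param0, param1), with
+// each parameter pinned to the register named by the
+// CallInterfaceDescriptor.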
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), a1);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), f4);
+  return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathClz32* result = new(zone()) LMathClz32(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LOperand* double_temp = TempDoubleRegister();
+  LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+  // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf.
+  LOperand* input = UseFixedDouble(instr->value(), f8);
+  LOperand* temp = TempDoubleRegister();
+  LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+  return DefineFixedDouble(result, f4);
+}
+
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+  Representation r = instr->value()->representation();
+  LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+      ? NULL
+      : UseFixed(instr->context(), cp);
+  LOperand* input = UseRegister(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LMathAbs(context, input));
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = TempRegister();
+  LMathFloor* result = new(zone()) LMathFloor(input, temp);
+  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathSqrt* result = new(zone()) LMathSqrt(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = TempDoubleRegister();
+  LMathRound* result = new(zone()) LMathRound(input, temp);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* constructor = UseFixed(instr->constructor(), a1);
+  LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), a1);
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(a3);
+    vector = FixedTemp(a2);
+  }
+
+  LCallFunction* call =
+      new (zone()) LCallFunction(context, function, slot, vector);
+  return MarkAsCall(DefineFixed(call, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    return DefineAsRegister(new(zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+          dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+       divisor != 1 && divisor != -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
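+
+// Editor's worked example (not part of the original change): with
+// divisor == -4, 0 / -4 evaluates to -0 in JavaScript (the
+// kBailoutOnMinusZero case); with divisor == -1, kMinInt / -1 overflows
+// int32 (the kCanOverflow case); and 5 / 4 == 1.25 has a remainder, so
+// unless all uses truncate to int32 the result is unrepresentable. Each
+// case needs the environment assigned above in order to deoptimize.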
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+          dividend, divisor));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = TempRegister();
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+      (!instr->IsMathFloorOfDiv() &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+          dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp =
+      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+      NULL : TempRegister();
+  LInstruction* result = DefineAsRegister(
+      new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
+    return DoFlooringDivByConstI(instr);
+  } else {
+    return DoFlooringDivI(instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+          dividend, divisor));
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
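+
+// Editor's worked example: JavaScript's % takes the sign of its dividend,
+// so (-4) % 4 === -0. That can only happen when the left operand may be
+// negative and minus zero is observable, which is exactly the condition
+// under which the environment is assigned above.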
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+          dividend, divisor));
+  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result = DefineAsRegister(new(zone()) LModI(
+      dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
+  } else {
+    return DoArithmeticT(Token::MOD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    HValue* left = instr->BetterLeftOperand();
+    HValue* right = instr->BetterRightOperand();
+    LOperand* left_op;
+    LOperand* right_op;
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+    int32_t constant_value = 0;
+    if (right->IsConstant()) {
+      HConstant* constant = HConstant::cast(right);
+      constant_value = constant->Integer32Value();
+      // Constants -1, 0 and 1 can be optimized even when the result can
+      // overflow. Other constants can be optimized only when overflow is
+      // impossible.
+      if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+        left_op = UseRegisterAtStart(left);
+        right_op = UseConstant(right);
+      } else {
+        if (bailout_on_minus_zero) {
+          left_op = UseRegister(left);
+        } else {
+          left_op = UseRegisterAtStart(left);
+        }
+        right_op = UseRegister(right);
+      }
+    } else {
+      if (bailout_on_minus_zero) {
+        left_op = UseRegister(left);
+      } else {
+        left_op = UseRegisterAtStart(left);
+      }
+      right_op = UseRegister(right);
+    }
+    LMulI* mul = new(zone()) LMulI(left_op, right_op);
+    if (right_op->IsConstantOperand()
+            ? ((can_overflow && constant_value == -1) ||
+               (bailout_on_minus_zero && constant_value <= 0))
+            : (can_overflow || bailout_on_minus_zero)) {
+      AssignEnvironment(mul);
+    }
+    return DefineAsRegister(mul);
+
+  } else if (instr->representation().IsDouble()) {
+    if (IsMipsArchVariant(kMips32r2)) {
+      if (instr->HasOneUse() && instr->uses().value()->IsAdd()) {
+        HAdd* add = HAdd::cast(instr->uses().value());
+        if (instr == add->left()) {
+          // This mul is the lhs of an add. The add and mul will be folded
+          // into a multiply-add.
+          return NULL;
+        }
+        if (instr == add->right() && !add->left()->IsMul()) {
+          // This mul is the rhs of an add, where the lhs is not another mul.
+          // The add and mul will be folded into a multiply-add.
+          return NULL;
+        }
+      }
+    }
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
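+
+// Editor's note: on MIPS32R2 the returns of NULL above make a * b + c
+// compile to a single fused instruction; DoAdd recognizes the HMul operand
+// and builds an LMultiplyAddD (see DoMultiplyAdd below) instead of separate
+// multiply and add instructions.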
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LSubI* sub = new(zone()) LSubI(left, right);
+    LInstruction* result = DefineAsRegister(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+  LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+  LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+  LOperand* addend_op = UseRegisterAtStart(addend);
+  return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+                                                     multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    LAddI* add = new(zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsExternal()) {
+    DCHECK(instr->IsConsistentExternalRepresentation());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LAddI* add = new(zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    if (IsMipsArchVariant(kMips32r2)) {
+      if (instr->left()->IsMul())
+        return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+
+      if (instr->right()->IsMul()) {
+        DCHECK(!instr->left()->IsMul());
+        return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+      }
+    }
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    left = UseRegisterAtStart(instr->left());
+    right = UseRegisterAtStart(instr->right());
+  }
+  return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  DCHECK(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), f2);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), f4)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+  LPower* result = new(zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, f0),
+                    instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+    HCompareMinusZeroAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  LOperand* scratch = TempRegister();
+  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()),
+                                        temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsUndetectableAndBranch(
+      UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(context, left, right);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasInstanceTypeAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+                                           TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+  LOperand* map = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = FLAG_debug_code
+      ? UseRegisterAtStart(instr->index())
+      : UseRegisterOrConstantAtStart(instr->index());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+  return new(zone()) LSeqStringSetChar(context, string, index, value);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+      ? UseRegisterOrConstantAtStart(instr->length())
+      : UseRegisterAtStart(instr->length());
+  LInstruction* result = new(zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+    HBoundsCheckBaseIndexInformation* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception).  There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  HValue* val = instr->value();
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(val);
+      return DefineSameAsFirst(new(zone()) LDummyUse(value));
+    }
+    from = Representation::Tagged();
+  }
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
+        return DefineSameAsFirst(new(zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempDoubleRegister();
+        LInstruction* result =
+            DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegister(val);
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+      return AssignPointerMap(Define(result, result_temp));
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      return AssignEnvironment(
+          DefineAsRegister(new(zone()) LDoubleToSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+      if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+      return result;
+    }
+  } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      if (!instr->CheckFlag(HValue::kCanOverflow)) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiTag(value));
+      } else if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      } else {
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+      if (instr->CheckFlag(HValue::kCanOverflow)) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(to.IsDouble());
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
+      } else {
+        return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
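+
+// Editor's summary of the representation changes above (illustrative, not
+// exhaustive):
+//   Smi       -> Tagged   : LDummyUse (same value, no code)
+//   Tagged    -> Double   : LNumberUntagD (+ environment unless input is Smi)
+//   Tagged    -> Integer32: LSmiUntag, or LTaggedToI with deopt support
+//   Double    -> Tagged   : LNumberTagD (may allocate, hence the pointer map)
+//   Integer32 -> Tagged   : LSmiTag / LNumberTagU / LNumberTagI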
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckNonSmi(value);
+  if (!instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
+    HCheckArrayBufferNotNeutered* instr) {
+  LOperand* view = UseRegisterAtStart(instr->value());
+  LCheckArrayBufferNotNeutered* result =
+      new (zone()) LCheckArrayBufferNotNeutered(view);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckInstanceType(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    // Revisit this decision, here and in the SmiOrTagged clamp below.
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg,
+        TempDoubleRegister()));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+  } else {
+    DCHECK(input_rep.IsSmiOrTagged());
+    LClampTToUint8* result =
+        new(zone()) LClampTToUint8(reg, TempDoubleRegister());
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+  HValue* value = instr->value();
+  DCHECK(value->representation().IsDouble());
+  return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+  LOperand* lo = UseRegister(instr->lo());
+  LOperand* hi = UseRegister(instr->hi());
+  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub()
+      ? UseFixed(instr->context(), cp)
+      : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new(zone()) LReturn(UseFixed(instr->value(), v0), context,
+                             parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new(zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new(zone()) LConstantD);
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new(zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new(zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value);
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LInstruction* result = NULL;
+
+  if (!instr->is_fixed_typed_array()) {
+    LOperand* obj = NULL;
+    if (instr->representation().IsDouble()) {
+      obj = UseRegister(instr->elements());
+    } else {
+      DCHECK(instr->representation().IsSmiOrTagged());
+      obj = UseRegisterAtStart(instr->elements());
+    }
+    result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
+  } else {
+    DCHECK(
+        (instr->representation().IsInteger32() &&
+         !IsDoubleOrFloatElementsKind(elements_kind)) ||
+        (instr->representation().IsDouble() &&
+         IsDoubleOrFloatElementsKind(elements_kind)));
+    LOperand* backing_store = UseRegister(instr->elements());
+    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+    result = DefineAsRegister(
+        new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
+  }
+
+  bool needs_environment;
+  if (instr->is_fixed_typed_array()) {
+    // see LCodeGen::DoLoadKeyedExternalArray
+    needs_environment = elements_kind == UINT32_ELEMENTS &&
+                        !instr->CheckFlag(HInstruction::kUint32);
+  } else {
+    // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+    // LCodeGen::DoLoadKeyedFixedArray
+    needs_environment =
+        instr->RequiresHoleCheck() ||
+        (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
+  }
+
+  if (needs_environment) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
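+
+// Editor's note: the UINT32_ELEMENTS case deopts because a loaded element
+// above kMaxInt has no int32 representation unless every use treats the
+// value as uint32; the fixed-array cases deopt when the load has to check
+// for (or convert) the hole.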
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
+                  v0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  if (!instr->is_fixed_typed_array()) {
+    DCHECK(instr->elements()->representation().IsTagged());
+    bool needs_write_barrier = instr->NeedsWriteBarrier();
+    LOperand* object = NULL;
+    LOperand* val = NULL;
+    LOperand* key = NULL;
+
+    if (instr->value()->representation().IsDouble()) {
+      object = UseRegisterAtStart(instr->elements());
+      key = UseRegisterOrConstantAtStart(instr->key());
+      val = UseRegister(instr->value());
+    } else {
+      DCHECK(instr->value()->representation().IsSmiOrTagged());
+      if (needs_write_barrier) {
+        object = UseTempRegister(instr->elements());
+        val = UseTempRegister(instr->value());
+        key = UseTempRegister(instr->key());
+      } else {
+        object = UseRegisterAtStart(instr->elements());
+        val = UseRegisterAtStart(instr->value());
+        key = UseRegisterOrConstantAtStart(instr->key());
+      }
+    }
+
+    return new (zone()) LStoreKeyed(object, key, val, nullptr);
+  }
+
+  DCHECK(
+      (instr->value()->representation().IsInteger32() &&
+       !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+      (instr->value()->representation().IsDouble() &&
+       IsDoubleOrFloatElementsKind(instr->elements_kind())));
+  DCHECK(instr->elements()->representation().IsExternal());
+  LOperand* val = UseRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LOperand* backing_store = UseRegister(instr->elements());
+  LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+  return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
+
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreKeyedGeneric* result =
+      new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+    return result;
+  } else {
+    LOperand* object = UseFixed(instr->object(), a0);
+    LOperand* context = UseFixed(instr->context(), cp);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, context, NULL);
+    return MarkAsCall(result, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = Use(instr->object());
+  LOperand* elements = Use(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+  LMaybeGrowElements* result = new (zone())
+      LMaybeGrowElements(context, object, elements, key, current_capacity);
+  DefineFixed(result, v0);
+  return AssignPointerMap(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool is_in_object = instr->access().IsInobject();
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  bool needs_write_barrier_for_map = instr->has_transition() &&
+      instr->NeedsWriteBarrierForMap();
+
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = is_in_object
+        ? UseRegister(instr->object())
+        : UseTempRegister(instr->object());
+  } else {
+    obj = needs_write_barrier_for_map
+        ? UseRegister(instr->object())
+        : UseRegisterAtStart(instr->object());
+  }
+
+  LOperand* val;
+  if (needs_write_barrier) {
+    val = UseTempRegister(instr->value());
+  } else if (instr->field_representation().IsDouble()) {
+    val = UseRegisterAtStart(instr->value());
+  } else {
+    val = UseRegister(instr->value());
+  }
+
+  // We need a temporary register for the write barrier of the map field.
+  LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+  return new(zone()) LStoreNamedField(obj, val, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreNamedGeneric* result =
+      new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
+      instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = UseRegisterOrConstant(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  DCHECK(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new(zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor.GetRegisterParameter(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk()->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Retry(kTooManySpillSlotsNeededForOSR);
+      spill_index = 0;
+    }
+  }
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* args = UseRegister(instr->arguments());
+  LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), a0);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* value = UseFixed(instr->value(), a3);
+  LTypeof* result = new (zone()) LTypeof(context, value);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), cp);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
+  } else {
+    DCHECK(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->inlining_kind());
+  // Only replay the binding of the arguments object if it wasn't removed
+  // from the graph.
+  if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->BindContext(instr->closure_context());
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedFunction(instr->shared());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer = current_block_->last_environment()->
+      DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+
+  return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseFixed(instr->enumerable(), a0);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->context());
+  return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+    HAllocateBlockContext* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LAllocateBlockContext* result =
+      new(zone()) LAllocateBlockContext(context, function);
+  return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/crankshaft/mips/lithium-mips.h b/src/crankshaft/mips/lithium-mips.h
new file mode 100644
index 0000000..880d243
--- /dev/null
+++ b/src/crankshaft/mips/lithium-mips.h
@@ -0,0 +1,2746 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
+#define V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckArrayBufferNotNeutered)             \
+  V(CheckInstanceType)                       \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleToI)                               \
+  V(DoubleBits)                              \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInPrototypeChainAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadRoot)                                \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathExp)                                 \
+  V(MathClz32)                               \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(MaybeGrowElements)                       \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(MultiplyAddD)                            \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(Prologue)                                \
+  V(PushArgument)                            \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(TaggedToI)                               \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const final { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) final;              \
+  const char* Mnemonic() const final { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
+  }
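+
+// Illustrative expansion (annotation, not part of the upstream file): for a
+// class using DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") the macro above
+// produces, roughly:
+//
+//   Opcode opcode() const final { return LInstruction::kGoto; }
+//   void CompileToNative(LCodeGen* generator) final;
+//   const char* Mnemonic() const final { return "goto"; }
+//   static LGoto* cast(LInstruction* instr) {
+//     DCHECK(instr->IsGoto());
+//     return reinterpret_cast<LGoto*>(instr);
+//   }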
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
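+
+// Illustrative expansion (annotation, not part of the upstream file):
+// DECLARE_HYDROGEN_ACCESSOR(Add) yields
+//
+//   HAdd* hydrogen() const { return HAdd::cast(hydrogen_value()); }
+//
+// giving the lithium instruction typed access to the hydrogen (high-level IR)
+// node it was lowered from.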
+
+
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) {
+  }
+
+  virtual ~LInstruction() {}
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
+
+  // Interface to the register allocator and iterators.
+  bool IsMarkedAsCall() const { return IsCall(); }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
+ private:
+  // Iterator interface.
+  friend class InputIterator;
+
+  friend class TempIterator;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  class IsCallBits: public BitField<bool, 0, 1> {};
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  bool HasResult() const final { return R != 0 && result() != NULL; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const override { return results_[0]; }
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  int InputCount() final { return I; }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return T; }
+  LOperand* TempAt(int i) final { return temps_[i]; }
+};
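+
+// Annotation (not part of the upstream file): the three template arguments
+// encode an instruction's operand layout. For example, LDivI further down
+// derives from LTemplateInstruction<1, 2, 1>: one result (the quotient), two
+// inputs (dividend and divisor) and one temp register.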
+
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use the DECLARE-macro here because of sub-classes.
+  bool IsGap() const final { return true; }
+  void PrintDataTo(StringStream* stream) override;
+  static LGap* cast(LInstruction* instr) {
+    DCHECK(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
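+
+  // Annotation (not part of the upstream file): these are the four points
+  // around an instruction at which the register allocator may insert
+  // parallel moves; IsRedundant() above reports a gap as removable when all
+  // four slots are empty or contain only no-op moves.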
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new(zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LInstructionGap final : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  void PrintDataTo(StringStream* stream) override;
+  bool IsControl() const override { return true; }
+
+  int block_id() const { return block_->block_id(); }
+
+ private:
+  HBasicBlock* block_;
+};
+
+
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
+class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDummy final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() {}
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) {
+    inputs_[0] = value;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool IsControl() const override { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel final : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallStub(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+  bool IsControl() const final { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+
+  Label* false_label_;
+  Label* true_label_;
+};
+
+
+class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
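+
+// Annotation (not part of the upstream file): for a divisor of +/-2^k the
+// modulus reduces to a bit mask, e.g. x % 8 == x & 7 for non-negative x; a
+// negative dividend needs a fix-up so the result keeps the dividend's sign,
+// matching JavaScript's % semantics.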
+
+
+class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModI final : public LTemplateInstruction<1, 2, 3> {
+ public:
+  LModI(LOperand* left,
+        LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
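+
+// Annotation (not part of the upstream file): flooring division by 2^k is an
+// arithmetic shift right by k, which rounds toward negative infinity:
+// -7 >> 1 == -4 == floor(-7 / 2), whereas truncating division gives -3.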
+
+
+class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+                LOperand* multiplicand) {
+    inputs_[0] = addend;
+    inputs_[1] = multiplier;
+    inputs_[2] = multiplicand;
+  }
+
+  LOperand* addend() { return inputs_[0]; }
+  LOperand* multiplier() { return inputs_[1]; }
+  LOperand* multiplicand() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
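+
+// Annotation (not part of the upstream file): on MIPS this is expected to
+// lower to a single multiply-add FPU instruction (madd.d), avoiding a
+// separate mul.d/add.d pair.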
+
+
+class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->representation().IsDouble();
+  }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LMathFloor final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathFloor(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRound(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathFround final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathAbs(LOperand* context, LOperand* value) {
+    inputs_[1] = context;
+    inputs_[0] = value;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathLog(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathClz32(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+ public:
+  LMathExp(LOperand* value,
+           LOperand* double_temp,
+           LOperand* temp1,
+           LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = double_temp;
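+    // Annotation (not part of the upstream file): the call below prepares
+    // the constants and lookup table that the generated fast exp code reads.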
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSqrt(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathPowHalf(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranch(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+                               "cmp-minus-zero-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsStringAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasInstanceTypeAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() const { return inputs_[0]; }
+  LOperand* left() const { return inputs_[1]; }
+  LOperand* right() const { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+    inputs_[0] = object;
+    inputs_[1] = prototype;
+  }
+
+  LOperand* object() const { return inputs_[0]; }
+  LOperand* prototype() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+                               "has-in-prototype-chain-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
+};
+
+
+class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+  uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
+};
+
+
+class LConstantE final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+
+class LConstantT final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+
+class LBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMapEnumLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSeqStringGetChar(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  LOperand* string() const { return inputs_[0]; }
+  LOperand* index() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LSeqStringSetChar(LOperand* context,
+                    LOperand* string,
+                    LOperand* index,
+                    LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+  }
+
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op,
+               LOperand* context,
+               LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+  Token::Value op() const { return op_; }
+
+  Opcode opcode() const final { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn final : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    DCHECK(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadFunctionPrototype(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = backing_store_owner;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* backing_store_owner() { return inputs_[2]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                    LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
+class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallJSFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
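+  // argument_count() includes the receiver, so subtract one to get the arity.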
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        inputs_(descriptor.GetRegisterParameterCount() +
+                    kImplicitRegisterParameterCount,
+                zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() +
+               kImplicitRegisterParameterCount ==
+           operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  CallInterfaceDescriptor descriptor() const { return descriptor_; }
+
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  // The target and context are passed as implicit parameters that are not
+  // explicitly listed in the descriptor.
+  static const int kImplicitRegisterParameterCount = 2;
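+  // A sketch of the expected operand layout (assuming the context is the
+  // second implicit parameter): for a descriptor with three register
+  // parameters,
+  //   inputs_[0] = target, inputs_[1] = context, inputs_[2..4] = parameters,
+  // so operands.length() == 3 + kImplicitRegisterParameterCount.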
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  CallInterfaceDescriptor descriptor_;
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  int InputCount() final { return inputs_.length(); }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return 0; }
+  LOperand* TempAt(int i) final { return NULL; }
+};
+
+
+class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+                LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTaggedToI(LOperand* value,
+             LOperand* temp,
+             LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberUntagD(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  bool needs_check() const { return needs_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+    inputs_[0] = object;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Representation representation() const {
+    return hydrogen()->field_representation();
+  }
+};
+
+
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+                     LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
+ public:
+  LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value,
+              LOperand* backing_store_owner) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = value;
+    inputs_[3] = backing_store_owner;
+  }
+
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* backing_store_owner() { return inputs_[3]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
+ public:
+  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                     LOperand* value, LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    inputs_[3] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* context,
+                          LOperand* new_map_temp) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = new_map_temp;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object,
+                         LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+                               "trap-allocation-memento")
+};
+
+
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+                     LOperand* key, LOperand* current_capacity) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = elements;
+    inputs_[3] = key;
+    inputs_[4] = current_capacity;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* elements() { return inputs_[2]; }
+  LOperand* key() { return inputs_[3]; }
+  LOperand* current_capacity() { return inputs_[4]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
+class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckValue(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckArrayBufferNotNeutered final
+    : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
+
+  LOperand* view() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
+                               "check-array-buffer-not-neutered")
+  DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
+};
+
+
+class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckInstanceType(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckMaps(LOperand* value = NULL) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LClampDToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampDToUint8(LOperand* unclamped, LOperand* temp) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* unclamped) {
+    inputs_[0] = unclamped;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleBits(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LConstructDouble(LOperand* hi, LOperand* lo) {
+    inputs_[0] = hi;
+    inputs_[1] = lo;
+  }
+
+  LOperand* hi() { return inputs_[0]; }
+  LOperand* lo() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LAllocate(LOperand* context,
+            LOperand* size,
+            LOperand* temp1,
+            LOperand* temp2) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry() {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStoreFrameContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAllocateBlockContext(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk final : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph)
+      : LChunk(info, graph) { }
+
+  int GetNextSpillIndex(RegisterKind kind);
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder final : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(info, graph),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        allocator_(allocator) {}
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+
+  static bool HasMagicNumberForDivisor(int32_t divisor);
+
+  LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+  LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
+  LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+  LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathExp(HUnaryMathOperation* instr);
+  LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+  LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+  LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+  LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
+  LInstruction* DoDivI(HDiv* instr);
+  LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
+  LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction, so the register allocator will not reuse its register
+  // for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only at
+  // the instruction start, so the register allocator is free to assign the
+  // same register to some other operand used inside the instruction (i.e. a
+  // temporary or the output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
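+  // A hypothetical use inside one of the Do* methods below (operand names
+  // are illustrative only):
+  //   LOperand* left = UseRegisterAtStart(instr->left());
+  //   LOperand* right = UseOrConstantAtStart(instr->right());
+  //   return DefineAsRegister(new (zone()) LAddI(left, right));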
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand that must be a constant operand.
+  MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+  // An input operand in a register, a stack slot or a constant operand.
+  // It will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            Register reg);
+  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+                                  DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach environment to this
+  // instruction.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
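+  // A hypothetical use (names are illustrative): a Do* method may end with
+  //   return MarkAsCall(DefineFixed(result, v0), instr);
+  // which keeps the CANNOT_DEOPTIMIZE_EAGERLY default above.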
+
+  void VisitInstruction(HInstruction* current);
+  void AddInstruction(LInstruction* instr, HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HBinaryOperation* instr);
+
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  LAllocator* allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_MIPS_LITHIUM_MIPS_H_
diff --git a/src/crankshaft/mips64/OWNERS b/src/crankshaft/mips64/OWNERS
new file mode 100644
index 0000000..89455a4
--- /dev/null
+++ b/src/crankshaft/mips64/OWNERS
@@ -0,0 +1,6 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
+ivica.bogosavljevic@imgtec.com
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.cc b/src/crankshaft/mips64/lithium-codegen-mips64.cc
new file mode 100644
index 0000000..29d19ee
--- /dev/null
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -0,0 +1,5842 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator final : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deopt_mode_(mode) { }
+  virtual ~SafepointGenerator() {}
+
+  void BeforeCall(int call_size) const override {}
+
+  void AfterCall() const override {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  DCHECK(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+         GenerateJumpTable() && GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  DCHECK(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  DCHECK(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+      __ stop("stop_at");
+    }
+#endif
+
+    // a1: Callee's JS function.
+    // cp: Callee's context.
+    // fp: Caller's frame pointer.
+    // lr: Caller's pc.
+  }
+
+  info()->set_prologue_offset(masm_->pc_offset());
+  if (NeedsEagerFrame()) {
+    if (info()->IsStub()) {
+      __ StubPrologue();
+    } else {
+      __ Prologue(info()->GeneratePreagedPrologue());
+    }
+    frame_is_built_ = true;
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
+      __ Push(a0, a1);
+      __ Daddu(a0, sp, Operand(slots * kPointerSize));
+      __ li(a1, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ Dsubu(a0, a0, Operand(kPointerSize));
+      __ sd(a1, MemOperand(a0, 2 * kPointerSize));
+      __ Branch(&loop, ne, a0, Operand(sp));
+      __ Pop(a0, a1);
+    } else {
+      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
+    }
+  }
+
+  if (info()->saves_caller_doubles()) {
+    SaveCallerDoubles();
+  }
+  return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+  Comment(";;; Prologue begin");
+
+  // Possibly allocate a local context.
+  if (info()->scope()->num_heap_slots() > 0) {
+    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
+    // Argument to NewContext is the function, which is in a1.
+    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+    if (info()->scope()->is_script_scope()) {
+      __ push(a1);
+      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ CallRuntime(Runtime::kNewScriptContext);
+      deopt_mode = Safepoint::kLazyDeopt;
+    } else if (slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ push(a1);
+      __ CallRuntime(Runtime::kNewFunctionContext);
+    }
+    RecordSafepoint(deopt_mode);
+
+    // The context is returned in v0. It replaces the context passed to us;
+    // it's saved in the stack and kept live in cp.
+    __ mov(cp, v0);
+    __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ ld(a0, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextMemOperand(cp, var->index());
+        __ sd(a0, target);
+        // Update the write barrier. This clobbers a3 and a0.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(
+              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, a0, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  Comment(";;; Prologue end");
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or if there
+  // are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  DCHECK(slots >= 0);
+  __ Dsubu(sp, sp, Operand(slots * kPointerSize));
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (instr->IsCall()) {
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  }
+  if (!instr->IsLazyBailout() && !instr->IsGap()) {
+    safepoints_.BumpLastLazySafepointIndex();
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  DCHECK(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(
+          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
+        frame_is_built_ = true;
+        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ push(scratch0());
+        __ Daddu(fp, sp,
+            Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        Comment(";;; Deferred code");
+      }
+      code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Destroy frame");
+        DCHECK(frame_is_built_);
+        __ pop(at);
+        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+        frame_is_built_ = false;
+      }
+      __ jmp(code->exit());
+    }
+  }
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+  if (jump_table_.length() > 0) {
+    Comment(";;; -------------------- Jump table --------------------");
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    Label table_start, call_deopt_entry;
+
+    __ bind(&table_start);
+    Label needs_frame;
+    Address base = jump_table_[0]->address;
+    for (int i = 0; i < jump_table_.length(); i++) {
+      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
+      __ bind(&table_entry->label);
+      Address entry = table_entry->address;
+      DeoptComment(table_entry->deopt_info);
+
+      // Second-level deopt table entries are contiguous and small, so instead
+      // of loading the full, absolute address of each one, load the base
+      // address and add an immediate offset.
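+      // E.g. with deopt entries Deoptimizer::table_entry_size_ bytes apart,
+      // a small table keeps every entry - base delta within the int16 range
+      // tested below (an illustration; the code relies only on is_int16).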
+      if (is_int16(entry - base)) {
+        if (table_entry->needs_frame) {
+          DCHECK(!info()->saves_caller_doubles());
+          Comment(";;; call deopt with frame");
+          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+          __ BranchAndLink(&needs_frame, USE_DELAY_SLOT);
+          __ li(t9, Operand(entry - base));
+        } else {
+          __ BranchAndLink(&call_deopt_entry, USE_DELAY_SLOT);
+          __ li(t9, Operand(entry - base));
+        }
+
+      } else {
+        __ li(t9, Operand(entry - base));
+        if (table_entry->needs_frame) {
+          DCHECK(!info()->saves_caller_doubles());
+          Comment(";;; call deopt with frame");
+          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+          __ BranchAndLink(&needs_frame);
+        } else {
+          __ BranchAndLink(&call_deopt_entry);
+        }
+      }
+      info()->LogDeoptCallPosition(masm()->pc_offset(),
+                                   table_entry->deopt_info.inlining_id);
+    }
+    if (needs_frame.is_linked()) {
+      __ bind(&needs_frame);
+      // This variant of deopt can only be used with stubs. Since we don't
+      // have a function pointer to install in the stack frame that we're
+      // building, install a special marker there instead.
+      DCHECK(info()->IsStub());
+      __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
+      __ push(at);
+      __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+    }
+
+    Comment(";;; call deopt");
+    __ bind(&call_deopt_entry);
+
+    if (info()->saves_caller_doubles()) {
+      DCHECK(info()->IsStub());
+      RestoreCallerDoubles();
+    }
+
+    __ li(at,
+          Operand(reinterpret_cast<int64_t>(base), RelocInfo::RUNTIME_ENTRY));
+    __ Daddu(t9, t9, Operand(at));
+    __ Jump(t9);
+  }
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  DCHECK(is_done());
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::from_code(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DoubleRegister::from_code(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  DCHECK(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk_->LookupConstant(const_op);
+    Handle<Object> literal = constant->handle(isolate());
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      AllowDeferredHandleDereference get_number;
+      DCHECK(literal->IsNumber());
+      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
+    } else if (r.IsSmi()) {
+      DCHECK(constant->HasSmiValue());
+      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
+    } else if (r.IsDouble()) {
+      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+    } else {
+      DCHECK(r.IsSmiOrTagged());
+      __ li(scratch, literal);
+    }
+    return scratch;
+  } else if (op->IsStackSlot()) {
+    __ ld(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  DCHECK(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                                FloatRegister flt_scratch,
+                                                DoubleRegister dbl_scratch) {
+  if (op->IsDoubleRegister()) {
+    return ToDoubleRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk_->LookupConstant(const_op);
+    Handle<Object> literal = constant->handle(isolate());
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      DCHECK(literal->IsNumber());
+      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
+      __ mtc1(at, flt_scratch);
+      __ cvt_d_w(dbl_scratch, flt_scratch);
+      return dbl_scratch;
+    } else if (r.IsDouble()) {
+      Abort(kUnsupportedDoubleImmediate);
+    } else if (r.IsTagged()) {
+      Abort(kUnsupportedTaggedImmediate);
+    }
+  } else if (op->IsStackSlot()) {
+    MemOperand mem_op = ToMemOperand(op);
+    __ ldc1(dbl_scratch, mem_op);
+    return dbl_scratch;
+  }
+  UNREACHABLE();
+  return dbl_scratch;
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return constant->Integer32Value();
+}
+
+
+int64_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
+                                            const Representation& r) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  DCHECK(r.IsSmiOrTagged());
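+  // On 64-bit targets Smi::FromInt keeps the payload in the upper 32 bits,
+  // so the cast below effectively yields value << 32 (a note on the Smi
+  // encoding this relies on).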
+  return reinterpret_cast<int64_t>(Smi::FromInt(value));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return Smi::FromInt(constant->Integer32Value());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk()->LookupConstant(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsSmi()) {
+      DCHECK(constant->HasSmiValue());
+      return Operand(Smi::FromInt(constant->Integer32Value()));
+    } else if (r.IsInteger32()) {
+      DCHECK(constant->HasInteger32Value());
+      return Operand(constant->Integer32Value());
+    } else if (r.IsDouble()) {
+      Abort(kToOperandUnsupportedDoubleImmediate);
+    }
+    DCHECK(r.IsTagged());
+    return Operand(constant->handle(isolate()));
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort(kToOperandIsDoubleRegisterUnimplemented);
+    return Operand((int64_t)0);
+  }
+  // Stack slots not implemented, use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand((int64_t)0);
+}
+
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
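+  // E.g. index -1 yields offset 0 and index -2 yields kPointerSize.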
+  return -(index + 1) * kPointerSize;
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  DCHECK(!op->IsRegister());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()));
+  } else {
+    // There is no eager stack frame; retrieve the parameter relative to the
+    // stack pointer.
+    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  DCHECK(op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize);
+  } else {
+    // There is no eager stack frame; retrieve the parameter relative to the
+    // stack pointer.
+    return MemOperand(
+        sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+
+  WriteTranslation(environment->outer(), translation);
+  WriteTranslationFrame(environment, translation);
+
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    AddToTranslation(
+        environment, translation, value, environment->HasTaggedValueAt(i),
+        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+  }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation,
+                                LOperand* op,
+                                bool is_tagged,
+                                bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment,
+                       translation,
+                       value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer,
+                       dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    int index = op->index();
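+    // Non-negative indices denote spill slots; shift them past the fixed
+    // frame header so they index the physical frame. Negative indices are
+    // parameter slots and stay as they are.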
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    if (is_tagged) {
+      translation->StoreStackSlot(index);
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(index);
+    } else {
+      translation->StoreInt32StackSlot(index);
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    translation->StoreDoubleStackSlot(index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  DCHECK(instr != NULL);
+  __ Call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr,
+                           SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+
+  __ CallRuntime(function, num_arguments, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    __ Move(cp, ToRegister(context));
+  } else if (context->IsStackSlot()) {
+    __ ld(cp, ToMemOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, zone());
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            Deoptimizer::BailoutType bailout_type,
+                            Register src1, const Operand& src2) {
+  LEnvironment* environment = instr->environment();
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  DCHECK(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+    return;
+  }
+
+  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+    Register scratch = scratch0();
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+    Label no_deopt;
+    __ Push(a1, scratch);
+    __ li(scratch, Operand(count));
+    __ lw(a1, MemOperand(scratch));
+    __ Subu(a1, a1, Operand(1));
+    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
+    __ li(a1, Operand(FLAG_deopt_every_n_times));
+    __ sw(a1, MemOperand(scratch));
+    __ Pop(a1, scratch);
+
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    __ bind(&no_deopt);
+    __ sw(a1, MemOperand(scratch));
+    __ Pop(a1, scratch);
+  }
+
+  if (info()->ShouldTrapOnDeopt()) {
+    Label skip;
+    if (condition != al) {
+      __ Branch(&skip, NegateCondition(condition), src1, src2);
+    }
+    __ stop("trap_on_deopt");
+    __ bind(&skip);
+  }
+
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
+  DCHECK(info()->IsStub() || frame_is_built_);
+  // Go through the jump table if we need to handle a condition, build the
+  // frame, or restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
+    DeoptComment(deopt_info);
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
+    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
+  } else {
+    Deoptimizer::JumpTableEntry* table_entry =
+        new (zone()) Deoptimizer::JumpTableEntry(
+            entry, deopt_info, bailout_type, !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+        jump_table_.is_empty() ||
+        !table_entry->IsEquivalentTo(*jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
+    }
+    __ Branch(&jump_table_.last()->label, condition, src1, src2);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            Register src1, const Operand& src2) {
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, src1, src2);
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
+  DCHECK(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+      kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
+          label->block_id(),
+          LabelType(label));
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->result()).is(v0));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
+  HMod* hmod = instr->hydrogen();
+  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
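+  // Illustrative: mask == |divisor| - 1 (e.g. 7 for divisor == +/-8), so the
+  // code below computes
+  //   dividend >= 0 ? dividend & mask : -((-dividend) & mask),
+  // matching the sign-of-dividend semantics of JavaScript's % operator.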
+  Label dividend_is_not_negative, done;
+
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
+    // Note: The code below even works when the dividend is kMinInt: the
+    // negation wraps, but the masked result is still correct.
+    __ dsubu(dividend, zero_reg, dividend);
+    __ And(dividend, dividend, Operand(mask));
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+                   Operand(zero_reg));
+    }
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ dsubu(dividend, zero_reg, dividend);
+  }
+
+  __ bind(&dividend_is_not_negative);
+  __ And(dividend, dividend, Operand(mask));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  __ Dmul(result, result, Operand(Abs(divisor)));
+  __ Dsubu(result, dividend, Operand(result));
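+  // The three instructions above implement the identity
+  //   n % d == n - (n / d) * d,
+  // with the truncating quotient n / d produced by TruncatingDiv via a
+  // reciprocal (magic-number) multiplication.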
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, dividend,
+                 Operand(zero_reg));
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  HMod* hmod = instr->hydrogen();
+  const Register left_reg = ToRegister(instr->left());
+  const Register right_reg = ToRegister(instr->right());
+  const Register result_reg = ToRegister(instr->result());
+
+  // div runs in the background while we check for special cases.
+  __ Dmod(result_reg, left_reg, right_reg);
+
+  Label done;
+  // Check for x % 0; we have to deopt in this case because we can't return
+  // a NaN.
+  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, right_reg,
+                 Operand(zero_reg));
+  }
+
+  // Check for kMinInt % -1; div will return kMinInt, which is not what we
+  // want. We have to deopt if we care about -0, because we can't return that.
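+  // (Example: kMinInt % -1 is mathematically 0, but because the dividend is
+  // negative JavaScript requires -0, which an integer register cannot
+  // represent.)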
+  if (hmod->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, right_reg, Operand(-1));
+    } else {
+      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
+      __ Branch(USE_DELAY_SLOT, &done);
+      __ mov(result_reg, zero_reg);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  // If we care about -0, test if the dividend is <0 and the result is 0.
+  __ Branch(&done, ge, left_reg, Operand(zero_reg));
+
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result_reg,
+                 Operand(zero_reg));
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+                 Operand(zero_reg));
+  }
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, dividend, Operand(kMinInt));
+  }
+  // Deoptimize if remainder will not be 0.
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+      divisor != 1 && divisor != -1) {
+    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+    __ And(at, dividend, Operand(mask));
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, at, Operand(zero_reg));
+  }
+
+  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
+    __ Dsubu(result, zero_reg, dividend);
+    return;
+  }
+  uint16_t shift = WhichPowerOf2Abs(divisor);
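+  // The sequences below use the standard round-toward-zero bias for signed
+  // division by 2^shift (illustrative):
+  //   (dividend + (dividend < 0 ? (1 << shift) - 1 : 0)) >> shift,
+  // with the bias derived branch-free from the dividend's sign bit.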
+  if (shift == 0) {
+    __ Move(result, dividend);
+  } else if (shift == 1) {
+    __ dsrl32(result, dividend, 31);
+    __ Daddu(result, dividend, Operand(result));
+  } else {
+    __ dsra32(result, dividend, 31);
+    __ dsrl32(result, result, 32 - shift);
+    __ Daddu(result, dividend, Operand(result));
+  }
+  if (shift > 0) __ dsra(result, result, shift);
+  if (divisor < 0) __ Dsubu(result, zero_reg, result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+                 Operand(zero_reg));
+  }
+
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ Subu(result, zero_reg, result);
+
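+  // If any use of the result observes the fractional part, verify that the
+  // division was exact by multiplying back: deopt unless
+  //   result * divisor - dividend == 0.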
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    __ Dmul(scratch0(), result, Operand(divisor));
+    __ Dsubu(scratch0(), scratch0(), dividend);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, scratch0(),
+                 Operand(zero_reg));
+  }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  const Register result = ToRegister(instr->result());
+
+  // On MIPS the div instruction is asynchronous: it runs in the background
+  // while we check for special cases.
+  __ Div(result, dividend, divisor);
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+                 Operand(zero_reg));
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label left_not_zero;
+    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+                 Operand(zero_reg));
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    Label left_not_min_int;
+    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+    __ bind(&left_not_min_int);
+  }
+
+  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    // Calculate remainder.
+    Register remainder = ToRegister(instr->temp());
+    if (kArchVariant != kMips64r6) {
+      __ mfhi(remainder);
+    } else {
+      __ dmod(remainder, dividend, divisor);
+    }
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, remainder,
+                 Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+  DoubleRegister addend = ToDoubleRegister(instr->addend());
+  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+  // This is computed in-place.
+  DCHECK(addend.is(ToDoubleRegister(instr->result())));
+
+  __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  Register result = ToRegister(instr->result());
+  int32_t divisor = instr->divisor();
+  Register scratch = result.is(dividend) ? scratch0() : dividend;
+  DCHECK(!result.is(dividend) || !scratch.is(dividend));
+
+  // If the divisor is 1, return the dividend.
+  if (divisor == 1) {
+    __ Move(result, dividend);
+    return;
+  }
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
+  uint16_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 1) {
+    __ dsra(result, dividend, shift);
+    return;
+  }
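+  // (An arithmetic right shift rounds toward minus infinity, so for a
+  // positive power-of-two divisor it is exactly the flooring division,
+  // e.g. -5 >> 1 == -3 == floor(-5 / 2).)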
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+  // The dividend can be in the same register as the result, so save its
+  // value for the overflow check.
+  __ Move(scratch, dividend);
+
+  __ Dsubu(result, zero_reg, dividend);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+  }
+
+  __ Xor(scratch, scratch, result);
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+      DeoptimizeIf(gt, instr, Deoptimizer::kOverflow, result, Operand(kMaxInt));
+    }
+    return;
+  }
+
+  // If the negation could not overflow, simply shifting is OK.
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ dsra(result, result, shift);
+    return;
+  }
+
+  Label no_overflow, done;
+  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
+  __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
+  __ Branch(&done);
+  __ bind(&no_overflow);
+  __ dsra(result, result, shift);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, dividend,
+                 Operand(zero_reg));
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(result, dividend, Abs(divisor));
+    if (divisor < 0) __ Dsubu(result, zero_reg, result);
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
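+  // Illustrative identity for a dividend and divisor of opposite signs:
+  //   floor(n / d) == trunc((n + sign(d)) / d) - 1,
+  // valid whether or not the division is exact, e.g.
+  //   floor(-5 / 2) == trunc(-4 / 2) - 1 == -3 and
+  //   floor(-4 / 2) == trunc(-3 / 2) - 1 == -2.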
+  Register temp = ToRegister(instr->temp());
+  DCHECK(!temp.is(dividend) && !temp.is(result));
+  Label needs_adjustment, done;
+  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
+            dividend, Operand(zero_reg));
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ Dsubu(result, zero_reg, result);
+  __ jmp(&done);
+  __ bind(&needs_adjustment);
+  __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(result, temp, Abs(divisor));
+  if (divisor < 0) __ Dsubu(result, zero_reg, result);
+  __ Dsubu(result, result, Operand(1));
+  __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  const Register result = ToRegister(instr->result());
+
+  // On MIPS the div instruction is asynchronous: it runs in the background
+  // while we check for special cases.
+  __ Ddiv(result, dividend, divisor);
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero, divisor,
+                 Operand(zero_reg));
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label left_not_zero;
+    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, divisor,
+                 Operand(zero_reg));
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    Label left_not_min_int;
+    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow, divisor, Operand(-1));
+    __ bind(&left_not_min_int);
+  }
+
+  // We performed a truncating division. Correct the result if necessary.
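+  // Flooring differs from truncation only when the remainder is nonzero and
+  // dividend and divisor have opposite signs; the XOR below tests sign
+  // agreement, and the quotient is decremented in that case.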
+  Label done;
+  Register remainder = scratch0();
+  if (kArchVariant != kMips64r6) {
+    __ mfhi(remainder);
+  } else {
+    __ dmod(remainder, dividend, divisor);
+  }
+  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+  __ Xor(remainder, remainder, Operand(divisor));
+  __ Branch(&done, ge, remainder, Operand(zero_reg));
+  __ Dsubu(result, result, Operand(1));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMulS(LMulS* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
+  Register left = ToRegister(instr->left());
+  LOperand* right_op = instr->right();
+
+  bool bailout_on_minus_zero =
+    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (right_op->IsConstantOperand()) {
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below.
+      // If the constant is negative and left is zero, the result should be -0.
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
+    }
+
+    switch (constant) {
+      case -1:
+        if (overflow) {
+          __ DsubuAndCheckForOverflow(result, zero_reg, left, scratch);
+          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
+                       Operand(zero_reg));
+        } else {
+          __ Dsubu(result, zero_reg, left);
+        }
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+          // If left is strictly negative and the constant is zero, the
+          // result is -0. Deoptimize if required; otherwise return 0.
+          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
+                       Operand(zero_reg));
+        }
+        __ mov(result, zero_reg);
+        break;
+      case 1:
+        // Nothing to do.
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
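+        // Illustrative: mask is 0 for non-negative constants and -1 for
+        // negative ones, so (constant + mask) ^ mask is a branch-free
+        // absolute value, e.g. (-6 + -1) ^ -1 == ~(-7) == 6.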
+
+        if (base::bits::IsPowerOfTwo32(constant_abs)) {
+          int32_t shift = WhichPowerOf2(constant_abs);
+          __ dsll(result, left, shift);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Dsubu(result, zero_reg, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs - 1);
+          __ dsll(scratch, left, shift);
+          __ Daddu(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Dsubu(result, zero_reg, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs + 1);
+          __ dsll(scratch, left, shift);
+          __ Dsubu(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Dsubu(result, zero_reg, result);
+        } else {
+          // Generate standard code.
+          __ li(at, constant);
+          __ Dmul(result, left, at);
+        }
+    }
+  } else {
+    DCHECK(right_op->IsRegister());
+    Register right = ToRegister(right_op);
+
+    if (overflow) {
+      // hi:lo = left * right.
+      __ Dmulh(result, left, right);
+      __ dsra32(scratch, result, 0);
+      __ sra(at, result, 31);
+      __ SmiTag(result);
+      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
+    } else {
+      __ SmiUntag(result, left);
+      __ dmul(result, result, right);
+    }
+
+    if (bailout_on_minus_zero) {
+      Label done;
+      __ Xor(at, left, right);
+      __ Branch(&done, ge, at, Operand(zero_reg));
+      // Bail out if the result is minus zero.
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
+                   Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
+  Register left = ToRegister(instr->left());
+  LOperand* right_op = instr->right();
+
+  bool bailout_on_minus_zero =
+      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (right_op->IsConstantOperand()) {
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below.
+      // If the constant is negative and left is zero, the result should be -0.
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, left, Operand(zero_reg));
+    }
+
+    switch (constant) {
+      case -1:
+        if (overflow) {
+          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
+          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, scratch,
+                       Operand(zero_reg));
+        } else {
+          __ Subu(result, zero_reg, left);
+        }
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+          // If left is strictly negative and the constant is zero, the
+          // result is -0. Deoptimize if required; otherwise return 0.
+          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, left,
+                       Operand(zero_reg));
+        }
+        __ mov(result, zero_reg);
+        break;
+      case 1:
+        // Nothing to do.
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
+
+        if (base::bits::IsPowerOfTwo32(constant_abs)) {
+          int32_t shift = WhichPowerOf2(constant_abs);
+          __ sll(result, left, shift);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Subu(result, zero_reg, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs - 1);
+          __ sll(scratch, left, shift);
+          __ addu(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Subu(result, zero_reg, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs + 1);
+          __ sll(scratch, left, shift);
+          __ Subu(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Subu(result, zero_reg, result);
+        } else {
+          // Generate standard code.
+          __ li(at, constant);
+          __ Mul(result, left, at);
+        }
+    }
+
+  } else {
+    DCHECK(right_op->IsRegister());
+    Register right = ToRegister(right_op);
+
+    if (overflow) {
+      // hi:lo = left * right.
+      __ Dmul(result, left, right);
+      __ dsra32(scratch, result, 0);
+      __ sra(at, result, 31);
+
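+      // The 64-bit product fits in 32 bits iff its upper 32 bits (scratch)
+      // equal the sign extension of its lower 32 bits (at); any mismatch
+      // means the int32 multiply overflowed.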
+      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, scratch, Operand(at));
+    } else {
+      __ mul(result, left, right);
+    }
+
+    if (bailout_on_minus_zero) {
+      Label done;
+      __ Xor(at, left, right);
+      __ Branch(&done, ge, at, Operand(zero_reg));
+      // Bail out if the result is minus zero.
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, result,
+                   Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left_op = instr->left();
+  LOperand* right_op = instr->right();
+  DCHECK(left_op->IsRegister());
+  Register left = ToRegister(left_op);
+  Register result = ToRegister(instr->result());
+  Operand right(no_reg);
+
+  if (right_op->IsStackSlot()) {
+    right = Operand(EmitLoadRegister(right_op, at));
+  } else {
+    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
+    right = ToOperand(right_op);
+  }
+
+  switch (instr->op()) {
+    case Token::BIT_AND:
+      __ And(result, left, right);
+      break;
+    case Token::BIT_OR:
+      __ Or(result, left, right);
+      break;
+    case Token::BIT_XOR:
+      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+        __ Nor(result, zero_reg, left);
+      } else {
+        __ Xor(result, left, right);
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+  // result may alias either of them.
+  LOperand* right_op = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+
+  if (right_op->IsRegister()) {
+    // No need to mask the right operand on MIPS, it is built into the variable
+    // shift instructions.
+    switch (instr->op()) {
+      case Token::ROR:
+        __ Ror(result, left, Operand(ToRegister(right_op)));
+        break;
+      case Token::SAR:
+        __ srav(result, left, ToRegister(right_op));
+        break;
+      case Token::SHR:
+        __ srlv(result, left, ToRegister(right_op));
+        if (instr->can_deopt()) {
+           // TODO(yy): (-1) >>> 0. anything else?
+          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, result,
+                       Operand(zero_reg));
+          DeoptimizeIf(gt, instr, Deoptimizer::kNegativeValue, result,
+                       Operand(kMaxInt));
+        }
+        break;
+      case Token::SHL:
+        __ sllv(result, left, ToRegister(right_op));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    // Mask the right_op operand.
+    int value = ToInteger32(LConstantOperand::cast(right_op));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
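+    // JavaScript, like the MIPS shift instructions, uses only the low five
+    // bits of the shift count, hence the masking with 0x1F.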
+    switch (instr->op()) {
+      case Token::ROR:
+        if (shift_count != 0) {
+          __ Ror(result, left, Operand(shift_count));
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sra(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ srl(result, left, shift_count);
+        } else {
+          if (instr->can_deopt()) {
+            __ And(at, left, Operand(0x80000000));
+            DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue, at,
+                         Operand(zero_reg));
+          }
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          if (instr->hydrogen_value()->representation().IsSmi()) {
+            __ dsll(result, left, shift_count);
+          } else {
+            __ sll(result, left, shift_count);
+          }
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubS(LSubS* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
+    __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
+    __ DsubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
+                                ToOperand(right), overflow, scratch);
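+    // The helper leaves a negative value in 'overflow' iff the subtraction
+    // overflowed, hence the lt-against-zero deopt test below.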
+    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
+                 Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
+    __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
+    __ SubuAndCheckForOverflow(ToRegister(result), ToRegister(left),
+                               ToOperand(right), overflow, scratch);
+    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
+                 Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  DCHECK(instr->result()->IsDoubleRegister());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  double v = instr->value();
+  __ Move(result, v);
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Handle<Object> object = instr->value(isolate());
+  AllowDeferredHandleDereference smi_check;
+  __ li(ToRegister(instr->result()), object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->value());
+  __ EnumLength(result, map);
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+                                           LOperand* index,
+                                           String::Encoding encoding) {
+  if (index->IsConstantOperand()) {
+    int offset = ToInteger32(LConstantOperand::cast(index));
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset *= kUC16Size;
+    }
+    STATIC_ASSERT(kCharSize == 1);
+    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+  }
+  Register scratch = scratch0();
+  DCHECK(!scratch.is(string));
+  DCHECK(!scratch.is(ToRegister(index)));
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ Daddu(scratch, string, ToRegister(index));
+  } else {
+    STATIC_ASSERT(kUC16Size == 2);
+    __ dsll(scratch, ToRegister(index), 1);
+    __ Daddu(scratch, string, scratch);
+  }
+  return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  if (FLAG_debug_code) {
+    Register scratch = scratch0();
+    __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+    __ And(scratch, scratch,
+           Operand(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+                                ? one_byte_seq_type : two_byte_seq_type));
+    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ lbu(result, operand);
+  } else {
+    __ lhu(result, operand);
+  }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register value = ToRegister(instr->value());
+
+  if (FLAG_debug_code) {
+    Register scratch = scratch0();
+    Register index = ToRegister(instr->index());
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    int encoding_mask =
+        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+        ? one_byte_seq_type : two_byte_seq_type;
+    __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ sb(value, operand);
+  } else {
+    __ sh(value, operand);
+  }
+}
+
+
+void LCodeGen::DoAddE(LAddE* instr) {
+  LOperand* result = instr->result();
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+
+  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
+  DCHECK(right->IsRegister() || right->IsConstantOperand());
+  __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
+}
+
+
+void LCodeGen::DoAddS(LAddS* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
+    __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
+    __ DadduAndCheckForOverflow(ToRegister(result), ToRegister(left),
+                                ToOperand(right), overflow, scratch);
+    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
+                 Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
+    __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
+    __ AdduAndCheckForOverflow(ToRegister(result), ToRegister(left),
+                               ToOperand(right), overflow, scratch);
+    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, overflow,
+                 Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  HMathMinMax::Operation operation = instr->hydrogen()->operation();
+  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Register left_reg = ToRegister(left);
+    Register right_reg = EmitLoadRegister(right, scratch0());
+    Register result_reg = ToRegister(instr->result());
+    Label return_right, done;
+    Register scratch = scratch1();
+    __ Slt(scratch, left_reg, Operand(right_reg));
+    if (condition == ge) {
+      __ Movz(result_reg, left_reg, scratch);
+      __ Movn(result_reg, right_reg, scratch);
+    } else {
+      DCHECK(condition == le);
+      __ Movn(result_reg, left_reg, scratch);
+      __ Movz(result_reg, right_reg, scratch);
+    }
+  } else {
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    FPURegister left_reg = ToDoubleRegister(left);
+    FPURegister right_reg = ToDoubleRegister(right);
+    FPURegister result_reg = ToDoubleRegister(instr->result());
+    Label check_nan_left, check_zero, return_left, return_right, done;
+    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
+    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
+    __ Branch(&return_right);
+
+    __ bind(&check_zero);
+    // left == right != 0.
+    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
+    // At this point, both left and right are either 0 or -0.
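+    // Illustrative: for signed zeros, IEEE addition yields -0 only when both
+    // operands are -0, so l + r computes max(l, r) here, and min is obtained
+    // as min(l, r) == -max(-l, -r) == -((-l) - r), the negate, subtract,
+    // negate sequence below.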
+    if (operation == HMathMinMax::kMathMin) {
+      __ neg_d(left_reg, left_reg);
+      __ sub_d(result_reg, left_reg, right_reg);
+      __ neg_d(result_reg, result_reg);
+    } else {
+      __ add_d(result_reg, left_reg, right_reg);
+    }
+    __ Branch(&done);
+
+    __ bind(&check_nan_left);
+    // left == NaN.
+    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
+    __ bind(&return_right);
+    if (!right_reg.is(result_reg)) {
+      __ mov_d(result_reg, right_reg);
+    }
+    __ Branch(&done);
+
+    __ bind(&return_left);
+    if (!left_reg.is(result_reg)) {
+      __ mov_d(result_reg, left_reg);
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->left());
+  DoubleRegister right = ToDoubleRegister(instr->right());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ add_d(result, left, right);
+      break;
+    case Token::SUB:
+      __ sub_d(result, left, right);
+      break;
+    case Token::MUL:
+      __ mul_d(result, left, right);
+      break;
+    case Token::DIV:
+      __ div_d(result, left, right);
+      break;
+    case Token::MOD: {
+      // Save a0-a3 on the stack.
+      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+      __ MultiPush(saved_regs);
+
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ MovToFloatParameters(left, right);
+      __ CallCFunction(
+          ExternalReference::mod_two_doubles_operation(isolate()),
+          0, 2);
+      // Move the result in the double result register.
+      __ MovFromFloatResult(result);
+
+      // Restore saved register.
+      __ MultiPop(saved_regs);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(a1));
+  DCHECK(ToRegister(instr->right()).is(a0));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+  // Other architectures use a nop here to signal that there is no inlined
+  // patchable code. MIPS does not need the nop, since our marker instruction
+  // (andi zero_reg) will never be used in normal code.
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr,
+                          Condition condition,
+                          Register src1,
+                          const Operand& src2) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
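+  // Pick the cheapest emission: fall through when the true block comes next
+  // (branching only on the negated condition), take a single branch when the
+  // false block comes next, and emit a branch plus jump otherwise.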
+  if (right_block == left_block || condition == al) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(right_block),
+              NegateCondition(condition), src1, src2);
+  } else if (right_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
+  } else {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchF(InstrType instr,
+                           Condition condition,
+                           FPURegister src1,
+                           FPURegister src2) {
+  int right_block = instr->FalseDestination(chunk_);
+  int left_block = instr->TrueDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
+               NegateFpuCondition(condition), src1, src2);
+  } else if (right_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+               condition, src1, src2);
+  } else {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+               condition, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition condition,
+                              Register src1, const Operand& src2) {
+  int true_block = instr->TrueDestination(chunk_);
+  __ Branch(chunk_->GetAssemblyLabel(true_block), condition, src1, src2);
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition condition,
+                               Register src1, const Operand& src2) {
+  int false_block = instr->FalseDestination(chunk_);
+  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitFalseBranchF(InstrType instr,
+                                Condition condition,
+                                FPURegister src1,
+                                FPURegister src2) {
+  int false_block = instr->FalseDestination(chunk_);
+  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
+             condition, src1, src2);
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+  __ stop("LDebugBreak");
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsInteger32() || r.IsSmi()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    EmitBranch(instr, ne, reg, Operand(zero_reg));
+  } else if (r.IsDouble()) {
+    DCHECK(!info()->IsStub());
+    DoubleRegister reg = ToDoubleRegister(instr->value());
+    // Test the double value. Zero and NaN are false.
+    EmitBranchF(instr, ogl, reg, kDoubleRegZero);
+  } else {
+    DCHECK(r.IsTagged());
+    Register reg = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      DCHECK(!info()->IsStub());
+      __ LoadRoot(at, Heap::kTrueValueRootIndex);
+      EmitBranch(instr, eq, reg, Operand(at));
+    } else if (type.IsSmi()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, ne, reg, Operand(zero_reg));
+    } else if (type.IsJSArray()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
+    } else if (type.IsHeapNumber()) {
+      DCHECK(!info()->IsStub());
+      DoubleRegister dbl_scratch = double_scratch0();
+      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+      // Test the double value. Zero and NaN are false.
+      EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
+    } else if (type.IsString()) {
+      DCHECK(!info()->IsStub());
+      __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
+      EmitBranch(instr, ne, at, Operand(zero_reg));
+    } else {
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ LoadRoot(at, Heap::kTrueValueRootIndex);
+        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
+        __ LoadRoot(at, Heap::kFalseValueRootIndex);
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ LoadRoot(at, Heap::kNullValueRootIndex);
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all others -> true.
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ SmiTst(reg, at);
+        DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+      }
+
+      const Register map = scratch0();
+      if (expected.NeedsMap()) {
+        __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ And(at, at, Operand(1 << Map::kIsUndetectable));
+          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(instr->TrueLabel(chunk_),
+                  ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
+        __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
+        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
+        __ Branch(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        const Register scratch = scratch1();
+        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
+      }
+
+      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+        // SIMD value -> true.
+        const Register scratch = scratch1();
+        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(instr->TrueLabel(chunk_), eq, scratch,
+                  Operand(SIMD128_VALUE_TYPE));
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        DoubleRegister dbl_scratch = double_scratch0();
+        Label not_heap_number;
+        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+        __ Branch(&not_heap_number, ne, map, Operand(at));
+        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+                   ne, dbl_scratch, kDoubleRegZero);
+        // Falls through if dbl_scratch == 0.
+        __ Branch(instr->FalseLabel(chunk_));
+        __ bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject, zero_reg,
+                     Operand(zero_reg));
+      }
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  if (!IsNextEmittedBlock(block)) {
+    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = kNoCondition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? lo : lt;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? hi : gt;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? ls : le;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? hs : ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  bool is_unsigned =
+      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+  Condition cond = TokenToCondition(instr->op(), is_unsigned);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Compare left and right as doubles and load the
+      // resulting flags into the normal status register.
+      FPURegister left_reg = ToDoubleRegister(left);
+      FPURegister right_reg = ToDoubleRegister(right);
+
+      // If a NaN is involved, i.e. the result is unordered,
+      // jump to the false block label.
+      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
+                 left_reg, right_reg);
+
+      EmitBranchF(instr, cond, left_reg, right_reg);
+    } else {
+      Register cmp_left;
+      Operand cmp_right = Operand((int64_t)0);
+      if (right->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(right));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          cmp_left = ToRegister(left);
+          cmp_right = Operand(Smi::FromInt(value));
+        } else {
+          cmp_left = ToRegister(left);
+          cmp_right = Operand(value);
+        }
+      } else if (left->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(left));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          cmp_left = ToRegister(right);
+          cmp_right = Operand(Smi::FromInt(value));
+        } else {
+          cmp_left = ToRegister(right);
+          cmp_right = Operand(value);
+        }
+        // We commuted the operands, so commute the condition.
+        cond = CommuteCondition(cond);
+      } else {
+        cmp_left = ToRegister(left);
+        cmp_right = Operand(ToRegister(right));
+      }
+
+      EmitBranch(instr, cond, cmp_left, cmp_right);
+    }
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+
+  EmitBranch(instr, eq, left, Operand(right));
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+  if (instr->hydrogen()->representation().IsTagged()) {
+    Register input_reg = ToRegister(instr->object());
+    __ li(at, Operand(factory()->the_hole_value()));
+    EmitBranch(instr, eq, input_reg, Operand(at));
+    return;
+  }
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->object());
+  EmitFalseBranchF(instr, eq, input_reg, input_reg);
+
+  Register scratch = scratch0();
+  __ FmoveHigh(scratch, input_reg);
+  EmitBranch(instr, eq, scratch,
+             Operand(static_cast<int32_t>(kHoleNanUpper32)));
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+  Representation rep = instr->hydrogen()->value()->representation();
+  DCHECK(!rep.IsInteger32());
+  Register scratch = ToRegister(instr->temp());
+
+  if (rep.IsDouble()) {
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
+    __ FmoveHigh(scratch, value);
+    // Only use low 32-bits of value.
+    __ dsll32(scratch, scratch, 0);
+    __ dsrl32(scratch, scratch, 0);
+    __ li(at, 0x80000000);
+  } else {
+    Register value = ToRegister(instr->value());
+    __ CheckMap(value,
+                scratch,
+                Heap::kHeapNumberMapRootIndex,
+                instr->FalseLabel(chunk()),
+                DO_SMI_CHECK);
+    __ lwu(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+    EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
+    __ lwu(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+    __ mov(at, zero_reg);
+  }
+  EmitBranch(instr, eq, scratch, Operand(at));
+}
+
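+// Bit-level sketch of the -0 check above (assuming IEEE-754 doubles): -0.0
+// is encoded as 0x8000000000000000. The double path first rules out any
+// value that does not compare equal to 0.0, so only the sign bit can remain
+// set; zero-extending the upper word and comparing it against 0x80000000
+// then distinguishes -0 from +0. The tagged path checks the exponent word
+// for 0x80000000 and requires a zero mantissa word.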
+
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+  __ GetObjectType(input, temp1, temp1);
+
+  return lt;
+}
+
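+// Note on the returned condition: string instance types are ordered below
+// FIRST_NONSTRING_TYPE, so after GetObjectType leaves the instance type in
+// temp1, the caller branches on
+//   temp1 < FIRST_NONSTRING_TYPE   (condition lt)
+// to conclude "is a string", as DoIsStringAndBranch does below.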
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp1 = ToRegister(instr->temp());
+
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->type().IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+  Condition true_cond =
+      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
+
+  EmitBranch(instr, true_cond, temp1,
+             Operand(FIRST_NONSTRING_TYPE));
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Register input_reg = EmitLoadRegister(instr->value(), at);
+  __ And(at, input_reg, kSmiTagMask);
+  EmitBranch(instr, eq, at, Operand(zero_reg));
+}
+
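+// Smi-tag sketch: kSmiTag is 0 and heap object pointers carry a 1 in the
+// low bit, so "input & kSmiTagMask == 0" identifies a smi with the single
+// And/branch pair above.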
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
+  EmitBranch(instr, ne, at, Operand(zero_reg));
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(a1));
+  DCHECK(ToRegister(instr->right()).is(a0));
+
+  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+
+  EmitBranch(instr, ComputeCompareCondition(instr->op()), v0,
+             Operand(zero_reg));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  DCHECK(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return hs;
+  if (from == FIRST_TYPE) return ls;
+  UNREACHABLE();
+  return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+
+  __ GetObjectType(input, scratch, scratch);
+  EmitBranch(instr,
+             BranchCondition(instr->hydrogen()),
+             scratch,
+             Operand(TestType(instr->hydrogen())));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ AssertString(input);
+
+  __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ lwu(scratch,
+         FieldMemOperand(input, String::kHashFieldOffset));
+  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
+  EmitBranch(instr, eq, at, Operand(zero_reg));
+}
+
+
+// Branches to a label or falls through with the answer in flags.  Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
+
+  __ JumpIfSmi(input, is_false);
+
+  __ GetObjectType(input, temp, temp2);
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+    __ Branch(is_true, eq, temp2, Operand(JS_FUNCTION_TYPE));
+  } else {
+    __ Branch(is_false, eq, temp2, Operand(JS_FUNCTION_TYPE));
+  }
+
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+  // Check if the constructor in the map is a function.
+  Register instance_type = scratch1();
+  DCHECK(!instance_type.is(temp));
+  __ GetMapConstructor(temp, temp, temp2, instance_type);
+
+  // Objects with a non-function constructor have class 'Object'.
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
+    __ Branch(is_true, ne, instance_type, Operand(JS_FUNCTION_TYPE));
+  } else {
+    __ Branch(is_false, ne, instance_type, Operand(JS_FUNCTION_TYPE));
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(temp, FieldMemOperand(temp,
+                               SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are internalized it is sufficient to use an
+  // identity comparison.
+
+  // End with the address of this class_name instance in temp register.
+  // On MIPS, the caller must do the comparison with Handle<String> class_name.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = scratch0();
+  Register temp2 = ToRegister(instr->temp());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+                  class_name, input, temp, temp2);
+
+  EmitBranch(instr, eq, temp, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+  EmitBranch(instr, eq, temp, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Label true_label, done;
+  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  InstanceOfStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+    LHasInPrototypeChainAndBranch* instr) {
+  Register const object = ToRegister(instr->object());
+  Register const object_map = scratch0();
+  Register const object_instance_type = scratch1();
+  Register const object_prototype = object_map;
+  Register const prototype = ToRegister(instr->prototype());
+
+  // The {object} must be a spec object.  It's sufficient to know that {object}
+  // is not a smi, since all other non-spec objects have {null} prototypes and
+  // will be ruled out below.
+  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+    __ SmiTst(object, at);
+    EmitFalseBranch(instr, eq, at, Operand(zero_reg));
+  }
+
+  // Loop through the {object}s prototype chain looking for the {prototype}.
+  __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+  Label loop;
+  __ bind(&loop);
+
+  // Deoptimize if the object needs to be access checked.
+  __ lbu(object_instance_type,
+         FieldMemOperand(object_map, Map::kBitFieldOffset));
+  __ And(object_instance_type, object_instance_type,
+         Operand(1 << Map::kIsAccessCheckNeeded));
+  DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, object_instance_type,
+               Operand(zero_reg));
+  __ lbu(object_instance_type,
+         FieldMemOperand(object_map, Map::kInstanceTypeOffset));
+  DeoptimizeIf(eq, instr, Deoptimizer::kProxy, object_instance_type,
+               Operand(JS_PROXY_TYPE));
+
+  __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+  EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
+  __ LoadRoot(at, Heap::kNullValueRootIndex);
+  EmitFalseBranch(instr, eq, object_prototype, Operand(at));
+  __ Branch(&loop, USE_DELAY_SLOT);
+  __ ld(object_map, FieldMemOperand(object_prototype,
+                                    HeapObject::kMapOffset));  // In delay slot.
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Token::Value op = instr->op();
+
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // On MIPS there is no need for a "no inlined smi code" marker (nop).
+
+  Condition condition = ComputeCompareCondition(op);
+  // A minor optimization that relies on LoadRoot always emitting one
+  // instruction.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+  Label done, check;
+  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
+  __ bind(&check);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in v0. Since we're leaving
+    // the code managed by the register allocator and tearing down the frame,
+    // it's safe to write to the context register.
+    __ push(v0);
+    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntime(Runtime::kTraceExit);
+  }
+  if (info()->saves_caller_doubles()) {
+    RestoreCallerDoubles();
+  }
+  if (NeedsEagerFrame()) {
+    __ mov(sp, fp);
+    __ Pop(ra, fp);
+  }
+  if (instr->has_constant_parameter_count()) {
+    int parameter_count = ToInteger32(instr->constant_parameter_count());
+    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+    if (sp_delta != 0) {
+      __ Daddu(sp, sp, Operand(sp_delta));
+    }
+  } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
+    Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi.
+    __ SmiUntag(reg);
+    __ dsll(at, reg, kPointerSizeLog2);
+    __ Daddu(sp, sp, at);
+  }
+
+  __ Jump(ra);
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(a0));
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ li(vector_register, vector);
+  // No need to allocate this register.
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ li(slot_register, Operand(Smi::FromInt(index)));
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = ToRegister(instr->temp_slot());
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ li(vector_register, vector);
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ li(slot_register, Operand(Smi::FromInt(index)));
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->global_object())
+            .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+                                         SLOPPY, PREMONOMORPHIC).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+
+  __ ld(result, ContextMemOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+    } else {
+      Label is_not_hole;
+      __ Branch(&is_not_hole, ne, result, Operand(at));
+      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+      __ bind(&is_not_hole);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
+  MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ ld(scratch, target);
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch, Operand(at));
+    } else {
+      __ Branch(&skip_assignment, ne, scratch, Operand(at));
+    }
+  }
+
+  __ sd(value, target);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context,
+                              target.offset(),
+                              value,
+                              scratch0(),
+                              GetRAState(),
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
+  }
+
+  __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+  Register object = ToRegister(instr->object());
+  if (access.IsExternalMemory()) {
+    Register result = ToRegister(instr->result());
+    MemOperand operand = MemOperand(object, offset);
+    __ Load(result, operand, access.representation());
+    return;
+  }
+
+  if (instr->hydrogen()->representation().IsDouble()) {
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ ldc1(result, FieldMemOperand(object, offset));
+    return;
+  }
+
+  Register result = ToRegister(instr->result());
+  if (!access.IsInobject()) {
+    __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    object = result;
+  }
+
+  Representation representation = access.representation();
+  if (representation.IsSmi() && SmiValuesAre32Bits() &&
+      instr->hydrogen()->representation().IsInteger32()) {
+    if (FLAG_debug_code) {
+      // Verify this is really an Smi.
+      Register scratch = scratch0();
+      __ Load(scratch, FieldMemOperand(object, offset), representation);
+      __ AssertSmi(scratch);
+    }
+
+    // Read int value directly from upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    offset = SmiWordOffset(offset);
+    representation = Representation::Integer32();
+  }
+  __ Load(result, FieldMemOperand(object, offset), representation);
+}
+
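+// Layout sketch for the smi fast path above: with SmiValuesAre32Bits(), a
+// smi keeps its 32-bit payload in the upper word of the 64-bit field (the
+// tag plus shift occupy the lower 32 bits), e.g. Smi(5) is stored as
+// 0x0000000500000000. SmiWordOffset(offset) redirects the 32-bit load to
+// the payload half, so no untagging shift is needed afterwards.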
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  // Name is always in a2.
+  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(
+          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
+          instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register scratch = scratch0();
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // Get the prototype or initial map from the function.
+  __ ld(result,
+         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(at));
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ GetObjectType(result, scratch, scratch);
+  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+  // Get the prototype from the initial map.
+  __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+  // There are two words between the frame pointer and the last argument.
+  // Subtracting the index from the length accounts for one of them; the
+  // extra 1 added below accounts for the other.
+  if (instr->length()->IsConstantOperand()) {
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    if (instr->index()->IsConstantOperand()) {
+      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+      int index = (const_length - const_index) + 1;
+      __ ld(result, MemOperand(arguments, index * kPointerSize));
+    } else {
+      Register index = ToRegister(instr->index());
+      __ li(at, Operand(const_length + 1));
+      __ Dsubu(result, at, index);
+      __ dsll(at, result, kPointerSizeLog2);
+      __ Daddu(at, arguments, at);
+      __ ld(result, MemOperand(at));
+    }
+  } else if (instr->index()->IsConstantOperand()) {
+    Register length = ToRegister(instr->length());
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int loc = const_index - 1;
+    if (loc != 0) {
+      __ Dsubu(result, length, Operand(loc));
+      __ dsll(at, result, kPointerSizeLog2);
+      __ Daddu(at, arguments, at);
+      __ ld(result, MemOperand(at));
+    } else {
+      __ dsll(at, length, kPointerSizeLog2);
+      __ Daddu(at, arguments, at);
+      __ ld(result, MemOperand(at));
+    }
+  } else {
+    Register length = ToRegister(instr->length());
+    Register index = ToRegister(instr->index());
+    __ Dsubu(result, length, index);
+    __ Daddu(result, result, 1);
+    __ dsll(at, result, kPointerSizeLog2);
+    __ Daddu(at, arguments, at);
+    __ ld(result, MemOperand(at));
+  }
+}
+
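+// Address arithmetic sketch for the three cases above: each one computes
+//   arguments + (length - index + 1) * kPointerSize,
+// with the "+ 1" covering the second of the two words between the frame
+// pointer and the last argument; subtracting index from length covers the
+// first.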
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+      : element_size_shift;
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+    FPURegister result = ToDoubleRegister(instr->result());
+    if (key_is_constant) {
+      __ Daddu(scratch0(), external_pointer,
+          constant_key << element_size_shift);
+    } else {
+      if (shift_size < 0) {
+         if (shift_size == -32) {
+           __ dsra32(scratch0(), key, 0);
+         } else {
+           __ dsra(scratch0(), key, -shift_size);
+         }
+      } else {
+        __ dsll(scratch0(), key, shift_size);
+      }
+      __ Daddu(scratch0(), scratch0(), external_pointer);
+    }
+    if (elements_kind == FLOAT32_ELEMENTS) {
+      __ lwc1(result, MemOperand(scratch0(), base_offset));
+      __ cvt_d_s(result, result);
+    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
+      __ ldc1(result, MemOperand(scratch0(), base_offset));
+    }
+  } else {
+    Register result = ToRegister(instr->result());
+    MemOperand mem_operand = PrepareKeyedOperand(
+        key, external_pointer, key_is_constant, constant_key,
+        element_size_shift, shift_size, base_offset);
+    switch (elements_kind) {
+      case INT8_ELEMENTS:
+        __ lb(result, mem_operand);
+        break;
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ lbu(result, mem_operand);
+        break;
+      case INT16_ELEMENTS:
+        __ lh(result, mem_operand);
+        break;
+      case UINT16_ELEMENTS:
+        __ lhu(result, mem_operand);
+        break;
+      case INT32_ELEMENTS:
+        __ lw(result, mem_operand);
+        break;
+      case UINT32_ELEMENTS:
+        __ lw(result, mem_operand);
+        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+          DeoptimizeIf(Ugreater_equal, instr, Deoptimizer::kNegativeValue,
+                       result, Operand(0x80000000));
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+  Register elements = ToRegister(instr->elements());
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+
+  int base_offset = instr->base_offset();
+  if (key_is_constant) {
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    base_offset += constant_key * kDoubleSize;
+  }
+  __ Daddu(scratch, elements, Operand(base_offset));
+
+  if (!key_is_constant) {
+    key = ToRegister(instr->key());
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+        : element_size_shift;
+    if (shift_size > 0) {
+      __ dsll(at, key, shift_size);
+    } else if (shift_size == -32) {
+      __ dsra32(at, key, 0);
+    } else {
+      __ dsra(at, key, -shift_size);
+    }
+    __ Daddu(scratch, scratch, at);
+  }
+
+  __ ldc1(result, MemOperand(scratch));
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ FmoveHigh(scratch, result);
+    DeoptimizeIf(eq, instr, Deoptimizer::kHole, scratch,
+                 Operand(static_cast<int32_t>(kHoleNanUpper32)));
+  }
+}
+
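+// Hole-NaN sketch: holes in FAST_HOLEY_DOUBLE_ELEMENTS storage are a
+// reserved NaN bit pattern whose upper word is kHoleNanUpper32. V8
+// canonicalizes NaNs produced by arithmetic to a different pattern, so
+// comparing the upper word loaded via FmoveHigh against kHoleNanUpper32 is
+// sufficient to detect the hole.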
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+  HLoadKeyed* hinstr = instr->hydrogen();
+  Register elements = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  if (instr->key()->IsConstantOperand()) {
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    Register key = ToRegister(instr->key());
+    // Even though the HLoadKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bound check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (instr->hydrogen()->key()->representation().IsSmi()) {
+      __ SmiScale(scratch, key, kPointerSizeLog2);
+      __ daddu(scratch, elements, scratch);
+    } else {
+      __ dsll(scratch, key, kPointerSizeLog2);
+      __ daddu(scratch, elements, scratch);
+    }
+  }
+
+  Representation representation = hinstr->representation();
+  if (representation.IsInteger32() && SmiValuesAre32Bits() &&
+      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+    DCHECK(!hinstr->RequiresHoleCheck());
+    if (FLAG_debug_code) {
+      Register temp = scratch1();
+      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
+      __ AssertSmi(temp);
+    }
+
+    // Read int value directly from upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    offset = SmiWordOffset(offset);
+  }
+
+  __ Load(result, MemOperand(store_base, offset), representation);
+
+  // Check for the hole value.
+  if (hinstr->RequiresHoleCheck()) {
+    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+      __ SmiTst(result, scratch);
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch,
+                   Operand(zero_reg));
+    } else {
+      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole, result, Operand(scratch));
+    }
+  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
+    Label done;
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    __ Branch(&done, ne, result, Operand(scratch));
+    if (info()->IsStub()) {
+      // A stub can safely convert the hole to undefined only if the array
+      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // it needs to bail out.
+      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
+      // The comparison only needs the least-significant bits of the value,
+      // which is a smi.
+      __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
+      DeoptimizeIf(ne, instr, Deoptimizer::kHole, result,
+                   Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
+    }
+    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+  if (instr->is_fixed_typed_array()) {
+    DoLoadKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->representation().IsDouble()) {
+    DoLoadKeyedFixedDoubleArray(instr);
+  } else {
+    DoLoadKeyedFixedArray(instr);
+  }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+                                         Register base,
+                                         bool key_is_constant,
+                                         int constant_key,
+                                         int element_size,
+                                         int shift_size,
+                                         int base_offset) {
+  if (key_is_constant) {
+    return MemOperand(base, (constant_key << element_size) + base_offset);
+  }
+
+  if (base_offset == 0) {
+    if (shift_size >= 0) {
+      __ dsll(scratch0(), key, shift_size);
+      __ Daddu(scratch0(), base, scratch0());
+      return MemOperand(scratch0());
+    } else {
+      if (shift_size == -32) {
+        __ dsra32(scratch0(), key, 0);
+      } else {
+        __ dsra(scratch0(), key, -shift_size);
+      }
+      __ Daddu(scratch0(), base, scratch0());
+      return MemOperand(scratch0());
+    }
+  }
+
+  if (shift_size >= 0) {
+    __ dsll(scratch0(), key, shift_size);
+    __ Daddu(scratch0(), base, scratch0());
+    return MemOperand(scratch0(), base_offset);
+  } else {
+    if (shift_size == -32) {
+       __ dsra32(scratch0(), key, 0);
+    } else {
+      __ dsra(scratch0(), key, -shift_size);
+    }
+    __ Daddu(scratch0(), base, scratch0());
+    return MemOperand(scratch0(), base_offset);
+  }
+}
+
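+// shift_size convention (worked example): element_size_shift is log2 of the
+// element size, e.g. 3 for FAST_DOUBLE_ELEMENTS. An untagged int32 key is
+// scaled up with dsll(key, 3); a smi key already carries a 32-bit left
+// shift, so shift_size becomes 3 - 32 = -29 and the key is scaled with
+// dsra(key, 29). Byte-sized elements with smi keys hit the special case
+// shift_size == -32, handled with dsra32(key, 0).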
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+                        isolate(), instr->hydrogen()->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register scratch = scratch0();
+  Register temp = scratch1();
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
+    __ Dsubu(result, sp, 2 * kPointerSize);
+  } else {
+    // Check if the calling frame is an arguments adaptor frame.
+    Label done, adapted;
+    __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+    // Result is the frame pointer for the frame if not adapted and for the
+    // real frame below the adaptor frame if adapted.
+    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
+    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
+  }
+}
+
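+// Conditional-move sketch for the adaptor test above: temp is zero exactly
+// when the caller frame's context slot holds the ARGUMENTS_ADAPTOR marker.
+// Movn keeps fp in result while temp != 0 (no adaptor frame) and Movz
+// installs the adaptor frame pointer when temp == 0, selecting between the
+// two frames without a branch.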
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elem = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
+  __ Branch(&done, eq, fp, Operand(elem));
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(result,
+        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, result_in_receiver;
+
+  if (!instr->hydrogen()->known_function()) {
+    // Do not transform the receiver to object for strict mode functions.
+    __ ld(scratch,
+           FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+
+    // Do not transform the receiver to object for builtins.
+    int32_t strict_mode_function_mask =
+        1 << SharedFunctionInfo::kStrictModeBitWithinByte;
+    int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
+
+    __ lbu(at,
+           FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
+    __ And(at, at, Operand(strict_mode_function_mask));
+    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
+    __ lbu(at,
+           FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
+    __ And(at, at, Operand(native_mask));
+    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
+  }
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+
+  // Deoptimize if the receiver is not a JS object.
+  __ SmiTst(receiver, scratch);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, scratch, Operand(zero_reg));
+
+  __ GetObjectType(receiver, scratch, scratch);
+  DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject, scratch,
+               Operand(FIRST_JS_RECEIVER_TYPE));
+  __ Branch(&result_in_receiver);
+
+  __ bind(&global_object);
+  __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
+  __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+  __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+
+  if (result.is(receiver)) {
+    __ bind(&result_in_receiver);
+  } else {
+    Label result_ok;
+    __ Branch(&result_ok);
+    __ bind(&result_in_receiver);
+    __ mov(result, receiver);
+    __ bind(&result_ok);
+  }
+}
+
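+// Receiver-conversion sketch: sloppy-mode, non-native callees get the
+// global proxy in place of null/undefined receivers, while strict-mode and
+// native (builtin) callees take the receiver unchanged; that is why the two
+// SharedFunctionInfo bits are tested before any conversion. Primitive
+// receivers that reach the conversion path deoptimize.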
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  DCHECK(receiver.is(a0));  // Used for parameter count.
+  DCHECK(function.is(a1));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments, length,
+               Operand(kArgumentsLimit));
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ push(receiver);
+  __ Move(receiver, length);
+  // The arguments are at a one pointer size offset from elements.
+  __ Daddu(elements, elements, Operand(1 * kPointerSize));
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
+  __ dsll(scratch, length, kPointerSizeLog2);
+  __ bind(&loop);
+  __ Daddu(scratch, elements, scratch);
+  __ ld(scratch, MemOperand(scratch));
+  __ push(scratch);
+  __ Dsubu(length, length, Operand(1));
+  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
+  __ dsll(scratch, length, kPointerSizeLog2);
+
+  __ bind(&invoke);
+  DCHECK(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  // The number of arguments is stored in receiver which is a0, as expected
+  // by InvokeFunction.
+  ParameterCount actual(receiver);
+  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+                    safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->value();
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort(kDoPushArgumentNotImplementedForDoubleType);
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, at);
+    __ push(argument_reg);
+  }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
+  Register result = ToRegister(instr->result());
+  if (info()->IsOptimizing()) {
+    __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in cp.
+    DCHECK(result.is(cp));
+  }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  __ li(scratch0(), instr->hydrogen()->pairs());
+  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
+  __ Push(scratch0(), scratch1());
+  CallRuntime(Runtime::kDeclareGlobals, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count, int arity,
+                                 LInstruction* instr) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+
+  Register function_reg = a1;
+  LPointerMap* pointers = instr->pointer_map();
+
+  if (can_invoke_directly) {
+    // Change context.
+    __ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+    // Always initialize new target and number of actual arguments.
+    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+    __ li(a0, Operand(arity));
+
+    // Invoke function.
+    __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+    __ Call(at);
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+  }
+}
+
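+// Direct-invoke note: the adaptor frame can be skipped only when the callee
+// opted out of argument adaptation (kDontAdaptArgumentsSentinel) or the
+// call site arity matches the declared formal parameter count; any other
+// combination goes through InvokeFunction and the arguments adaptor.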
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  DCHECK(instr->context() != NULL);
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Deoptimize if not a heap number.
+  __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch, Operand(at));
+
+  Label done;
+  Register exponent = scratch0();
+  scratch = no_reg;
+  __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it.
+  __ Move(result, input);
+  __ And(at, exponent, Operand(HeapNumber::kSignMask));
+  __ Branch(&done, eq, at, Operand(zero_reg));
+
+  // Input is negative. Reverse its sign.
+  // Preserve the value of all registers.
+  {
+    PushSafepointRegistersScope scope(this);
+
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(a1) ? a0 : a1;
+    Register tmp2 = input.is(a2) ? a0 : a2;
+    Register tmp3 = input.is(a3) ? a0 : a3;
+    Register tmp4 = input.is(a4) ? a0 : a4;
+
+    // exponent: floating point exponent value.
+
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ Branch(&allocated);
+
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
+
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+                            instr->context());
+    // Set the pointer to the new heap number in tmp1.
+    if (!tmp1.is(v0)) __ mov(tmp1, v0);
+    // Restore input_reg after call to runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
+    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
+    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+    __ StoreToSafepointRegisterSlot(tmp1, result);
+  }
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  Label done;
+  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
+  __ mov(result, input);
+  __ subu(result, zero_reg, input);
+  // Overflow if result is still negative, i.e. 0x80000000.
+  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  Label done;
+  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
+  __ mov(result, input);
+  __ dsubu(result, zero_reg, input);
+  // Overflow if result is still negative, i.e. 0x80000000 00000000.
+  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, result, Operand(zero_reg));
+  __ bind(&done);
+}
+
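+// Overflow sketch for the two abs helpers above: negating the most negative
+// two's-complement value yields the same bit pattern (e.g. -INT32_MIN is
+// still 0x80000000), so a result that is still negative after the subtract
+// is precisely the overflow case that must deoptimize.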
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMathAbs* instr_;
+  };
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsDouble()) {
+    FPURegister input = ToDoubleRegister(instr->value());
+    FPURegister result = ToDoubleRegister(instr->result());
+    __ abs_d(result, input);
+  } else if (r.IsInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else if (r.IsSmi()) {
+    EmitSmiMathAbs(instr);
+  } else {
+    // Representation is tagged.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input = ToRegister(instr->value());
+    // Smi check.
+    __ JumpIfNotSmi(input, deferred->entry());
+    // If smi, handle it directly.
+    EmitSmiMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  Register except_flag = ToRegister(instr->temp());
+
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     result,
+                     input,
+                     scratch1,
+                     double_scratch0(),
+                     except_flag);
+
+  // Deopt if the operation did not succeed.
+  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+               Operand(zero_reg));
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    Label done;
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ mfhc1(scratch1, input);  // Get exponent/sign bits.
+    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+                 Operand(zero_reg));
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
+  Register scratch = scratch0();
+  Label done, check_sign_on_zero;
+
+  // Extract exponent bits.
+  __ mfhc1(result, input);
+  __ Ext(scratch,
+         result,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+
+  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
+  Label skip1;
+  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
+  __ mov(result, zero_reg);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ Branch(&check_sign_on_zero);
+  } else {
+    __ Branch(&done);
+  }
+  __ bind(&skip1);
+
+  // The following conversion will not work with numbers
+  // outside of ]-2^32, 2^32[.
+  DeoptimizeIf(ge, instr, Deoptimizer::kOverflow, scratch,
+               Operand(HeapNumber::kExponentBias + 32));
+
+  // Save the original sign for later comparison.
+  __ And(scratch, result, Operand(HeapNumber::kSignMask));
+
+  __ Move(double_scratch0(), 0.5);
+  __ add_d(double_scratch0(), input, double_scratch0());
+
+  // Check sign of the result: if the sign changed, the input
+  // value was in ]-0.5, 0[ and the result should be -0.
+  __ mfhc1(result, double_scratch0());
+  // mfhc1 sign-extends, clear the upper bits.
+  __ dsll32(result, result, 0);
+  __ dsrl32(result, result, 0);
+  __ Xor(result, result, Operand(scratch));
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // ARM uses 'mi' here, which is 'lt'
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, result, Operand(zero_reg));
+  } else {
+    Label skip2;
+    // ARM uses 'mi' here, which is 'lt'
+    // Negating it results in 'ge'
+    __ Branch(&skip2, ge, result, Operand(zero_reg));
+    __ mov(result, zero_reg);
+    __ Branch(&done);
+    __ bind(&skip2);
+  }
+
+  Register except_flag = scratch;
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     result,
+                     double_scratch0(),
+                     at,
+                     double_scratch1,
+                     except_flag);
+
+  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+               Operand(zero_reg));
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ bind(&check_sign_on_zero);
+    __ mfhc1(scratch, input);  // Get exponent/sign bits.
+    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch,
+                 Operand(zero_reg));
+  }
+  __ bind(&done);
+}
+
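+// Rounding sketch: adding 0.5 and truncating toward minus infinity gives JS
+// Math.round semantics for halfway cases, e.g. 2.5 -> 3.0 -> 3 and
+// -2.5 -> -2.0 -> -2 (halfway cases round toward +infinity). The exponent
+// test at the top short-circuits inputs in ]-0.5, +0.5[ straight to +/-0.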
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ cvt_s_d(result, input);
+  __ cvt_d_s(result, result);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ sqrt_d(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister temp = ToDoubleRegister(instr->temp());
+
+  DCHECK(!input.is(result));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done;
+  __ Move(temp, static_cast<double>(-V8_INFINITY));
+  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
+  // Set up Infinity in the delay slot.
+  // result is overwritten if the branch is not taken.
+  __ neg_d(result, temp);
+
+  // Add +0 to convert -0 to +0.
+  __ add_d(result, input, kDoubleRegZero);
+  __ sqrt_d(result, result);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
+         ToDoubleRegister(instr->right()).is(f4));
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(f2));
+  DCHECK(ToDoubleRegister(instr->result()).is(f0));
+
+  if (exponent_type.IsSmi()) {
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    DCHECK(!a7.is(tagged_exponent));
+    // The map word is a full 64-bit pointer here, so load it with ld.
+    __ ld(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, a7, Operand(at));
+    __ bind(&no_deopt);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(isolate(), MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+    __ CallStub(&stub);
+  }
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+  DoubleRegister double_scratch2 = double_scratch0();
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  MathExpGenerator::EmitMathExp(
+      masm(), input, result, double_scratch1, double_scratch2,
+      temp1, temp2, scratch0());
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+                   0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  __ Clz(result, input);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(instr->HasPointerMap());
+
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(a1, no_reg, count, CALL_FUNCTION, generator);
+  } else {
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
+                      instr->arity(), instr);
+  }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ Jump(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ Jump(target);
+    }
+  } else {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      __ Call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ Call(target);
+    }
+    generator.AfterCall();
+  }
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  // Change context.
+  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Always initialize new target and number of actual arguments.
+  __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+  __ li(a0, Operand(instr->arity()));
+
+  // Load the code entry address
+  __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ Call(at);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    Register slot_register = ToRegister(instr->temp_slot());
+    Register vector_register = ToRegister(instr->temp_vector());
+    DCHECK(slot_register.is(a3));
+    DCHECK(vector_register.is(a2));
+
+    AllowDeferredHandleDereference vector_structure_check;
+    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+    int index = vector->GetIndex(instr->hydrogen()->slot());
+
+    __ li(vector_register, vector);
+    __ li(slot_register, Operand(Smi::FromInt(index)));
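+    // The CallIC consumes the vector and slot to keep collecting type
+    // feedback for this call site even in optimized code.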
+
+    Handle<Code> ic =
+        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ li(a0, Operand(arity));
+    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  __ li(a0, Operand(instr->arity()));
+  if (instr->arity() == 1) {
+    // We only need the allocation site when there is a length argument; that
+    // case may bail out to the runtime, which determines the correct elements
+    // kind from the site.
+    __ li(a2, instr->hydrogen()->site());
+  } else {
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+  }
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+      // We might need the holey variant of the stub;
+      // look at the first argument.
+      __ ld(a5, MemOperand(sp, 0));
+      __ Branch(&packed_case, eq, a5, Operand(zero_reg));
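+      // A nonzero length produces holes (e.g. new Array(3)), so the holey
+      // variant of the stub is needed; a zero length keeps the packed kind.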
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(),
+                                              holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      __ jmp(&done);
+      __ bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    __ bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  __ Daddu(code_object, code_object,
+          Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sd(code_object,
+        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+    __ Daddu(result, base, Operand(ToInteger32(offset)));
+  } else {
+    Register offset = ToRegister(instr->offset());
+    __ Daddu(result, base, offset);
+  }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Representation representation = instr->representation();
+
+  Register object = ToRegister(instr->object());
+  Register scratch2 = scratch1();
+  Register scratch1 = scratch0();
+
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+  if (access.IsExternalMemory()) {
+    Register value = ToRegister(instr->value());
+    MemOperand operand = MemOperand(object, offset);
+    __ Store(value, operand, representation);
+    return;
+  }
+
+  __ AssertNotSmi(object);
+
+  DCHECK(!representation.IsSmi() ||
+         !instr->value()->IsConstantOperand() ||
+         IsSmi(LConstantOperand::cast(instr->value())));
+  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    __ sdc1(value, FieldMemOperand(object, offset));
+    return;
+  }
+
+  if (instr->hydrogen()->has_transition()) {
+    Handle<Map> transition = instr->hydrogen()->transition_map();
+    AddDeprecationDependency(transition);
+    __ li(scratch1, Operand(transition));
+    __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+      Register temp = ToRegister(instr->temp());
+      // Update the write barrier for the map field.
+      __ RecordWriteForMap(object,
+                           scratch1,
+                           temp,
+                           GetRAState(),
+                           kSaveFPRegs);
+    }
+  }
+
+  // Do the store.
+  Register destination = object;
+  if (!access.IsInobject()) {
+    destination = scratch1;
+    __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  }
+
+  if (representation.IsSmi() && SmiValuesAre32Bits() &&
+      instr->hydrogen()->value()->representation().IsInteger32()) {
+    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    if (FLAG_debug_code) {
+      __ Load(scratch2, FieldMemOperand(destination, offset), representation);
+      __ AssertSmi(scratch2);
+    }
+    // Store int value directly to upper half of the smi.
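+    // SmiWordOffset selects the half of the 64-bit field that holds the
+    // 32-bit payload (which half depends on endianness); the other half
+    // stays zero, which is exactly the smi tag.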
+    offset = SmiWordOffset(offset);
+    representation = Representation::Integer32();
+  }
+  MemOperand operand = FieldMemOperand(destination, offset);
+
+  if (FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    __ sdc1(value, operand);
+  } else {
+    DCHECK(instr->value()->IsRegister());
+    Register value = ToRegister(instr->value());
+    __ Store(value, operand, representation);
+  }
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    // Update the write barrier for the object for in-object properties.
+    Register value = ToRegister(instr->value());
+    __ RecordWriteField(destination,
+                        offset,
+                        value,
+                        scratch2,
+                        GetRAState(),
+                        kSaveFPRegs,
+                        EMIT_REMEMBERED_SET,
+                        instr->hydrogen()->SmiCheckForWriteBarrier(),
+                        instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+  }
+
+  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
+  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
+  Operand operand(static_cast<int64_t>(0));
+  Register reg;
+  if (instr->index()->IsConstantOperand()) {
+    operand = ToOperand(instr->index());
+    reg = ToRegister(instr->length());
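+    // The operands are swapped (length in the register, constant index in
+    // the operand), so commute the condition to keep the comparison
+    // equivalent.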
+    cc = CommuteCondition(cc);
+  } else {
+    reg = ToRegister(instr->index());
+    operand = ToOperand(instr->length());
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+    Label done;
+    __ Branch(&done, NegateCondition(cc), reg, operand);
+    __ stop("eliminated bounds check failed");
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds, reg, operand);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+      : element_size_shift;
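+  // For a smi key the 32-bit payload lives in the upper word, so the scale
+  // becomes element_size_shift - 32; a negative value is applied below as an
+  // arithmetic right shift rather than a left shift.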
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+    Register address = scratch0();
+    FPURegister value(ToDoubleRegister(instr->value()));
+    if (key_is_constant) {
+      if (constant_key != 0) {
+        __ Daddu(address, external_pointer,
+                Operand(constant_key << element_size_shift));
+      } else {
+        address = external_pointer;
+      }
+    } else {
+      if (shift_size < 0) {
+        if (shift_size == -32) {
+          __ dsra32(address, key, 0);
+        } else {
+          __ dsra(address, key, -shift_size);
+        }
+      } else {
+        __ dsll(address, key, shift_size);
+      }
+      __ Daddu(address, external_pointer, address);
+    }
+
+    if (elements_kind == FLOAT32_ELEMENTS) {
+      __ cvt_s_d(double_scratch0(), value);
+      __ swc1(double_scratch0(), MemOperand(address, base_offset));
+    } else {  // Storing doubles, not floats.
+      __ sdc1(value, MemOperand(address, base_offset));
+    }
+  } else {
+    Register value(ToRegister(instr->value()));
+    MemOperand mem_operand = PrepareKeyedOperand(
+        key, external_pointer, key_is_constant, constant_key,
+        element_size_shift, shift_size,
+        base_offset);
+    switch (elements_kind) {
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+      case INT8_ELEMENTS:
+        __ sb(value, mem_operand);
+        break;
+      case INT16_ELEMENTS:
+      case UINT16_ELEMENTS:
+        __ sh(value, mem_operand);
+        break;
+      case INT32_ELEMENTS:
+      case UINT32_ELEMENTS:
+        __ sw(value, mem_operand);
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  DoubleRegister value = ToDoubleRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  DoubleRegister double_scratch = double_scratch0();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int base_offset = instr->base_offset();
+
+  // Calculate the effective address of the slot in the array to store the
+  // double value.
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  if (key_is_constant) {
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    __ Daddu(scratch, elements,
+             Operand((constant_key << element_size_shift) + base_offset));
+  } else {
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+        : element_size_shift;
+    __ Daddu(scratch, elements, Operand(base_offset));
+    DCHECK((shift_size == 3) || (shift_size == -29));
+    if (shift_size == 3) {
+      __ dsll(at, ToRegister(instr->key()), 3);
+    } else if (shift_size == -29) {
+      __ dsra(at, ToRegister(instr->key()), 29);
+    }
+    __ Daddu(scratch, scratch, at);
+  }
+
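+  // Only a canonical NaN pattern may be stored: the hole in a fixed double
+  // array is itself encoded as a NaN, so arbitrary payloads could be misread
+  // as holes.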
+  if (instr->NeedsCanonicalization()) {
+    __ FPUCanonicalizeNaN(double_scratch, value);
+    __ sdc1(double_scratch, MemOperand(scratch, 0));
+  } else {
+    __ sdc1(value, MemOperand(scratch, 0));
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
+      : no_reg;
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    // Even though the HStoreKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (instr->hydrogen()->key()->representation().IsSmi()) {
+      __ SmiScale(scratch, key, kPointerSizeLog2);
+      __ daddu(store_base, elements, scratch);
+    } else {
+      __ dsll(scratch, key, kPointerSizeLog2);
+      __ daddu(store_base, elements, scratch);
+    }
+  }
+
+  Representation representation = instr->hydrogen()->value()->representation();
+  if (representation.IsInteger32() && SmiValuesAre32Bits()) {
+    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+    if (FLAG_debug_code) {
+      Register temp = scratch1();
+      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
+      __ AssertSmi(temp);
+    }
+
+    // Store int value directly to upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    offset = SmiWordOffset(offset);
+    representation = Representation::Integer32();
+  }
+
+  __ Store(value, MemOperand(store_base, offset), representation);
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
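+    // The smi check is omitted when the value is statically known to be a
+    // heap object; RecordWrite then informs the GC of the updated slot.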
+    // Compute address of modified element and store it into key register.
+    __ Daddu(key, store_base, Operand(offset));
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   GetRAState(),
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed,
+                   instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // Dispatch by case: typed (external) array, fast double array, or
+  // fast (tagged) array.
+  if (instr->is_fixed_typed_array()) {
+    DoStoreKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+    DoStoreKeyedFixedDoubleArray(instr);
+  } else {
+    DoStoreKeyedFixedArray(instr);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+  class DeferredMaybeGrowElements final : public LDeferredCode {
+   public:
+    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMaybeGrowElements* instr_;
+  };
+
+  Register result = v0;
+  DeferredMaybeGrowElements* deferred =
+      new (zone()) DeferredMaybeGrowElements(this, instr);
+  LOperand* key = instr->key();
+  LOperand* current_capacity = instr->current_capacity();
+
+  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+  DCHECK(key->IsConstantOperand() || key->IsRegister());
+  DCHECK(current_capacity->IsConstantOperand() ||
+         current_capacity->IsRegister());
+
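+  // All four operand shapes reduce to the same test, key >= capacity; the
+  // constant/constant case is decided at compile time and the mixed cases
+  // commute the condition accordingly.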
+  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    if (constant_key >= constant_capacity) {
+      // Deferred case.
+      __ jmp(deferred->entry());
+    }
+  } else if (key->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    __ Branch(deferred->entry(), le, ToRegister(current_capacity),
+              Operand(constant_key));
+  } else if (current_capacity->IsConstantOperand()) {
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    __ Branch(deferred->entry(), ge, ToRegister(key),
+              Operand(constant_capacity));
+  } else {
+    __ Branch(deferred->entry(), ge, ToRegister(key),
+              Operand(ToRegister(current_capacity)));
+  }
+
+  if (instr->elements()->IsRegister()) {
+    __ mov(result, ToRegister(instr->elements()));
+  } else {
+    __ ld(result, ToMemOperand(instr->elements()));
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = v0;
+  __ mov(result, zero_reg);
+
+  // We have to call a stub.
+  {
+    PushSafepointRegistersScope scope(this);
+    if (instr->object()->IsRegister()) {
+      __ mov(result, ToRegister(instr->object()));
+    } else {
+      __ ld(result, ToMemOperand(instr->object()));
+    }
+
+    LOperand* key = instr->key();
+    if (key->IsConstantOperand()) {
+      __ li(a3, Operand(ToSmi(LConstantOperand::cast(key))));
+    } else {
+      __ mov(a3, ToRegister(key));
+      __ SmiTag(a3);
+    }
+
+    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+                               instr->hydrogen()->kind());
+    __ mov(a0, result);
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(
+        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    __ StoreToSafepointRegisterSlot(result, result);
+  }
+
+  // Deopt on smi, which means the elements array changed to dictionary mode.
+  __ SmiTst(result, at);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = instr->from_kind();
+  ElementsKind to_kind = instr->to_kind();
+
+  Label not_applicable;
+  __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
+
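+  // A simple transition only requires swapping in the new map (plus a map
+  // write barrier); anything else may have to rewrite the elements store,
+  // so it is delegated to the stub.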
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ li(new_map_reg, Operand(to_map));
+    __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    // Write barrier.
+    __ RecordWriteForMap(object_reg,
+                         new_map_reg,
+                         scratch,
+                         GetRAState(),
+                         kDontSaveFPRegs);
+  } else {
+    DCHECK(object_reg.is(a0));
+    DCHECK(ToRegister(instr->context()).is(cp));
+    PushSafepointRegistersScope scope(this);
+    __ li(a1, Operand(to_map));
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
+                                     ne, &no_memento_found);
+  DeoptimizeIf(al, instr, Deoptimizer::kMementoFound);
+  __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(a1));
+  DCHECK(ToRegister(instr->right()).is(a0));
+  StringAddStub stub(isolate(),
+                     instr->hydrogen()->flags(),
+                     instr->hydrogen()->pretenure_flag());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt final : public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new(zone()) DeferredStringCharCodeAt(this, instr);
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  if (instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
+    __ push(scratch);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+                          instr->context());
+  __ AssertSmi(v0);
+  __ SmiUntag(v0);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode final : public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredStringCharFromCode(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new(zone()) DeferredStringCharFromCode(this, instr);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  DCHECK(!char_code.is(result));
+
+  __ Branch(deferred->entry(), hi,
+            char_code, Operand(String::kMaxOneByteCharCode));
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+  __ dsll(scratch, char_code, kPointerSizeLog2);
+  __ Daddu(result, result, scratch);
+  __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(deferred->entry(), eq, result, Operand(scratch));
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  DCHECK(output->IsDoubleRegister());
+  FPURegister single_scratch = double_scratch0().low();
+  if (input->IsStackSlot()) {
+    Register scratch = scratch0();
+    __ ld(scratch, ToMemOperand(input));
+    __ mtc1(scratch, single_scratch);
+  } else {
+    __ mtc1(ToRegister(input), single_scratch);
+  }
+  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+
+  FPURegister dbl_scratch = double_scratch0();
+  __ mtc1(ToRegister(input), dbl_scratch);
+  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch);
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+  class DeferredNumberTagU final : public LDeferredCode {
+   public:
+    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_,
+                                       instr_->value(),
+                                       instr_->temp1(),
+                                       instr_->temp2(),
+                                       UNSIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagU* instr_;
+  };
+
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
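+  // Values above Smi::kMaxValue do not fit in a smi and are boxed in a heap
+  // number by the deferred code.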
+  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
+  __ SmiTag(result, input);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+                                     LOperand* value,
+                                     LOperand* temp1,
+                                     LOperand* temp2,
+                                     IntegerSignedness signedness) {
+  Label done, slow;
+  Register src = ToRegister(value);
+  Register dst = ToRegister(instr->result());
+  Register tmp1 = scratch0();
+  Register tmp2 = ToRegister(temp1);
+  Register tmp3 = ToRegister(temp2);
+  DoubleRegister dbl_scratch = double_scratch0();
+
+  if (signedness == SIGNED_INT32) {
+    // There was overflow, so bits 30 and 31 of the original integer
+    // disagree. Try to allocate a heap number in new space and store
+    // the value there. If that fails, call the runtime system.
+    if (dst.is(src)) {
+      __ SmiUntag(src, dst);
+      __ Xor(src, src, Operand(0x80000000));
+    }
+    __ mtc1(src, dbl_scratch);
+    __ cvt_d_w(dbl_scratch, dbl_scratch);
+  } else {
+    __ mtc1(src, dbl_scratch);
+    __ Cvt_d_uw(dbl_scratch, dbl_scratch);
+  }
+
+  if (FLAG_inline_new) {
+    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT);
+    __ Branch(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ mov(dst, zero_reg);
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    // NumberTagI and NumberTagD use the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // They only call Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(v0, dst);
+  }
+
+  // Done. Store the value in dbl_scratch into the value field of the
+  // allocated heap number.
+  __ bind(&done);
+  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD final : public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+
+  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    // We want the untagged address first for performance; the result is
+    // tagged after the store below.
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+                          DONT_TAG_RESULT);
+  } else {
+    __ Branch(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
+  // Now that we are done with the object's real address, tag it.
+  __ Daddu(reg, reg, kHeapObjectTag);
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ mov(reg, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+  __ Dsubu(v0, v0, kHeapObjectTag);
+  __ StoreToSafepointRegisterSlot(v0, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  HChange* hchange = instr->hydrogen();
+  Register input = ToRegister(instr->value());
+  Register output = ToRegister(instr->result());
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      hchange->value()->CheckFlag(HValue::kUint32)) {
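+    // A uint32 with bit 31 set exceeds Smi::kMaxValue (2^31 - 1 here) and
+    // cannot be represented as a smi.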
+    __ And(at, input, Operand(0x80000000));
+    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+  }
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      !hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ SmiTagCheckOverflow(output, input, at);
+    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, at, Operand(zero_reg));
+  } else {
+    __ SmiTag(output, input);
+  }
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  if (instr->needs_check()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    // If the input is a HeapObject, the value of scratch won't be zero.
+    __ And(scratch, input, Operand(kHeapObjectTag));
+    __ SmiUntag(result, input);
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, scratch, Operand(zero_reg));
+  } else {
+    __ SmiUntag(result, input);
+  }
+}
+
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+                                DoubleRegister result_reg,
+                                NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+  Register scratch = scratch0();
+  Label convert, load_smi, done;
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+    // Heap number map check.
+    __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    if (can_convert_undefined_to_nan) {
+      __ Branch(&convert, ne, scratch, Operand(at));
+    } else {
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch,
+                   Operand(at));
+    }
+    // Load heap number.
+    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
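+      // -0 has a zero low word and a high word equal to the sign mask;
+      // anything with a nonzero low word cannot be -0.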
+      __ mfc1(at, result_reg);
+      __ Branch(&done, ne, at, Operand(zero_reg));
+      __ mfhc1(scratch, result_reg);  // Get exponent/sign bits.
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, scratch,
+                   Operand(HeapNumber::kSignMask));
+    }
+    __ Branch(&done);
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+      // Convert undefined (and hole) to NaN.
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
+                   Operand(at));
+      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+      __ Branch(&done);
+    }
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+  }
+  // Smi to double register conversion.
+  __ bind(&load_smi);
+  // scratch: untagged value of input_reg
+  __ mtc1(scratch, result_reg);
+  __ cvt_d_w(result_reg, result_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Register input_reg = ToRegister(instr->value());
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->temp());
+  DoubleRegister double_scratch = double_scratch0();
+  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
+
+  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+  Label done;
+
+  // The input is a tagged HeapObject.
+  // Heap number map check.
+  __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  // The value in 'at' and the map in scratch1 are used by the tests in both
+  // clauses of the if below.
+
+  if (instr->truncating()) {
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    Label no_heap_number, check_bools, check_false;
+    // Check HeapNumber map.
+    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
+    __ mov(scratch2, input_reg);  // In delay slot.
+    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ Branch(&done);
+
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
+    __ bind(&no_heap_number);
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(&check_bools, ne, input_reg, Operand(at));
+    DCHECK(ToRegister(instr->result()).is(input_reg));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ mov(input_reg, zero_reg);  // In delay slot.
+
+    __ bind(&check_bools);
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(&check_false, ne, scratch2, Operand(at));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ li(input_reg, Operand(1));  // In delay slot.
+
+    __ bind(&check_false);
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean,
+                 scratch2, Operand(at));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ mov(input_reg, zero_reg);  // In delay slot.
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, scratch1,
+                 Operand(at));
+
+    // Load the double value.
+    __ ldc1(double_scratch,
+            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       input_reg,
+                       double_scratch,
+                       scratch1,
+                       double_scratch2,
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+                 Operand(zero_reg));
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ Branch(&done, ne, input_reg, Operand(zero_reg));
+
+      __ mfhc1(scratch1, double_scratch);  // Get exponent/sign bits.
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+                   Operand(zero_reg));
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI final : public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  DCHECK(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+
+    // Let the deferred code handle the HeapObject case.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+
+    // Smi to int32 conversion.
+    __ SmiUntag(input_reg);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DoubleRegister result_reg = ToDoubleRegister(result);
+
+  HValue* value = instr->hydrogen()->value();
+  NumberUntagDMode mode = value->representation().IsSmi()
+      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+  EmitNumberUntagD(instr, input_reg, result_reg, mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    Register except_flag = LCodeGen::scratch1();
+
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       result_reg,
+                       double_input,
+                       scratch1,
+                       double_scratch0(),
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed (except_flag != 0).
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+                 Operand(zero_reg));
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ Branch(&done, ne, result_reg, Operand(zero_reg));
+      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+                   Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = LCodeGen::scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    Register except_flag = LCodeGen::scratch1();
+
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       result_reg,
+                       double_input,
+                       scratch1,
+                       double_scratch0(),
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed (except_flag != 0).
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, except_flag,
+                 Operand(zero_reg));
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ Branch(&done, ne, result_reg, Operand(zero_reg));
+      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero, scratch1,
+                   Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+  __ SmiTag(result_reg, result_reg);
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->value();
+  __ SmiTst(ToRegister(input), at);
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ SmiTst(ToRegister(input), at);
+    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoCheckArrayBufferNotNeutered(
+    LCheckArrayBufferNotNeutered* instr) {
+  Register view = ToRegister(instr->view());
+  Register scratch = scratch0();
+
+  __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
+  __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+  __ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
+  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ GetObjectType(input, scratch, scratch);
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    // If there is only one type in the interval, check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+                   Operand(first));
+    } else {
+      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType, scratch,
+                   Operand(first));
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType, scratch,
+                     Operand(last));
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
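+      // With a single-bit mask the check collapses to one And: a zero tag
+      // requires the bit to be clear, a power-of-two tag requires it set.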
+      __ And(at, scratch, mask);
+      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
+                   at, Operand(zero_reg));
+    } else {
+      __ And(scratch, scratch, Operand(mask));
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType, scratch,
+                   Operand(tag));
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+  Register reg = ToRegister(instr->value());
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
+  AllowDeferredHandleDereference smi_check;
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<Cell> cell = isolate()->factory()->NewCell(object);
+    __ li(at, Operand(cell));
+    __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
+    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(at));
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch, reg, Operand(object));
+  }
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  {
+    PushSafepointRegistersScope scope(this);
+    __ push(object);
+    __ mov(cp, zero_reg);
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(v0, scratch0());
+  }
+  __ SmiTst(scratch0(), at);
+  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, at,
+               Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  class DeferredCheckMaps final : public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+        : LDeferredCode(codegen), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    void Generate() override {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->IsStabilityCheck()) {
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }
+
+  Register map_reg = scratch0();
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  Register reg = ToRegister(input);
+  __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+    __ bind(deferred->check_maps());
+  }
+
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+  Label success;
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
+    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
+  }
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
+  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map_reg, Operand(map));
+  }
+
+  __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  Label is_smi, done, heap_number;
+
+  // Both smi and heap number cases are handled.
+  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
+
+  // Check for a heap number.
+  __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined, input_reg,
+               Operand(factory()->undefined_value()));
+  __ mov(result_reg, zero_reg);
+  __ jmp(&done);
+
+  // Heap number case.
+  __ bind(&heap_number);
+  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
+                                             HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+  __ jmp(&done);
+
+  __ bind(&is_smi);
+  __ ClampUint8(result_reg, scratch);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->value());
+  Register result_reg = ToRegister(instr->result());
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ FmoveHigh(result_reg, value_reg);
+  } else {
+    __ FmoveLow(result_reg, value_reg);
+  }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
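+  // Move assembles the double from its two 32-bit halves without a round
+  // trip through memory.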
+  __ Move(result_reg, lo_reg, hi_reg);
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate final : public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  // Allocate memory for the object.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ li(scratch, Operand(size - kHeapObjectTag));
+    } else {
+      __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
+    }
+    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
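+    // Walk backwards from the end of the object, storing the one-word
+    // filler map into every slot so the heap stays iterable until the
+    // object is properly initialized.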
+    Label loop;
+    __ bind(&loop);
+    __ Dsubu(scratch, scratch, Operand(kPointerSize));
+    __ Daddu(at, result, Operand(scratch));
+    __ sd(scratch2, MemOperand(at));
+    __ Branch(&loop, ge, scratch, Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  if (instr->size()->IsRegister()) {
+    Register size = ToRegister(instr->size());
+    DCHECK(!size.is(result));
+    __ SmiTag(size);
+    __ push(size);
+  } else {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    if (size >= 0 && size <= Smi::kMaxValue) {
+      __ li(v0, Operand(Smi::FromInt(size)));
+      __ Push(v0);
+    } else {
+      // We should never get here at runtime; abort.
+      __ stop("invalid allocation size");
+      return;
+    }
+  }
+
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ li(v0, Operand(Smi::FromInt(flags)));
+  __ Push(v0);
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  DCHECK(ToRegister(instr->value()).is(a0));
+  DCHECK(ToRegister(instr->result()).is(v0));
+  __ push(a0);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  DCHECK(ToRegister(instr->value()).is(a3));
+  DCHECK(ToRegister(instr->result()).is(v0));
+  Label end, do_call;
+  Register value_register = ToRegister(instr->value());
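+  // Smis can be answered inline as "number"; everything else goes through
+  // the TypeofStub.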
+  __ JumpIfNotSmi(value_register, &do_call);
+  __ li(v0, Operand(isolate()->factory()->number_string()));
+  __ jmp(&end);
+  __ bind(&do_call);
+  TypeofStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&end);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+
+  Register cmp1 = no_reg;
+  Operand cmp2 = Operand(no_reg);
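+  // EmitTypeofIs fills in the operands for the final comparison;
+  // kNoCondition means the branches it emitted already resolved the answer.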
+
+  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+                                                  instr->FalseLabel(chunk_),
+                                                  input,
+                                                  instr->type_literal(),
+                                                  &cmp1,
+                                                  &cmp2);
+
+  DCHECK(cmp1.is_valid());
+  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
+
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name,
+                                 Register* cmp1,
+                                 Operand* cmp2) {
+  // This function makes heavy use of branch delay slots, using them to load
+  // values that are safe to compute regardless of the type of the input
+  // register.
+  Condition final_branch_condition = kNoCondition;
+  Register scratch = scratch0();
+  Factory* factory = isolate()->factory();
+  if (String::Equals(type_name, factory->number_string())) {
+    __ JumpIfSmi(input, true_label);
+    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    *cmp1 = input;
+    *cmp2 = Operand(at);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->string_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, input, scratch);
+    *cmp1 = scratch;
+    *cmp2 = Operand(FIRST_NONSTRING_TYPE);
+    final_branch_condition = lt;
+
+  } else if (String::Equals(type_name, factory->symbol_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, input, scratch);
+    *cmp1 = scratch;
+    *cmp2 = Operand(SYMBOL_TYPE);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->boolean_string())) {
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    *cmp1 = at;
+    *cmp2 = Operand(input);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->undefined_string())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    // The first instruction of JumpIfSmi is an And - it is safe in the delay
+    // slot.
+    __ JumpIfSmi(input, false_label);
+    // Check for undetectable objects => true.
+    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    *cmp1 = at;
+    *cmp2 = Operand(zero_reg);
+    final_branch_condition = ne;
+
+  } else if (String::Equals(type_name, factory->function_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ And(scratch, scratch,
+           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    *cmp1 = scratch;
+    *cmp2 = Operand(1 << Map::kIsCallable);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->object_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    __ GetObjectType(input, scratch, scratch1());
+    __ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
+    // Check for callable or undetectable objects => false.
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ And(at, scratch,
+           Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    *cmp1 = at;
+    *cmp2 = Operand(zero_reg);
+    final_branch_condition = eq;
+
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
+  } else if (String::Equals(type_name, factory->type##_string())) {  \
+    __ JumpIfSmi(input, false_label);                                \
+    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));    \
+    __ LoadRoot(at, Heap::k##Type##MapRootIndex);                    \
+    *cmp1 = input;                                                   \
+    *cmp2 = Operand(at);                                             \
+    final_branch_condition = eq;
+  SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+    // clang-format on
+
+
+  } else {
+    *cmp1 = at;
+    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
+    __ Branch(false_label);
+  }
+
+  return final_branch_condition;
+}
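+
+// A hypothetical caller sketch (illustrative only; the names used are the
+// declarations above): the returned condition plus the operands handed back
+// through cmp1 and cmp2 feed directly into a MIPS branch:
+//
+//   Register cmp1 = no_reg;
+//   Operand cmp2 = Operand(no_reg);
+//   Condition cc = EmitTypeofIs(true_label, false_label, input,
+//                               factory->number_string(), &cmp1, &cmp2);
+//   if (cc != kNoCondition) __ Branch(true_label, cc, cmp1, cmp2);
+//
+// DoTypeofIsAndBranch above is the real caller, going through EmitBranch.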
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= Assembler::kInstrSize;
+      }
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
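+
+// Illustrative arithmetic: MIPS instructions are Assembler::kInstrSize (4)
+// bytes each, so with, for example, last_lazy_deopt_pc_ == 8,
+// pc_offset() == 16 and space_needed == 24, the loop above pads with
+// (8 + 24 - 16) / 4 = 4 nops before recording the new lazy-deopt PC.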
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
+  }
+
+  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type, zero_reg,
+               Operand(zero_reg));
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  LoadContextFromDeferred(instr->context());
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck final : public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStackCheck* instr_;
+  };
+
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(&done, hs, sp, Operand(at));
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(cp));
+    CallCode(isolate()->builtins()->StackCheck(),
+             RelocInfo::CODE_TARGET,
+             instr);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new(zone()) DeferredStackCheck(this, instr);
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting call and the safepoint in
+    // the deferred code.
+  }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  DCHECK(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  Register result = ToRegister(instr->result());
+  Register object = ToRegister(instr->object());
+
+  __ And(at, object, kSmiTagMask);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, at, Operand(zero_reg));
+
+  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+  __ GetObjectType(object, a1, a1);
+  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject, a1,
+               Operand(JS_PROXY_TYPE));
+
+  Label use_cache, call_runtime;
+  DCHECK(object.is(a0));
+  Register null_value = a5;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ CheckEnumCache(null_value, &call_runtime);
+
+  __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ Branch(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(object);
+  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
+
+  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  DCHECK(result.is(v0));
+  __ LoadRoot(at, Heap::kMetaMapRootIndex);
+  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, a1, Operand(at));
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  Label load_cache, done;
+  __ EnumLength(result, map);
+  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
+  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
+  __ jmp(&done);
+
+  __ bind(&load_cache);
+  __ LoadInstanceDescriptors(map, result);
+  __ ld(result,
+        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+  __ ld(result,
+        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache, result, Operand(zero_reg));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  Register map = ToRegister(instr->map());
+  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap, map, Operand(scratch0()));
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ Push(object, index);
+  __ mov(cp, zero_reg);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+     instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble final : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register result,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {
+    }
+    void Generate() override {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, result, object, index);
+
+  Label out_of_object, done;
+
+  __ And(scratch, index, Operand(Smi::FromInt(1)));
+  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
+  __ dsra(index, index, 1);
+
+  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
+  __ SmiScale(scratch, index, kPointerSizeLog2);  // In delay slot.
+  __ Daddu(scratch, object, scratch);
+  __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+  __ Branch(&done);
+
+  __ bind(&out_of_object);
+  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // The index is equal to the negated out-of-object property index plus 1.
+  __ Dsubu(scratch, result, scratch);
+  __ ld(result, FieldMemOperand(scratch,
+                                FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
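+
+// Note on the index encoding consumed above: bit 0 of the Smi payload flags
+// a mutable heap-number field, which must be boxed via the deferred runtime
+// call; the remaining bits, arithmetically shifted right by one, give a
+// property index whose sign selects in-object fields (>= 0) versus the
+// out-of-object properties array (< 0).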
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+  Register context = ToRegister(instr->context());
+  __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+  Handle<ScopeInfo> scope_info = instr->scope_info();
+  __ li(at, scope_info);
+  __ Push(at, ToRegister(instr->function()));
+  CallRuntime(Runtime::kPushBlockContext, instr);
+  RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.h b/src/crankshaft/mips64/lithium-codegen-mips64.h
new file mode 100644
index 0000000..efadb0f
--- /dev/null
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.h
@@ -0,0 +1,425 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS64_H_
+#define V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS64_H_
+
+#include "src/ast/scopes.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
+#include "src/crankshaft/mips64/lithium-mips64.h"
+#include "src/deoptimizer.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : LCodeGenBase(chunk, assembler, info),
+        jump_table_(4, info->zone()),
+        scope_(info->scope()),
+        deferred_(8, info->zone()),
+        frame_is_built_(false),
+        safepoints_(info->zone()),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub() ||
+        info()->requires_frame();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
+  RAStatus GetRAState() const {
+    return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
+  }
+
+  // Support for converting LOperands to assembler types.
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LOperand must be a double register.
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  // LOperand is loaded into dbl_scratch, unless already a double register.
+  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+                                        FloatRegister flt_scratch,
+                                        DoubleRegister dbl_scratch);
+  int64_t ToRepresentation_donotuse(LConstantOperand* op,
+                                    const Representation& r) const;
+  int32_t ToInteger32(LConstantOperand* op) const;
+  Smi* ToSmi(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+  MemOperand ToHighMemOperand(LOperand* op) const;
+
+  bool IsInteger32(LConstantOperand* op) const;
+  bool IsSmi(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  void DoDeferredNumberTagIU(LInstruction* instr,
+                             LOperand* value,
+                             LOperand* temp1,
+                             LOperand* temp2,
+                             IntegerSignedness signedness);
+
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocate(LAllocate* instr);
+
+  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register result,
+                                   Register object,
+                                   Register index);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  MemOperand PrepareKeyedOperand(Register key,
+                                 Register base,
+                                 bool key_is_constant,
+                                 int constant_key,
+                                 int element_size,
+                                 int shift_size,
+                                 int base_offset);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  LanguageMode language_mode() const { return info()->language_mode(); }
+
+  Scope* scope() const { return scope_; }
+
+  Register scratch0() { return kLithiumScratchReg; }
+  Register scratch1() { return kLithiumScratchReg2; }
+  DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
+
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  void GenerateBodyInstructionPre(LInstruction* instr) override;
+  bool GeneratePrologue();
+  bool GenerateDeferredCode();
+  bool GenerateJumpTable();
+  bool GenerateSafepointTable();
+
+  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+  void GenerateOsrPrologue();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, function->nargs, instr);
+  }
+
+  void LoadContextFromDeferred(LOperand* context);
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr,
+                               LOperand* context);
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in a1.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count, int arity,
+                         LInstruction* instr);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason,
+                    Deoptimizer::BailoutType bailout_type,
+                    Register src1 = zero_reg,
+                    const Operand& src2 = Operand(zero_reg));
+  void DeoptimizeIf(
+      Condition condition, LInstruction* instr,
+      Deoptimizer::DeoptReason deopt_reason = Deoptimizer::kNoReason,
+      Register src1 = zero_reg, const Operand& src2 = Operand(zero_reg));
+
+  void AddToTranslation(LEnvironment* environment,
+                        Translation* translation,
+                        LOperand* op,
+                        bool is_tagged,
+                        bool is_uint32,
+                        int* object_index_pointer,
+                        int* dematerialized_index_pointer);
+
+  Register ToRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
+
+  MemOperand BuildSeqStringOperand(Register string,
+                                   LOperand* index,
+                                   String::Encoding encoding);
+
+  void EmitIntegerMathAbs(LMathAbs* instr);
+  void EmitSmiMathAbs(LMathAbs* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    Safepoint::DeoptMode mode);
+
+  void RecordAndWritePosition(int position) override;
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block);
+
+  // EmitBranch expects to be the last instruction of a block.
+  template<class InstrType>
+  void EmitBranch(InstrType instr,
+                  Condition condition,
+                  Register src1,
+                  const Operand& src2);
+  template<class InstrType>
+  void EmitBranchF(InstrType instr,
+                   Condition condition,
+                   FPURegister src1,
+                   FPURegister src2);
+  template <class InstrType>
+  void EmitTrueBranch(InstrType instr, Condition condition, Register src1,
+                      const Operand& src2);
+  template <class InstrType>
+  void EmitFalseBranch(InstrType instr, Condition condition, Register src1,
+                       const Operand& src2);
+  template<class InstrType>
+  void EmitFalseBranchF(InstrType instr,
+                        Condition condition,
+                        FPURegister src1,
+                        FPURegister src2);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+                        DoubleRegister result, NumberUntagDMode mode);
+
+  // Emits optimized code for typeof x == "y".  Modifies the input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  // Returns a register in *cmp1 and an operand in *cmp2 that can be used
+  // in the Branch instruction after EmitTypeofIs.
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name,
+                         Register* cmp1,
+                         Operand* cmp2);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string,
+                         SmiCheck check_needed);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset,
+                    AllocationSiteMode mode);
+  // Emit optimized code for integer division.
+  // Inputs are signed.
+  // All registers are clobbered.
+  // If 'remainder' is no_reg, it is not computed.
+  void EmitSignedIntegerDivisionByConstant(Register result,
+                                           Register dividend,
+                                           int32_t divisor,
+                                           Register remainder,
+                                           Register scratch,
+                                           LEnvironment* environment);
+
+
+  void EnsureSpaceForLazyDeopt(int space_needed) override;
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+  template <class T>
+  void EmitVectorStoreICRegisters(T* instr);
+
+  ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
+  Scope* const scope_;
+  ZoneList<LDeferredCode*> deferred_;
+  bool frame_is_built_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope final BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+
+      StoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->push(ra);
+      codegen_->masm_->CallStub(&stub);
+    }
+
+    ~PushSafepointRegistersScope() {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      RestoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->push(ra);
+      codegen_->masm_->CallStub(&stub);
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
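+
+  // Note: this RAII scope brackets deferred code that calls into the
+  // runtime.  Construction pushes ra and calls StoreRegistersStateStub so
+  // the full register state is visible to the safepoint; destruction
+  // restores the registers symmetrically and resets the expected safepoint
+  // kind.  DoDeferredAllocate and DoDeferredStackCheck are typical users.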
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode : public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() {}
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  int instruction_index_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_MIPS64_LITHIUM_CODEGEN_MIPS64_H_
diff --git a/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc b/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc
new file mode 100644
index 0000000..0374cbc
--- /dev/null
+++ b/src/crankshaft/mips64/lithium-gap-resolver-mips64.cc
@@ -0,0 +1,300 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/mips64/lithium-gap-resolver-mips64.h"
+
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner),
+      moves_(32, owner->zone()),
+      root_index_(0),
+      in_cycle_(false),
+      saved_destination_(NULL) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  DCHECK(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+      PerformMove(i);
+      if (in_cycle_) {
+        RestoreValue();
+      }
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      DCHECK(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
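+
+// Illustrative note: deferring constant moves is safe because a constant
+// source never blocks another move.  For { a0 -> a1, #42 -> a0 } the
+// resolver emits a1 := a0 first and a0 := 42 last, so nothing is clobbered.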
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+
+  // We can only find a cycle, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+  // the starting move, we break the cycle.  All moves are then unblocked,
+  // and the starting move is completed by writing the spilled value to
+  // its destination.  All other moves from the spilled source have been
+  // completed prior to breaking the cycle.
+  // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill the saved value to
+  // the stack, to free up the register.
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack allocated local.  Multiple moves can
+  // be pending because this function is recursive.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move it must be moves_[root_index_]
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    DCHECK(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
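+
+// Worked example: for the parallel move { a0 -> a1, a1 -> a0 } the
+// depth-first walk from the root re-encounters the pending root move, so
+// BreakCycle stashes a1 in kLithiumScratchReg, the root move a0 -> a1 is
+// emitted normally, and RestoreValue finally writes the stashed value
+// into a0.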
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+  // We save in a register the value that should end up in the source of
+  // moves_[root_index_].  After performing all moves in the tree rooted
+  // in that move, we save the value to that source.
+  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  DCHECK(!in_cycle_);
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+  if (source->IsRegister()) {
+    __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    __ ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+  // This move will be done by restoring the saved value to the destination.
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+  DCHECK(in_cycle_);
+  DCHECK(saved_destination_ != NULL);
+
+  // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
+  if (saved_destination_->IsRegister()) {
+    __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
+  } else if (saved_destination_->IsStackSlot()) {
+    __ sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
+  } else if (saved_destination_->IsDoubleRegister()) {
+    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
+            kLithiumScratchDouble);
+  } else if (saved_destination_->IsDoubleStackSlot()) {
+    __ sdc1(kLithiumScratchDouble,
+            cgen_->ToMemOperand(saved_destination_));
+  } else {
+    UNREACHABLE();
+  }
+
+  in_cycle_ = false;
+  saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+
+  if (source->IsRegister()) {
+    Register source_register = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(cgen_->ToRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      __ sd(source_register, cgen_->ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ ld(cgen_->ToRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        if (!destination_operand.OffsetIsInt16Encodable()) {
+          // 'at' is overwritten while saving the value to the destination.
+          // Therefore we can't use 'at'.  It is OK if the read from the source
+          // destroys 'at', since that happens before the value is read.
+          // This uses only a single reg of the double reg-pair.
+          __ ldc1(kLithiumScratchDouble, source_operand);
+          __ sdc1(kLithiumScratchDouble, destination_operand);
+        } else {
+          __ ld(at, source_operand);
+          __ sd(at, destination_operand);
+        }
+      } else {
+        __ ld(kLithiumScratchReg, source_operand);
+        __ sd(kLithiumScratchReg, destination_operand);
+      }
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      if (cgen_->IsSmi(constant_source)) {
+         __ li(dst, Operand(cgen_->ToSmi(constant_source)));
+      } else if (cgen_->IsInteger32(constant_source)) {
+         __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
+      } else {
+         __ li(dst, cgen_->ToHandle(constant_source));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      DoubleRegister result = cgen_->ToDoubleRegister(destination);
+      double v = cgen_->ToDouble(constant_source);
+      __ Move(result, v);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      if (cgen_->IsSmi(constant_source)) {
+         __ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
+         __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+      } else if (cgen_->IsInteger32(constant_source)) {
+        __ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source)));
+        __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+      } else {
+        __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
+        __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+      }
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      __ sdc1(source_register, destination_operand);
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        // kLithiumScratchDouble was used to break the cycle,
+        // but kLithiumScratchReg is free.
+        MemOperand source_high_operand =
+            cgen_->ToHighMemOperand(source);
+        MemOperand destination_high_operand =
+            cgen_->ToHighMemOperand(destination);
+        __ lw(kLithiumScratchReg, source_operand);
+        __ sw(kLithiumScratchReg, destination_operand);
+        __ lw(kLithiumScratchReg, source_high_operand);
+        __ sw(kLithiumScratchReg, destination_high_operand);
+      } else {
+        __ ldc1(kLithiumScratchDouble, source_operand);
+        __ sdc1(kLithiumScratchDouble, destination_operand);
+      }
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
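+
+// Note: in the cyclic double-stack-slot case above the double scratch
+// register already holds the cycle's saved value, so the 8-byte slot is
+// copied as two 32-bit words through kLithiumScratchReg via the low and
+// high MemOperands.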
+
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/mips64/lithium-gap-resolver-mips64.h b/src/crankshaft/mips64/lithium-gap-resolver-mips64.h
new file mode 100644
index 0000000..85d8e29
--- /dev/null
+++ b/src/crankshaft/mips64/lithium-gap-resolver-mips64.h
@@ -0,0 +1,59 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_
+#define V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_
+
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver final BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_
diff --git a/src/crankshaft/mips64/lithium-mips64.cc b/src/crankshaft/mips64/lithium-mips64.cc
new file mode 100644
index 0000000..129f615
--- /dev/null
+++ b/src/crankshaft/mips64/lithium-mips64.cc
@@ -0,0 +1,2591 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/mips64/lithium-mips64.h"
+
+#include <sstream>
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/mips64/lithium-codegen-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
+  DCHECK(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::ROR: return "ror-t";
+    case Token::SHL: return "sll-t";
+    case Token::SAR: return "sra-t";
+    case Token::SHR: return "srl-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new(zone()) LDebugBreak();
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(),
+              true_block_id(), false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+
+void LCallFunction::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  function()->PrintTo(stream);
+  if (hydrogen()->HasVectorAndSlot()) {
+    stream->Add(" (type-feedback-vector ");
+    temp_vector()->PrintTo(stream);
+    stream->Add(" ");
+    temp_slot()->PrintTo(stream);
+    stream->Add(")");
+  }
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  std::ostringstream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.str().c_str());
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d]", base_offset());
+  } else {
+    stream->Add("]");
+  }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d] <-", base_offset());
+  } else {
+    stream->Add("] <- ");
+  }
+
+  if (value() == NULL) {
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
+           hydrogen()->value()->representation().IsDouble());
+    stream->Add("<the hole(nan)>");
+  } else {
+    value()->PrintTo(stream);
+  }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Skip an extra slot for a double-width slot, which occupies two indices.
+  if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
+  return spill_slot_count_++;
+}
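+
+// Illustrative note: double-width slots consume two consecutive indices.
+// Starting from spill_slot_count_ == 0, requesting DOUBLE, GENERAL, DOUBLE
+// returns indices 1, 2 and 4, leaving spill_slot_count_ == 5.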
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind)  {
+  int index = GetNextSpillIndex(kind);
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    DCHECK(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+  DCHECK(is_unused());
+  chunk_ = new(zone()) LPlatformChunk(info(), graph());
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new (zone())
+      LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                             LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
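+    // The value is flagged to be emitted at its uses (DoBasicBlock skips
+    // such instructions), so visit it now to generate its code at this
+    // use site.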
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr, int index) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(
+    LTemplateResultInstruction<1>* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator,
+                                           &objects_to_materialize));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
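+  // Concretely: a call without observable side effects is not followed by
+  // an HSimulate of its own, so the environment attached here is what a
+  // lazy bailout after the call has to fall back to (see AddInstruction
+  // below).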
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  DCHECK(!instr->HasPointerMap());
+  instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+
+    HValue* right_value = instr->right();
+    LOperand* right = NULL;
+    int constant_value = 0;
+    bool does_deopt = false;
+    if (right_value->IsConstant()) {
+      HConstant* constant = HConstant::cast(right_value);
+      right = chunk_->DefineConstantOperand(constant);
+      constant_value = constant->Integer32Value() & 0x1f;
+      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+      // truncated to smi.
+      if (instr->representation().IsSmi() && constant_value > 0) {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
+    } else {
+      right = UseRegisterAtStart(right_value);
+    }
+
+    // Shift operations can only deoptimize if we do a logical shift
+    // by 0 and the result cannot be truncated to int32.
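+    // For example, "x >>> 0": the result is x reinterpreted as uint32,
+    // which only fits an int32 when the top bit is clear, hence the deopt
+    // below unless the instruction carries the kUint32 flag.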
+    if (op == Token::SHR && constant_value == 0) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    }
+
+    LInstruction* result =
+        DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+    return does_deopt ? AssignEnvironment(result) : result;
+  } else {
+    return DoArithmeticT(op, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
+  if (op == Token::MOD) {
+    LOperand* left = UseFixedDouble(instr->left(), f2);
+    LOperand* right = UseFixedDouble(instr->right(), f4);
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    // We call a C function for double modulo. It can't trigger a GC. We need
+    // to use a fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    return MarkAsCall(DefineFixedDouble(result, f2), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return DefineAsRegister(result);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left_operand = UseFixed(left, a1);
+  LOperand* right_operand = UseFixed(right, a0);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  DCHECK(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    DCHECK(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    DCHECK(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      DCHECK(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
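+    // Rationale: if the other successor is processed after this block (it
+    // has a higher block id), it will also read pred->last_environment(),
+    // so this block must work on a copy rather than mutate the original.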
+    block->UpdateEnvironment(last_environment);
+    DCHECK(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new(zone()) LDummy());
+    } else {
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new(zone())
+          LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    HBasicBlock* successor;
+    if (current->IsControlInstruction() &&
+        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+        successor != NULL) {
+      instr = new(zone()) LGoto(successor);
+    } else {
+      instr = current->CompileToLithium(this);
+    }
+  }
+
+  argument_count_ += current->argument_delta();
+  DCHECK(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    AddInstruction(instr, current);
+  }
+
+  current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+                                   HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+  instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+  // Make sure that the lithium instruction has either no fixed register
+  // constraints in temps or the result OR no uses that are only used at
+  // start. If this invariant doesn't hold, the register allocator can decide
+  // to insert a split of a range immediately before the instruction due to an
+  // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+  // the same register that is used for the use-at-start.
+  // See https://code.google.com/p/chromium/issues/detail?id=201590
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
+    int fixed = 0;
+    int used_at_start = 0;
+    for (UseIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->IsUsedAtStart()) ++used_at_start;
+    }
+    if (instr->Output() != NULL) {
+      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+    }
+    for (TempIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->HasFixedPolicy()) ++fixed;
+    }
+    DCHECK(fixed == 0 || used_at_start == 0);
+  }
+#endif
+
+  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+    instr = AssignPointerMap(instr);
+  }
+  if (FLAG_stress_environments && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+  chunk_->AddInstruction(instr, current_block_);
+
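+  // Calls (and the prologue) get an LLazyBailout so lazy deoptimization
+  // has an environment to resume from after the call; when the call has
+  // observable side effects, the environment of the following HSimulate is
+  // replayed first so the resumed state includes those effects.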
+  if (instr->IsCall() || instr->IsPrologue()) {
+    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+    if (hydrogen_val->HasObservableSideEffects()) {
+      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+      sim->ReplayEnvironment(current_block_->last_environment());
+      hydrogen_value_for_lazy_bailout = sim;
+    }
+    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+    chunk_->AddInstruction(bailout, current_block_);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+  return new (zone()) LPrologue();
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+      type.IsJSArray() || type.IsHeapNumber() || type.IsString();
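+  // The remaining non-easy case is a tagged value with only a partial set
+  // of expected input types: the branch then needs an environment so it
+  // can deoptimize when a value outside that set shows up.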
+  LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
+  }
+  return branch;
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(
+      new(zone()) LArgumentsLength(UseRegister(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new(zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* left =
+      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+  LOperand* right =
+      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+    HHasInPrototypeChainAndBranch* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* prototype = UseRegister(instr->prototype());
+  LHasInPrototypeChainAndBranch* result =
+      new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegisterAtStart(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+  LOperand* receiver = UseFixed(instr->receiver(), a0);
+  LOperand* length = UseFixed(instr->length(), a2);
+  LOperand* elements = UseFixed(instr->elements(), a3);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
+  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+  int argc = instr->OperandCount();
+  for (int i = 0; i < argc; ++i) {
+    LOperand* argument = Use(instr->argument(i));
+    AddInstruction(new(zone()) LPushArgument(argument), instr);
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+    HStoreCodeEntry* store_code_entry) {
+  LOperand* function = UseRegister(store_code_entry->function());
+  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+  return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(
+      new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new(zone()) LContext, cp);
+  }
+
+  return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+    HCallJSFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+
+  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+    HCallWithDescriptor* instr) {
+  CallInterfaceDescriptor descriptor = instr->descriptor();
+
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  // Target
+  ops.Add(target, zone());
+  // Context
+  LOperand* op = UseFixed(instr->OperandAt(1), cp);
+  ops.Add(op, zone());
+  // Other register parameters
+  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+       i < instr->OperandCount(); i++) {
+    op =
+        UseFixed(instr->OperandAt(i),
+                 descriptor.GetRegisterParameter(
+                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+      descriptor, ops, zone());
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), a1);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), f4);
+  return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathClz32* result = new(zone()) LMathClz32(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LOperand* double_temp = TempDoubleRegister();
+  LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+  // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf.
+  LOperand* input = UseFixedDouble(instr->value(), f8);
+  LOperand* temp = TempDoubleRegister();
+  LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+  return DefineFixedDouble(result, f4);
+}
+
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+  Representation r = instr->value()->representation();
+  LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+      ? NULL
+      : UseFixed(instr->context(), cp);
+  LOperand* input = UseRegister(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LMathAbs(context, input));
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = TempRegister();
+  LMathFloor* result = new(zone()) LMathFloor(input, temp);
+  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathSqrt* result = new(zone()) LMathSqrt(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = TempDoubleRegister();
+  LMathRound* result = new(zone()) LMathRound(input, temp);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* constructor = UseFixed(instr->constructor(), a1);
+  LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), a1);
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(a3);
+    vector = FixedTemp(a2);
+  }
+
+  LCallFunction* call =
+      new (zone()) LCallFunction(context, function, slot, vector);
+  return MarkAsCall(DefineFixed(call, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    return DefineAsRegister(new(zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+          dividend, divisor));
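+  // The deopt cases spelled out: a negative divisor can turn a zero
+  // dividend into -0; kMinInt / -1 overflows int32; and an inexact
+  // division (e.g. 7 / 2) must deoptimize unless all uses truncate the
+  // result to int32.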
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+       divisor != 1 && divisor != -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+          dividend, divisor));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+      ? NULL : TempRegister();
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+      (!instr->IsMathFloorOfDiv() &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+          dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp =
+      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+      NULL : TempRegister();
+  LInstruction* result = DefineAsRegister(
+      new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
+    return DoFlooringDivByConstI(instr);
+  } else {
+    return DoFlooringDivI(instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+          dividend, divisor));
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+          dividend, divisor));
+  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result = DefineAsRegister(new(zone()) LModI(
+      dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    return instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
+  } else {
+    return DoArithmeticT(Token::MOD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    HValue* left = instr->BetterLeftOperand();
+    HValue* right = instr->BetterRightOperand();
+    LOperand* left_op;
+    LOperand* right_op;
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+    int32_t constant_value = 0;
+    if (right->IsConstant()) {
+      HConstant* constant = HConstant::cast(right);
+      constant_value = constant->Integer32Value();
+      // The constants -1, 0 and 1 can take the constant-operand path even
+      // when the result can overflow; other constants can take it only
+      // when overflow is impossible.
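+      // Presumably because multiplying by -1, 0 or 1 reduces to a checked
+      // negation, a clear, or a move, whereas other constants are
+      // strength-reduced into shift/add sequences that cannot easily
+      // detect overflow.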
+      if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+        left_op = UseRegisterAtStart(left);
+        right_op = UseConstant(right);
+      } else {
+        if (bailout_on_minus_zero) {
+          left_op = UseRegister(left);
+        } else {
+          left_op = UseRegisterAtStart(left);
+        }
+        right_op = UseRegister(right);
+      }
+    } else {
+      if (bailout_on_minus_zero) {
+        left_op = UseRegister(left);
+      } else {
+        left_op = UseRegisterAtStart(left);
+      }
+      right_op = UseRegister(right);
+    }
+    LInstruction* result =
+        instr->representation().IsSmi()
+            ? DefineAsRegister(new (zone()) LMulS(left_op, right_op))
+            : DefineAsRegister(new (zone()) LMulI(left_op, right_op));
+    if (right_op->IsConstantOperand()
+            ? ((can_overflow && constant_value == -1) ||
+               (bailout_on_minus_zero && constant_value <= 0))
+            : (can_overflow || bailout_on_minus_zero)) {
+      AssignEnvironment(result);
+    }
+    return result;
+
+  } else if (instr->representation().IsDouble()) {
+    if (kArchVariant == kMips64r2) {
+      if (instr->HasOneUse() && instr->uses().value()->IsAdd()) {
+        HAdd* add = HAdd::cast(instr->uses().value());
+        if (instr == add->left()) {
+          // This mul is the lhs of an add. The add and mul will be folded
+          // into a multiply-add.
+          return NULL;
+        }
+        if (instr == add->right() && !add->left()->IsMul()) {
+          // This mul is the rhs of an add, where the lhs is not another mul.
+          // The add and mul will be folded into a multiply-add.
+          return NULL;
+        }
+      }
+    }
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    LInstruction* result =
+        instr->representation().IsSmi()
+            ? DefineAsRegister(new (zone()) LSubS(left, right))
+            : DefineAsRegister(new (zone()) LSubI(left, right));
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+  LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+  LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+  LOperand* addend_op = UseRegisterAtStart(addend);
+  return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+                                                     multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
+    LInstruction* result =
+        instr->representation().IsSmi()
+            ? DefineAsRegister(new (zone()) LAddS(left, right))
+            : DefineAsRegister(new (zone()) LAddI(left, right));
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsExternal()) {
+    DCHECK(instr->IsConsistentExternalRepresentation());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return DefineAsRegister(new (zone()) LAddE(left, right));
+  } else if (instr->representation().IsDouble()) {
+    if (kArchVariant == kMips64r2) {
+      if (instr->left()->IsMul())
+        return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+
+      if (instr->right()->IsMul()) {
+        DCHECK(!instr->left()->IsMul());
+        return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+      }
+    }
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    left = UseRegisterAtStart(instr->left());
+    right = UseRegisterAtStart(instr->right());
+  }
+  return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  DCHECK(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), f2);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), f4)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+  LPower* result = new(zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, f0),
+                    instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+    HCompareMinusZeroAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  LOperand* scratch = TempRegister();
+  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()),
+                                        temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsUndetectableAndBranch(
+      UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(context, left, right);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasInstanceTypeAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+                                           TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+  LOperand* map = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = FLAG_debug_code
+      ? UseRegisterAtStart(instr->index())
+      : UseRegisterOrConstantAtStart(instr->index());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+  return new(zone()) LSeqStringSetChar(context, string, index, value);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
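+  // At least one of index and length always ends up in a register: when
+  // the index folds to a constant operand, the length is forced into a
+  // register below, so the generated check has a register to compare
+  // against.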
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+      ? UseRegisterOrConstantAtStart(instr->length())
+      : UseRegisterAtStart(instr->length());
+  LInstruction* result = new(zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+    HBoundsCheckBaseIndexInformation* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception).  There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  HValue* val = instr->value();
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(val);
+      return DefineSameAsFirst(new(zone()) LDummyUse(value));
+    }
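+    // A smi is itself a valid tagged value, so the remaining smi sources
+    // can be re-dispatched through the tagged paths below; those paths see
+    // val->representation().IsSmi() and skip the deopt checks.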
+    from = Representation::Tagged();
+  }
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
+        return DefineSameAsFirst(new(zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempDoubleRegister();
+        LInstruction* result =
+            DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegister(val);
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+      return AssignPointerMap(Define(result, result_temp));
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      return AssignEnvironment(
+          DefineAsRegister(new(zone()) LDoubleToSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+      if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+      return result;
+    }
+  } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      } else {
+        STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
+                      (kMaxInt == Smi::kMaxValue));
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiTag(value));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+      if (instr->CheckFlag(HValue::kCanOverflow)) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(to.IsDouble());
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
+      } else {
+        return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckNonSmi(value);
+  if (!instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
+    HCheckArrayBufferNotNeutered* instr) {
+  LOperand* view = UseRegisterAtStart(instr->value());
+  LCheckArrayBufferNotNeutered* result =
+      new (zone()) LCheckArrayBufferNotNeutered(view);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckInstanceType(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    // Revisit this decision, here and 8 lines below.
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg,
+        TempDoubleRegister()));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+  } else {
+    DCHECK(input_rep.IsSmiOrTagged());
+    LClampTToUint8* result =
+        new(zone()) LClampTToUint8(reg, TempDoubleRegister());
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+  HValue* value = instr->value();
+  DCHECK(value->representation().IsDouble());
+  return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+  LOperand* lo = UseRegister(instr->lo());
+  LOperand* hi = UseRegister(instr->hi());
+  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub()
+      ? UseFixed(instr->context(), cp)
+      : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new(zone()) LReturn(UseFixed(instr->value(), v0), context,
+                             parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new(zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new(zone()) LConstantD);
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new(zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new(zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value);
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
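+// Keyed loads deoptimize in two situations, spelled out below: a UINT32
+// element load whose result is not known to be consumed as uint32 (a value
+// of 2^31 or more is not representable as int32), and fixed-array loads
+// that must check for the hole marker.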
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LInstruction* result = NULL;
+
+  if (!instr->is_fixed_typed_array()) {
+    LOperand* obj = NULL;
+    if (instr->representation().IsDouble()) {
+      obj = UseRegister(instr->elements());
+    } else {
+      DCHECK(instr->representation().IsSmiOrTagged() ||
+             instr->representation().IsInteger32());
+      obj = UseRegisterAtStart(instr->elements());
+    }
+    result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
+  } else {
+    DCHECK(
+        (instr->representation().IsInteger32() &&
+         !IsDoubleOrFloatElementsKind(elements_kind)) ||
+        (instr->representation().IsDouble() &&
+         IsDoubleOrFloatElementsKind(elements_kind)));
+    LOperand* backing_store = UseRegister(instr->elements());
+    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+    result = DefineAsRegister(
+        new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
+  }
+
+  bool needs_environment;
+  if (instr->is_fixed_typed_array()) {
+    // See LCodeGen::DoLoadKeyedExternalArray.
+    needs_environment = elements_kind == UINT32_ELEMENTS &&
+                        !instr->CheckFlag(HInstruction::kUint32);
+  } else {
+    // See LCodeGen::DoLoadKeyedFixedDoubleArray and
+    // LCodeGen::DoLoadKeyedFixedArray.
+    needs_environment =
+        instr->RequiresHoleCheck() ||
+        (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
+  }
+
+  if (needs_environment) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
+                  v0);
+  return MarkAsCall(result, instr);
+}
+
+
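+// When a store needs a write barrier, the barrier code is free to clobber
+// the registers it is handed, so the object, value, and key are requested
+// via UseTempRegister: the allocator then guarantees this instruction owns
+// copies it may destroy.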
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  if (!instr->is_fixed_typed_array()) {
+    DCHECK(instr->elements()->representation().IsTagged());
+    bool needs_write_barrier = instr->NeedsWriteBarrier();
+    LOperand* object = NULL;
+    LOperand* val = NULL;
+    LOperand* key = NULL;
+
+    if (instr->value()->representation().IsDouble()) {
+      object = UseRegisterAtStart(instr->elements());
+      key = UseRegisterOrConstantAtStart(instr->key());
+      val = UseRegister(instr->value());
+    } else {
+      DCHECK(instr->value()->representation().IsSmiOrTagged() ||
+             instr->value()->representation().IsInteger32());
+      if (needs_write_barrier) {
+        object = UseTempRegister(instr->elements());
+        val = UseTempRegister(instr->value());
+        key = UseTempRegister(instr->key());
+      } else {
+        object = UseRegisterAtStart(instr->elements());
+        val = UseRegisterAtStart(instr->value());
+        key = UseRegisterOrConstantAtStart(instr->key());
+      }
+    }
+
+    return new (zone()) LStoreKeyed(object, key, val, nullptr);
+  }
+
+  DCHECK(
+      (instr->value()->representation().IsInteger32() &&
+       !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+      (instr->value()->representation().IsDouble() &&
+       IsDoubleOrFloatElementsKind(instr->elements_kind())));
+  DCHECK(instr->elements()->representation().IsExternal());
+  LOperand* val = UseRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LOperand* backing_store = UseRegister(instr->elements());
+  LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+  return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
+
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreKeyedGeneric* result =
+      new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+    return result;
+  } else {
+    LOperand* object = UseFixed(instr->object(), a0);
+    LOperand* context = UseFixed(instr->context(), cp);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, context, NULL);
+    return MarkAsCall(result, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = Use(instr->object());
+  LOperand* elements = Use(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+  LMaybeGrowElements* result = new (zone())
+      LMaybeGrowElements(context, object, elements, key, current_capacity);
+  DefineFixed(result, v0);
+  return AssignPointerMap(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool is_in_object = instr->access().IsInobject();
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  bool needs_write_barrier_for_map = instr->has_transition() &&
+      instr->NeedsWriteBarrierForMap();
+
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = is_in_object
+        ? UseRegister(instr->object())
+        : UseTempRegister(instr->object());
+  } else {
+    obj = needs_write_barrier_for_map
+        ? UseRegister(instr->object())
+        : UseRegisterAtStart(instr->object());
+  }
+
+  LOperand* val;
+  if (needs_write_barrier) {
+    val = UseTempRegister(instr->value());
+  } else if (instr->field_representation().IsDouble()) {
+    val = UseRegisterAtStart(instr->value());
+  } else {
+    val = UseRegister(instr->value());
+  }
+
+  // We need a temporary register for the write barrier on the map field.
+  LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+  return new(zone()) LStoreNamedField(obj, val, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreNamedGeneric* result =
+      new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
+      instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = UseRegisterOrConstant(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  DCHECK(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new(zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor.GetRegisterParameter(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk()->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Retry(kTooManySpillSlotsNeededForOSR);
+      spill_index = 0;
+    }
+  }
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* args = UseRegister(instr->arguments());
+  LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), a0);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* value = UseFixed(instr->value(), a3);
+  LTypeof* result = new (zone()) LTypeof(context, value);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), cp);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
+  } else {
+    DCHECK(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
+  }
+}
+
+
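+// Inlining is modeled purely through environments: entering an inlined
+// function builds an inner HEnvironment via CopyForInlining, and leaving it
+// discards that environment again (plus an LDrop when arguments were
+// materialized on the stack). Neither direction emits code by itself.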
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->inlining_kind());
+  // Only replay the binding of the arguments object if it wasn't removed
+  // from the graph.
+  if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->BindContext(instr->closure_context());
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedFunction(instr->shared());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer = current_block_->last_environment()->
+      DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+
+  return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseFixed(instr->enumerable(), a0);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->context());
+  return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+    HAllocateBlockContext* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LAllocateBlockContext* result =
+      new(zone()) LAllocateBlockContext(context, function);
+  return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/crankshaft/mips64/lithium-mips64.h b/src/crankshaft/mips64/lithium-mips64.h
new file mode 100644
index 0000000..01dc234
--- /dev/null
+++ b/src/crankshaft/mips64/lithium-mips64.h
@@ -0,0 +1,2792 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
+#define V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddE)                                    \
+  V(AddI)                                    \
+  V(AddS)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckArrayBufferNotNeutered)             \
+  V(CheckInstanceType)                       \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleToI)                               \
+  V(DoubleBits)                              \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInPrototypeChainAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadRoot)                                \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathExp)                                 \
+  V(MathClz32)                               \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(MaybeGrowElements)                       \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(MulS)                                    \
+  V(MultiplyAddD)                            \
+  V(NumberTagD)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(Prologue)                                \
+  V(PushArgument)                            \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(SubS)                                    \
+  V(TaggedToI)                               \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
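+// LITHIUM_CONCRETE_INSTRUCTION_LIST is an X-macro: a client defines a
+// one-argument macro V, invokes the list, and gets one expansion per
+// instruction. A minimal sketch of the pattern (COUNT_ONE is hypothetical,
+// not used in this file):
+//
+//   #define COUNT_ONE(type) +1
+//   static const int kNumConcrete =
+//       0 LITHIUM_CONCRETE_INSTRUCTION_LIST(COUNT_ONE);
+//   #undef COUNT_ONE
+//
+// The Opcode enum and the Is##type() predicates in LInstruction below are
+// generated exactly this way.
+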
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const final { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) final;              \
+  const char* Mnemonic() const final { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
+  }
+
+
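+// DECLARE_HYDROGEN_ACCESSOR gives a Lithium class typed access to the
+// Hydrogen instruction it was built from, via the hydrogen_value() slot.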
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
+
+
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) {
+  }
+
+  virtual ~LInstruction() {}
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
+
+  // Whether this instruction was marked as a call (see MarkAsCall above).
+  bool IsMarkedAsCall() const { return IsCall(); }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
+ private:
+  // Iterator interface.
+  friend class InputIterator;
+
+  friend class TempIterator;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  class IsCallBits: public BitField<bool, 0, 1> {};
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  bool HasResult() const final { return R != 0 && result() != NULL; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const override { return results_[0]; }
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  int InputCount() final { return I; }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return T; }
+  LOperand* TempAt(int i) final { return temps_[i]; }
+};
+
+
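+// Reading the template arguments above: LTemplateInstruction<R, I, T> has R
+// results (0 or 1), I inputs and T temporaries. For example, LMulI below is
+// an LTemplateInstruction<1, 2, 0>: one result, two inputs, no temps.
+
+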
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use DECLARE_CONCRETE_INSTRUCTION here because of subclasses.
+  bool IsGap() const final { return true; }
+  void PrintDataTo(StringStream* stream) override;
+  static LGap* cast(LInstruction* instr) {
+    DCHECK(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new(zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
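+// The four InnerPosition slots above let the register allocator attach
+// parallel moves before, at the start of, at the end of, and after an
+// instruction; the gap resolver later turns each LParallelMove into actual
+// move and swap code.
+
+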
+class LInstructionGap final : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  void PrintDataTo(StringStream* stream) override;
+  bool IsControl() const override { return true; }
+
+  int block_id() const { return block_->block_id(); }
+
+ private:
+  HBasicBlock* block_;
+};
+
+
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
+class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDummy final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() {}
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) {
+    inputs_[0] = value;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool IsControl() const override { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel final : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallStub(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+  bool IsControl() const final { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+
+  Label* false_label_;
+  Label* true_label_;
+};
+
+
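+// Control instructions produce no result and terminate a basic block; their
+// true and false successor labels are resolved lazily through the chunk, as
+// in TrueLabel/FalseLabel above.
+
+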
+class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModI final : public LTemplateInstruction<1, 2, 3> {
+ public:
+  LModI(LOperand* left,
+        LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulS final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulS(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulS, "mul-s")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LMulI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+// Instruction for computing multiplier * multiplicand + addend.
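+// On MIPS this typically lowers to a single multiply-add instruction
+// (madd.d) rather than a separate mul.d/add.d pair.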
+class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+                LOperand* multiplicand) {
+    inputs_[0] = addend;
+    inputs_[1] = multiplier;
+    inputs_[2] = multiplicand;
+  }
+
+  LOperand* addend() { return inputs_[0]; }
+  LOperand* multiplier() { return inputs_[1]; }
+  LOperand* multiplicand() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
+class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->representation().IsDouble();
+  }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LMathFloor final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathFloor(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRound(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathFround final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathAbs(LOperand* context, LOperand* value) {
+    inputs_[1] = context;
+    inputs_[0] = value;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathLog(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathClz32(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+ public:
+  LMathExp(LOperand* value,
+           LOperand* double_temp,
+           LOperand* temp1,
+           LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = double_temp;
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSqrt(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathPowHalf(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranch(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+                               "cmp-minus-zero-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsStringAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasInstanceTypeAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() const { return inputs_[0]; }
+  LOperand* left() const { return inputs_[1]; }
+  LOperand* right() const { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+    inputs_[0] = object;
+    inputs_[1] = prototype;
+  }
+
+  LOperand* object() const { return inputs_[0]; }
+  LOperand* prototype() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+                               "has-in-prototype-chain-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
+};
+
+
+class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LSubS final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubS(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubS, "sub-s")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+
+class LConstantT final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+
+class LBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMapEnumLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSeqStringGetChar(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  LOperand* string() const { return inputs_[0]; }
+  LOperand* index() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LSeqStringSetChar(LOperand* context,
+                    LOperand* string,
+                    LOperand* index,
+                    LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+  }
+
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddE final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddE(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddE, "add-e")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAddI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LAddS final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddS(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddS, "add-s")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op,
+               LOperand* context,
+               LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+  Token::Value op() const { return op_; }
+
+  Opcode opcode() const final { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn final : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    DCHECK(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadFunctionPrototype(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = backing_store_owner;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* backing_store_owner() { return inputs_[2]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                    LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
+class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallJSFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
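+// Note on arity() for the call instructions in this file: the hydrogen-level
+// argument_count() includes the receiver, so arity() subtracts one (assumed
+// from the usual crankshaft convention).
+
+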
+class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        inputs_(descriptor.GetRegisterParameterCount() +
+                    kImplicitRegisterParameterCount,
+                zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() +
+               kImplicitRegisterParameterCount ==
+           operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  const CallInterfaceDescriptor descriptor() { return descriptor_; }
+
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  // The target and context are passed as implicit parameters that are not
+  // explicitly listed in the descriptor.
+  static const int kImplicitRegisterParameterCount = 2;
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  CallInterfaceDescriptor descriptor_;
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  int InputCount() final { return inputs_.length(); }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return 0; }
+  LOperand* TempAt(int i) final { return NULL; }
+};
+
+
+class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+                LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTaggedToI(LOperand* value,
+             LOperand* temp,
+             LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
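+// (For both conversions above, truncating() is assumed to mean ES
+// ToInt32-style truncation: round toward zero and wrap modulo 2^32. The
+// non-truncating variants deoptimize when the input is not exactly
+// representable as an int32.)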
+
+
+class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberUntagD(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  bool needs_check() const { return needs_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+    inputs_[0] = object;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Representation representation() const {
+    return hydrogen()->field_representation();
+  }
+};
+
+
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+                     LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
+ public:
+  LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+              LOperand* backing_store_owner) {
+    inputs_[0] = object;
+    inputs_[1] = key;
+    inputs_[2] = value;
+    inputs_[3] = backing_store_owner;
+  }
+
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* backing_store_owner() { return inputs_[3]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
+ public:
+  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                     LOperand* value, LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    inputs_[3] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* context,
+                          LOperand* new_map_temp) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = new_map_temp;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object,
+                         LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+                               "trap-allocation-memento")
+};
+
+
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+                     LOperand* key, LOperand* current_capacity) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = elements;
+    inputs_[3] = key;
+    inputs_[4] = current_capacity;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* elements() { return inputs_[2]; }
+  LOperand* key() { return inputs_[3]; }
+  LOperand* current_capacity() { return inputs_[4]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
+class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckValue(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckArrayBufferNotNeutered final
+    : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
+
+  LOperand* view() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
+                               "check-array-buffer-not-neutered")
+  DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
+};
+
+
+class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckInstanceType(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckMaps(LOperand* value = NULL) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LClampDToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampDToUint8(LOperand* unclamped, LOperand* temp) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* unclamped) {
+    inputs_[0] = unclamped;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleBits(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LConstructDouble(LOperand* hi, LOperand* lo) {
+    inputs_[0] = hi;
+    inputs_[1] = lo;
+  }
+
+  LOperand* hi() { return inputs_[0]; }
+  LOperand* lo() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LAllocate(LOperand* context,
+            LOperand* size,
+            LOperand* temp1,
+            LOperand* temp2) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry() {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStoreFrameContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAllocateBlockContext(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk final : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph)
+      : LChunk(info, graph) { }
+
+  int GetNextSpillIndex(RegisterKind kind);
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder final : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(info, graph),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        allocator_(allocator) {}
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+
+  static bool HasMagicNumberForDivisor(int32_t divisor);
+
+  LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+  LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
+  LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+  LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathExp(HUnaryMathOperation* instr);
+  LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+  LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+  LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+  LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
+  LInstruction* DoDivI(HDiv* instr);
+  LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
+  LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that the register allocator will not
+  // reuse its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the start of the instruction. The register allocator is free to
+  // assign the same register to some other operand used inside the
+  // instruction (i.e. a temporary or the output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
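+  //
+  // A sketch of typical use (assuming a binary hydrogen instruction "instr"
+  // being lowered in one of the Do* methods above):
+  //
+  //   LOperand* left = UseRegisterAtStart(instr->left());
+  //   LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+  //   return DefineAsRegister(new (zone()) LAddI(left, right));
+  //
+  // Here the result may be assigned the same register as "left"; that would
+  // not be allowed if "left" had been created with UseRegister.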
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand in a constant operand.
+  MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+  // An input operand in register, stack slot or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            Register reg);
+  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+                                  DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach environment to this
+  // instruction.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
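+  //
+  // A sketch of typical use when lowering a call (v0 as the MIPS64 return
+  // register is an assumption here):
+  //
+  //   LCallRuntime* result = new (zone()) LCallRuntime(context);
+  //   return MarkAsCall(DefineFixed(result, v0), instr);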
+
+  void VisitInstruction(HInstruction* current);
+  void AddInstruction(LInstruction* instr, HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HBinaryOperation* instr);
+
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  LAllocator* allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_MIPS64_LITHIUM_MIPS_H_
diff --git a/src/crankshaft/ppc/OWNERS b/src/crankshaft/ppc/OWNERS
new file mode 100644
index 0000000..eb007cb
--- /dev/null
+++ b/src/crankshaft/ppc/OWNERS
@@ -0,0 +1,5 @@
+jyan@ca.ibm.com
+dstence@us.ibm.com
+joransiu@ca.ibm.com
+mbrandy@us.ibm.com
+michael_dawson@ca.ibm.com
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.cc b/src/crankshaft/ppc/lithium-codegen-ppc.cc
new file mode 100644
index 0000000..936b8a7
--- /dev/null
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.cc
@@ -0,0 +1,5876 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+
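+// SafepointGenerator is handed to the macro assembler's call helpers as a
+// CallWrapper: its AfterCall() hook records a safepoint at the pc
+// immediately following the generated call.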
+class SafepointGenerator final : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
+  virtual ~SafepointGenerator() {}
+
+  void BeforeCall(int call_size) const override {}
+
+  void AfterCall() const override {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  DCHECK(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+            GenerateJumpTable() && GenerateSafepointTable();
+  if (FLAG_enable_embedded_constant_pool && !rc) {
+    masm()->AbortConstantPoolBuilding();
+  }
+  return rc;
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  DCHECK(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ stfd(DoubleRegister::from_code(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ lfd(DoubleRegister::from_code(save_iterator.Current()),
+           MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  DCHECK(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+      __ stop("stop_at");
+    }
+#endif
+
+    // r4: Callee's JS function.
+    // cp: Callee's context.
+    // pp: Callee's constant pool pointer (if enabled)
+    // fp: Caller's frame pointer.
+    // lr: Caller's pc.
+    // ip: Our own function entry (required by the prologue)
+  }
+
+  int prologue_offset = masm_->pc_offset();
+
+  if (prologue_offset) {
+    // Prologue logic requires its starting address in ip and the
+    // corresponding offset from the function entry.
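+    // The addi below is itself emitted ahead of the prologue, so one
+    // kInstrSize is added here to account for it.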
+    prologue_offset += Instruction::kInstrSize;
+    __ addi(ip, ip, Operand(prologue_offset));
+  }
+  info()->set_prologue_offset(prologue_offset);
+  if (NeedsEagerFrame()) {
+    if (info()->IsStub()) {
+      __ StubPrologue(ip, prologue_offset);
+    } else {
+      __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
+    }
+    frame_is_built_ = true;
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    __ subi(sp, sp, Operand(slots * kPointerSize));
+    if (FLAG_debug_code) {
+      __ Push(r3, r4);
+      __ li(r0, Operand(slots));
+      __ mtctr(r0);
+      __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
+      __ mov(r4, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ StorePU(r4, MemOperand(r3, -kPointerSize));
+      __ bdnz(&loop);
+      __ Pop(r3, r4);
+    }
+  }
+
+  if (info()->saves_caller_doubles()) {
+    SaveCallerDoubles();
+  }
+  return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+  Comment(";;; Prologue begin");
+
+  // Possibly allocate a local context.
+  if (info()->scope()->num_heap_slots() > 0) {
+    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
+    // Argument to NewContext is the function, which is in r4.
+    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+    if (info()->scope()->is_script_scope()) {
+      __ push(r4);
+      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ CallRuntime(Runtime::kNewScriptContext);
+      deopt_mode = Safepoint::kLazyDeopt;
+    } else if (slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ push(r4);
+      __ CallRuntime(Runtime::kNewFunctionContext);
+    }
+    RecordSafepoint(deopt_mode);
+
+    // Context is returned in both r3 and cp.  It replaces the context
+    // passed to us.  It's saved on the stack and kept live in cp.
+    __ mr(cp, r3);
+    __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+                               (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ LoadP(r3, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextMemOperand(cp, var->index());
+        __ StoreP(r3, target, r0);
+        // Update the write barrier. This clobbers r6 and r3.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
+                                    GetLinkRegisterState(), kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, r3, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  Comment(";;; Prologue end");
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value or, if
+  // there are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  DCHECK(slots >= 0);
+  __ subi(sp, sp, Operand(slots * kPointerSize));
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (instr->IsCall()) {
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  }
+  if (!instr->IsLazyBailout() && !instr->IsGap()) {
+    safepoints_.BumpLastLazySafepointIndex();
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  DCHECK(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(
+          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+      Comment(
+          ";;; <@%d,#%d> "
+          "-------------------- Deferred %s --------------------",
+          code->instruction_index(), code->instr()->hydrogen_value()->id(),
+          code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
+        frame_is_built_ = true;
+        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
+        __ PushFixedFrame(scratch0());
+        __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        Comment(";;; Deferred code");
+      }
+      code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Destroy frame");
+        DCHECK(frame_is_built_);
+        __ PopFixedFrame(ip);
+        frame_is_built_ = false;
+      }
+      __ b(code->exit());
+    }
+  }
+
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+  // Check that the jump table is accessible from everywhere in the function
+  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
+  // immediate of a branch instruction.
+  // To simplify, we consider the code size from the first instruction to the
+  // end of the jump table. We also don't consider the pc load delta.
+  // Each entry in the jump table is budgeted at up to seven instructions
+  // (hence the "* 7" in the check below).
+  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
+                jump_table_.length() * 7)) {
+    Abort(kGeneratedCodeIsTooLarge);
+  }
+
+  if (jump_table_.length() > 0) {
+    Label needs_frame, call_deopt_entry;
+
+    Comment(";;; -------------------- Jump table --------------------");
+    Address base = jump_table_[0].address;
+
+    Register entry_offset = scratch0();
+
+    int length = jump_table_.length();
+    for (int i = 0; i < length; i++) {
+      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+      __ bind(&table_entry->label);
+
+      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+      Address entry = table_entry->address;
+      DeoptComment(table_entry->deopt_info);
+
+      // Second-level deopt table entries are contiguous and small, so instead
+      // of loading the full, absolute address of each one, load an immediate
+      // offset which will be added to the base address later.
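+      // Since the entries are contiguous, entry - base stays small even when
+      // the absolute addresses are large.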
+      __ mov(entry_offset, Operand(entry - base));
+
+      if (table_entry->needs_frame) {
+        DCHECK(!info()->saves_caller_doubles());
+        Comment(";;; call deopt with frame");
+        __ PushFixedFrame();
+        __ b(&needs_frame, SetLK);
+      } else {
+        __ b(&call_deopt_entry, SetLK);
+      }
+      info()->LogDeoptCallPosition(masm()->pc_offset(),
+                                   table_entry->deopt_info.inlining_id);
+    }
+
+    if (needs_frame.is_linked()) {
+      __ bind(&needs_frame);
+      // This variant of deopt can only be used with stubs. Since we don't
+      // have a function pointer to install in the stack frame that we're
+      // building, install a special marker there instead.
+      DCHECK(info()->IsStub());
+      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
+      __ push(ip);
+      __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+    }
+
+    Comment(";;; call deopt");
+    __ bind(&call_deopt_entry);
+
+    if (info()->saves_caller_doubles()) {
+      DCHECK(info()->IsStub());
+      RestoreCallerDoubles();
+    }
+
+    // Add the base address to the offset previously loaded in entry_offset.
+    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
+    __ add(ip, entry_offset, ip);
+    __ Jump(ip);
+  }
+
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  DCHECK(is_done());
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int code) const {
+  return Register::from_code(code);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
+  return DoubleRegister::from_code(code);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  DCHECK(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk_->LookupConstant(const_op);
+    Handle<Object> literal = constant->handle(isolate());
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      AllowDeferredHandleDereference get_number;
+      DCHECK(literal->IsNumber());
+      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
+    } else if (r.IsDouble()) {
+      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+    } else {
+      DCHECK(r.IsSmiOrTagged());
+      __ Move(scratch, literal);
+    }
+    return scratch;
+  } else if (op->IsStackSlot()) {
+    __ LoadP(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+
+void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
+                                       Register dst) {
+  DCHECK(IsInteger32(const_op));
+  HConstant* constant = chunk_->LookupConstant(const_op);
+  int32_t value = constant->Integer32Value();
+  if (IsSmi(const_op)) {
+    __ LoadSmiLiteral(dst, Smi::FromInt(value));
+  } else {
+    __ LoadIntLiteral(dst, value);
+  }
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  DCHECK(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  return ToRepresentation(op, Representation::Integer32());
+}
+
+
+intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
+                                    const Representation& r) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  DCHECK(r.IsSmiOrTagged());
+  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return Smi::FromInt(constant->Integer32Value());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk()->LookupConstant(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsSmi()) {
+      DCHECK(constant->HasSmiValue());
+      return Operand(Smi::FromInt(constant->Integer32Value()));
+    } else if (r.IsInteger32()) {
+      DCHECK(constant->HasInteger32Value());
+      return Operand(constant->Integer32Value());
+    } else if (r.IsDouble()) {
+      Abort(kToOperandUnsupportedDoubleImmediate);
+    }
+    DCHECK(r.IsTagged());
+    return Operand(constant->handle(isolate()));
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort(kToOperandIsDoubleRegisterUnimplemented);
+    return Operand::Zero();
+  }
+  // Stack slots not implemented, use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand::Zero();
+}
+
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
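+  // e.g. index == -1 maps to offset 0 and index == -2 to kPointerSize.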
+  return -(index + 1) * kPointerSize;
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  DCHECK(!op->IsRegister());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()));
+  } else {
+    // No eager frame was built; retrieve the parameter relative to the
+    // stack pointer.
+    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  DCHECK(op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+  } else {
+    // No eager frame was built; retrieve the parameter relative to the
+    // stack pointer.
+    return MemOperand(sp,
+                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+
+  WriteTranslation(environment->outer(), translation);
+  WriteTranslationFrame(environment, translation);
+
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    AddToTranslation(
+        environment, translation, value, environment->HasTaggedValueAt(i),
+        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+  }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation, LOperand* op,
+                                bool is_tagged, bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment, translation, value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer, dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    if (is_tagged) {
+      translation->StoreStackSlot(index);
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(index);
+    } else {
+      translation->StoreInt32StackSlot(index);
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    translation->StoreDoubleStackSlot(index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  DCHECK(instr != NULL);
+  __ Call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+  // Signal that we don't inline smi code before these stubs in the
+  // optimizing code generator.
+  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
+    __ nop();
+  }
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
+                           LInstruction* instr, SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+
+  __ CallRuntime(function, num_arguments, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    __ Move(cp, ToRegister(context));
+  } else if (context->IsStackSlot()) {
+    __ LoadP(cp, ToMemOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+                                       LInstruction* instr, LOperand* context) {
+  LoadContextFromDeferred(context);
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(instr->pointer_map(), argc,
+                               Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index, translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, zone());
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            Deoptimizer::BailoutType bailout_type,
+                            CRegister cr) {
+  LEnvironment* environment = instr->environment();
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  DCHECK(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+    return;
+  }
+
+  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+    CRegister alt_cr = cr6;
+    Register scratch = scratch0();
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+    Label no_deopt;
+    DCHECK(!alt_cr.is(cr));
+    __ Push(r4, scratch);
+    __ mov(scratch, Operand(count));
+    __ lwz(r4, MemOperand(scratch));
+    __ subi(r4, r4, Operand(1));
+    __ cmpi(r4, Operand::Zero(), alt_cr);
+    __ bne(&no_deopt, alt_cr);
+    __ li(r4, Operand(FLAG_deopt_every_n_times));
+    __ stw(r4, MemOperand(scratch));
+    __ Pop(r4, scratch);
+
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    __ bind(&no_deopt);
+    __ stw(r4, MemOperand(scratch));
+    __ Pop(r4, scratch);
+  }
+
+  if (info()->ShouldTrapOnDeopt()) {
+    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
+  }
+
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
+  DCHECK(info()->IsStub() || frame_is_built_);
+  // Go through the jump table if we need to handle a condition, build a
+  // frame, or restore caller doubles.
+  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
+    DeoptComment(deopt_info);
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
+  } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
+                                            !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+        jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
+    }
+    __ b(cond, &jump_table_.last().label, cr);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            CRegister cr) {
+  Deoptimizer::BailoutType bailout_type =
+      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                            SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+                               int arguments, Safepoint::DeoptMode deopt_mode) {
+  DCHECK(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_, label->hydrogen_value()->id(),
+          label->block_id(), LabelType(label));
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->result()).is(r3));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
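+  // The branching version computes the remainder of a negative dividend as
+  // -(-dividend & (2^shift - 1)); e.g. for divisor == +/-8 (shift == 3) and
+  // dividend == -13, negating gives 13, the low three bits give 5, and
+  // negating again gives -5 == -13 % 8.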
+  HMod* hmod = instr->hydrogen();
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  Label dividend_is_not_negative, done;
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ cmpwi(dividend, Operand::Zero());
+    __ bge(&dividend_is_not_negative);
+    if (shift) {
+      // Note that this is correct even for kMinInt operands.
+      __ neg(dividend, dividend);
+      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+      __ neg(dividend, dividend, LeaveOE, SetRC);
+      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+        DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+      }
+    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ li(dividend, Operand::Zero());
+    } else {
+      DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
+    }
+    __ b(&done);
+  }
+
+  __ bind(&dividend_is_not_negative);
+  if (shift) {
+    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+  } else {
+    __ li(dividend, Operand::Zero());
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
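+  // The remainder is dividend - trunc(dividend / |divisor|) * |divisor|, so
+  // it takes the dividend's sign; e.g. 7 % -3: trunc(7 / 3) == 2 and
+  // 7 - 2 * 3 == 1.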
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  __ mov(ip, Operand(Abs(divisor)));
+  __ mullw(result, result, ip);
+  __ sub(result, dividend, result, LeaveOE, SetRC);
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ bne(&remainder_not_zero, cr0);
+    __ cmpwi(dividend, Operand::Zero());
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  HMod* hmod = instr->hydrogen();
+  Register left_reg = ToRegister(instr->left());
+  Register right_reg = ToRegister(instr->right());
+  Register result_reg = ToRegister(instr->result());
+  Register scratch = scratch0();
+  bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
+  Label done;
+
+  if (can_overflow) {
+    __ li(r0, Operand::Zero());  // clear xer
+    __ mtxer(r0);
+  }
+
+  __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
+
+  // Check for x % 0.
+  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ cmpwi(right_reg, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for kMinInt % -1; divw will return an undefined result, which is
+  // not what we want. We have to deopt if we care about -0, because we can't
+  // return that.
+  if (can_overflow) {
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
+    } else {
+      if (CpuFeatures::IsSupported(ISELECT)) {
+        __ isel(overflow, result_reg, r0, result_reg, cr0);
+        __ boverflow(&done, cr0);
+      } else {
+        Label no_overflow_possible;
+        __ bnooverflow(&no_overflow_possible, cr0);
+        __ li(result_reg, Operand::Zero());
+        __ b(&done);
+        __ bind(&no_overflow_possible);
+      }
+    }
+  }
+
+  __ mullw(scratch, right_reg, scratch);
+  __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
+
+  // If we care about -0, test if the dividend is <0 and the result is 0.
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ bne(&done, cr0);
+    __ cmpwi(left_reg, Operand::Zero());
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+  }
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ cmpwi(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+    __ cmpw(dividend, r0);
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+  }
+
+  int32_t shift = WhichPowerOf2Abs(divisor);
+
+  // Deoptimize if remainder will not be 0.
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
+    __ TestBitRange(dividend, shift - 1, 0, r0);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
+  }
+
+  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
+    __ neg(result, dividend);
+    return;
+  }
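+  // For negative dividends, adding (2^shift - 1) before the arithmetic shift
+  // turns flooring into truncation; e.g. trunc(-13 / 4): the bias is 3 and
+  // (-13 + 3) >> 2 == -3.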
+  if (shift == 0) {
+    __ mr(result, dividend);
+  } else {
+    if (shift == 1) {
+      __ srwi(result, dividend, Operand(31));
+    } else {
+      __ srawi(result, dividend, 31);
+      __ srwi(result, result, Operand(32 - shift));
+    }
+    __ add(result, dividend, result);
+    __ srawi(result, result, shift);
+  }
+  if (divisor < 0) __ neg(result, result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ cmpwi(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+
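+  // TruncatingDiv computes trunc(dividend / |divisor|) via a multiply by a
+  // precomputed magic constant; the multiply-back check below deoptimizes
+  // when precision would be lost.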
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ neg(result, result);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    Register scratch = scratch0();
+    __ mov(ip, Operand(divisor));
+    __ mullw(scratch, result, ip);
+    __ cmpw(scratch, dividend);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  const Register dividend = ToRegister(instr->dividend());
+  const Register divisor = ToRegister(instr->divisor());
+  Register result = ToRegister(instr->result());
+  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
+
+  DCHECK(!dividend.is(result));
+  DCHECK(!divisor.is(result));
+
+  if (can_overflow) {
+    __ li(r0, Operand::Zero());  // clear xer
+    __ mtxer(r0);
+  }
+
+  __ divw(result, dividend, divisor, SetOE, SetRC);
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ cmpwi(divisor, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ cmpwi(dividend, Operand::Zero());
+    __ bne(&dividend_not_zero);
+    __ cmpwi(divisor, Operand::Zero());
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (can_overflow) {
+    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+    } else {
+      // When truncating, we want kMinInt / -1 = kMinInt.
+      if (CpuFeatures::IsSupported(ISELECT)) {
+        __ isel(overflow, result, dividend, result, cr0);
+      } else {
+        Label no_overflow_possible;
+        __ bnooverflow(&no_overflow_possible, cr0);
+        __ mr(result, dividend);
+        __ bind(&no_overflow_possible);
+      }
+    }
+  }
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    // Deoptimize if remainder is not 0.
+    Register scratch = scratch0();
+    __ mullw(scratch, divisor, result);
+    __ cmpw(dividend, scratch);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register result = ToRegister(instr->result());
+  int32_t divisor = instr->divisor();
+  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 0) {
+    if (shift || !result.is(dividend)) {
+      __ srawi(result, dividend, shift);
+    }
+    return;
+  }
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+  OEBit oe = LeaveOE;
+#if V8_TARGET_ARCH_PPC64
+  if (divisor == -1 && can_overflow) {
+    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+    __ cmpw(dividend, r0);
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+  }
+#else
+  if (can_overflow) {
+    __ li(r0, Operand::Zero());  // clear xer
+    __ mtxer(r0);
+    oe = SetOE;
+  }
+#endif
+
+  __ neg(result, dividend, oe, SetRC);
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+  }
+
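+  // With the dividend negated, an arithmetic shift yields the floor for a
+  // negative divisor -2^shift; e.g. floor(7 / -4): the negation gives -7 and
+  // -7 >> 2 == -2.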
+// If the negation could not overflow, simply shifting is OK.
+#if !V8_TARGET_ARCH_PPC64
+  if (!can_overflow) {
+#endif
+    if (shift) {
+      __ ShiftRightArithImm(result, result, shift);
+    }
+    return;
+#if !V8_TARGET_ARCH_PPC64
+  }
+
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+    return;
+  }
+
+  Label overflow, done;
+  __ boverflow(&overflow, cr0);
+  __ srawi(result, result, shift);
+  __ b(&done);
+  __ bind(&overflow);
+  __ mov(result, Operand(kMinInt / divisor));
+  __ bind(&done);
+#endif
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ cmpwi(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(result, dividend, Abs(divisor));
+    if (divisor < 0) __ neg(result, result);
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
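+  // Nudging the dividend one unit toward zero and subtracting one afterwards
+  // yields the floor even when the division is exact; e.g. floor(-7 / 3) ==
+  // trunc((-7 + 1) / 3) - 1 == -3, and floor(-6 / 3) == trunc(-5 / 3) - 1 == -2.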
+  Register temp = ToRegister(instr->temp());
+  DCHECK(!temp.is(dividend) && !temp.is(result));
+  Label needs_adjustment, done;
+  __ cmpwi(dividend, Operand::Zero());
+  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ neg(result, result);
+  __ b(&done);
+  __ bind(&needs_adjustment);
+  __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(result, temp, Abs(divisor));
+  if (divisor < 0) __ neg(result, result);
+  __ subi(result, result, Operand(1));
+  __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  const Register dividend = ToRegister(instr->dividend());
+  const Register divisor = ToRegister(instr->divisor());
+  Register result = ToRegister(instr->result());
+  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);
+
+  DCHECK(!dividend.is(result));
+  DCHECK(!divisor.is(result));
+
+  if (can_overflow) {
+    __ li(r0, Operand::Zero());  // clear xer
+    __ mtxer(r0);
+  }
+
+  __ divw(result, dividend, divisor, SetOE, SetRC);
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ cmpwi(divisor, Operand::Zero());
+    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ cmpwi(dividend, Operand::Zero());
+    __ bne(&dividend_not_zero);
+    __ cmpwi(divisor, Operand::Zero());
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (can_overflow) {
+    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+    } else {
+      // When truncating, we want kMinInt / -1 = kMinInt.
+      if (CpuFeatures::IsSupported(ISELECT)) {
+        __ isel(overflow, result, dividend, result, cr0);
+      } else {
+        Label no_overflow_possible;
+        __ bnooverflow(&no_overflow_possible, cr0);
+        __ mr(result, dividend);
+        __ bind(&no_overflow_possible);
+      }
+    }
+  }
+
+  Label done;
+  Register scratch = scratch0();
+// If both operands have the same sign then we are done.
+#if V8_TARGET_ARCH_PPC64
+  __ xor_(scratch, dividend, divisor);
+  __ cmpwi(scratch, Operand::Zero());
+  __ bge(&done);
+#else
+  __ xor_(scratch, dividend, divisor, SetRC);
+  __ bge(&done, cr0);
+#endif
+
+  // If there is no remainder then we are done.
+  __ mullw(scratch, divisor, result);
+  __ cmpw(dividend, scratch);
+  __ beq(&done);
+
+  // We performed a truncating division. Correct the result.
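+  // Signs differ and the remainder is non-zero, so the truncated quotient is
+  // one above the floor; e.g. trunc(-7 / 2) == -3 while floor(-7 / 2) == -4.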
+  __ subi(result, result, Operand(1));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+  DoubleRegister addend = ToDoubleRegister(instr->addend());
+  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+
+  __ fmadd(result, multiplier, multiplicand, addend);
+}
+
+
+void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
+  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
+  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+
+  __ fmsub(result, multiplier, multiplicand, minuend);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
+  Register left = ToRegister(instr->left());
+  LOperand* right_op = instr->right();
+
+  bool bailout_on_minus_zero =
+      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (right_op->IsConstantOperand()) {
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below.
+      // If the constant is negative and left is zero, the result should be -0.
+      __ cmpi(left, Operand::Zero());
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+    }
+
+    switch (constant) {
+      case -1:
+        if (can_overflow) {
+#if V8_TARGET_ARCH_PPC64
+          if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+            __ li(r0, Operand::Zero());  // clear xer
+            __ mtxer(r0);
+            __ neg(result, left, SetOE, SetRC);
+            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+#if V8_TARGET_ARCH_PPC64
+          } else {
+            __ neg(result, left);
+            __ TestIfInt32(result, r0);
+            DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+          }
+#endif
+        } else {
+          __ neg(result, left);
+        }
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+// If left is strictly negative and the constant is zero, the
+// result is -0. Deoptimize if required, otherwise return 0.
+#if V8_TARGET_ARCH_PPC64
+          if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+            __ cmpi(left, Operand::Zero());
+#if V8_TARGET_ARCH_PPC64
+          } else {
+            __ cmpwi(left, Operand::Zero());
+          }
+#endif
+          DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+        }
+        __ li(result, Operand::Zero());
+        break;
+      case 1:
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
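+        // e.g. left * 5 == (left << 2) + left, and left * 7 == (left << 3) - left.
+        // The mask trick computes |constant| without a branch: for a negative
+        // constant, mask == -1 and (constant - 1) ^ -1 == -constant; for a
+        // non-negative constant, mask == 0 and the value is unchanged.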
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
+
+        if (base::bits::IsPowerOfTwo32(constant_abs)) {
+          int32_t shift = WhichPowerOf2(constant_abs);
+          __ ShiftLeftImm(result, left, Operand(shift));
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ neg(result, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs - 1);
+          __ ShiftLeftImm(scratch, left, Operand(shift));
+          __ add(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ neg(result, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs + 1);
+          __ ShiftLeftImm(scratch, left, Operand(shift));
+          __ sub(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ neg(result, result);
+        } else {
+          // Generate standard code.
+          __ mov(ip, Operand(constant));
+          __ Mul(result, left, ip);
+        }
+    }
+
+  } else {
+    DCHECK(right_op->IsRegister());
+    Register right = ToRegister(right_op);
+
+    if (can_overflow) {
+#if V8_TARGET_ARCH_PPC64
+      // result = left * right.
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ SmiUntag(scratch, right);
+        __ Mul(result, result, scratch);
+      } else {
+        __ Mul(result, left, right);
+      }
+      __ TestIfInt32(result, r0);
+      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiTag(result);
+      }
+#else
+      // scratch:result = left * right.
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ mulhw(scratch, result, right);
+        __ mullw(result, result, right);
+      } else {
+        __ mulhw(scratch, left, right);
+        __ mullw(result, left, right);
+      }
+      __ TestIfInt32(scratch, result, r0);
+      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+#endif
+    } else {
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ Mul(result, result, right);
+      } else {
+        __ Mul(result, left, right);
+      }
+    }
+
+    if (bailout_on_minus_zero) {
+      Label done;
+#if V8_TARGET_ARCH_PPC64
+      if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+        __ xor_(r0, left, right, SetRC);
+        __ bge(&done, cr0);
+#if V8_TARGET_ARCH_PPC64
+      } else {
+        __ xor_(r0, left, right);
+        __ cmpwi(r0, Operand::Zero());
+        __ bge(&done);
+      }
+#endif
+      // Bail out if the result is minus zero.
+      __ cmpi(result, Operand::Zero());
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left_op = instr->left();
+  LOperand* right_op = instr->right();
+  DCHECK(left_op->IsRegister());
+  Register left = ToRegister(left_op);
+  Register result = ToRegister(instr->result());
+  Operand right(no_reg);
+
+  if (right_op->IsStackSlot()) {
+    right = Operand(EmitLoadRegister(right_op, ip));
+  } else {
+    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
+    right = ToOperand(right_op);
+
+    if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
+      switch (instr->op()) {
+        case Token::BIT_AND:
+          __ andi(result, left, right);
+          break;
+        case Token::BIT_OR:
+          __ ori(result, left, right);
+          break;
+        case Token::BIT_XOR:
+          __ xori(result, left, right);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+  }
+
+  switch (instr->op()) {
+    case Token::BIT_AND:
+      __ And(result, left, right);
+      break;
+    case Token::BIT_OR:
+      __ Or(result, left, right);
+      break;
+    case Token::BIT_XOR:
+      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+        __ notx(result, left);
+      } else {
+        __ Xor(result, left, right);
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+  // result may alias either of them.
+  LOperand* right_op = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  if (right_op->IsRegister()) {
+    // Mask the right_op operand.
+    __ andi(scratch, ToRegister(right_op), Operand(0x1F));
+    switch (instr->op()) {
+      case Token::ROR:
+        // rotate_right(a, b) == rotate_left(a, 32 - b)
+        __ subfic(scratch, scratch, Operand(32));
+        __ rotlw(result, left, scratch);
+        break;
+      case Token::SAR:
+        __ sraw(result, left, scratch);
+        break;
+      case Token::SHR:
+        if (instr->can_deopt()) {
+          __ srw(result, left, scratch, SetRC);
+#if V8_TARGET_ARCH_PPC64
+          __ extsw(result, result, SetRC);
+#endif
+          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
+        } else {
+          __ srw(result, left, scratch);
+        }
+        break;
+      case Token::SHL:
+        __ slw(result, left, scratch);
+#if V8_TARGET_ARCH_PPC64
+        __ extsw(result, result);
+#endif
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    // Mask the right_op operand.
+    int value = ToInteger32(LConstantOperand::cast(right_op));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::ROR:
+        if (shift_count != 0) {
+          __ rotrwi(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ srawi(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ srwi(result, left, Operand(shift_count));
+        } else {
+          if (instr->can_deopt()) {
+            __ cmpwi(left, Operand::Zero());
+            DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
+          }
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+#if V8_TARGET_ARCH_PPC64
+          if (instr->hydrogen_value()->representation().IsSmi()) {
+            __ sldi(result, left, Operand(shift_count));
+#else
+          if (instr->hydrogen_value()->representation().IsSmi() &&
+              instr->can_deopt()) {
+            if (shift_count != 1) {
+              __ slwi(result, left, Operand(shift_count - 1));
+              __ SmiTagCheckOverflow(result, result, scratch);
+            } else {
+              __ SmiTagCheckOverflow(result, left, scratch);
+            }
+            DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+#endif
+          } else {
+            __ slwi(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_PPC64
+            __ extsw(result, result);
+#endif
+          }
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* right = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#if V8_TARGET_ARCH_PPC64
+  const bool isInteger = !instr->hydrogen()->representation().IsSmi();
+#else
+  const bool isInteger = false;
+#endif
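+  // On PPC64, non-smi int32 arithmetic uses full 64-bit registers, so
+  // overflow is detected after the fact by checking that the result still
+  // fits in 32 bits, instead of via the XER overflow bit.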
+  if (!can_overflow || isInteger) {
+    if (right->IsConstantOperand()) {
+      __ Add(result, left, -(ToOperand(right).immediate()), r0);
+    } else {
+      __ sub(result, left, EmitLoadRegister(right, ip));
+    }
+#if V8_TARGET_ARCH_PPC64
+    if (can_overflow) {
+      __ TestIfInt32(result, r0);
+      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+    }
+#endif
+  } else {
+    if (right->IsConstantOperand()) {
+      __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
+                                scratch0(), r0);
+    } else {
+      __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
+                                scratch0(), r0);
+    }
+    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+  }
+}
+
+
+void LCodeGen::DoRSubI(LRSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+
+  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
+         right->IsConstantOperand());
+
+  Operand right_operand = ToOperand(right);
+  if (is_int16(right_operand.immediate())) {
+    __ subfic(ToRegister(result), ToRegister(left), right_operand);
+  } else {
+    __ mov(r0, right_operand);
+    __ sub(ToRegister(result), r0, ToRegister(left));
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+  __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  DCHECK(instr->result()->IsDoubleRegister());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+#if V8_HOST_ARCH_IA32
+  // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
+  // builds.
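+  // The mask picks out an all-ones exponent with the top mantissa bit clear,
+  // i.e. infinities and signalling NaNs; moving those via integer registers
+  // keeps the bit pattern intact.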
+  uint64_t bits = instr->bits();
+  if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
+      V8_UINT64_C(0x7FF0000000000000)) {
+    uint32_t lo = static_cast<uint32_t>(bits);
+    uint32_t hi = static_cast<uint32_t>(bits >> 32);
+    __ mov(ip, Operand(lo));
+    __ mov(scratch0(), Operand(hi));
+    __ MovInt64ToDouble(result, scratch0(), ip);
+    return;
+  }
+#endif
+  double v = instr->value();
+  __ LoadDoubleLiteral(result, v, scratch0());
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Handle<Object> object = instr->value(isolate());
+  AllowDeferredHandleDereference smi_check;
+  __ Move(ToRegister(instr->result()), object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->value());
+  __ EnumLength(result, map);
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
+                                           String::Encoding encoding) {
+  if (index->IsConstantOperand()) {
+    int offset = ToInteger32(LConstantOperand::cast(index));
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset *= kUC16Size;
+    }
+    STATIC_ASSERT(kCharSize == 1);
+    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+  }
+  Register scratch = scratch0();
+  DCHECK(!scratch.is(string));
+  DCHECK(!scratch.is(ToRegister(index)));
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ add(scratch, string, ToRegister(index));
+  } else {
+    STATIC_ASSERT(kUC16Size == 2);
+    __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
+    __ add(scratch, string, scratch);
+  }
+  return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  if (FLAG_debug_code) {
+    Register scratch = scratch0();
+    __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+    __ andi(scratch, scratch,
+            Operand(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ cmpi(scratch,
+            Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
+                                                          : two_byte_seq_type));
+    __ Check(eq, kUnexpectedStringType);
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ lbz(result, operand);
+  } else {
+    __ lhz(result, operand);
+  }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register value = ToRegister(instr->value());
+
+  if (FLAG_debug_code) {
+    Register index = ToRegister(instr->index());
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    int encoding_mask =
+        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+            ? one_byte_seq_type
+            : two_byte_seq_type;
+    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ stb(value, operand);
+  } else {
+    __ sth(value, operand);
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* right = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#if V8_TARGET_ARCH_PPC64
+  const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
+                           instr->hydrogen()->representation().IsExternal());
+#else
+  const bool isInteger = false;
+#endif
+
+  if (!can_overflow || isInteger) {
+    if (right->IsConstantOperand()) {
+      __ Add(result, left, ToOperand(right).immediate(), r0);
+    } else {
+      __ add(result, left, EmitLoadRegister(right, ip));
+    }
+#if V8_TARGET_ARCH_PPC64
+    if (can_overflow) {
+      __ TestIfInt32(result, r0);
+      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+    }
+#endif
+  } else {
+    if (right->IsConstantOperand()) {
+      __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
+                                scratch0(), r0);
+    } else {
+      __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
+                                scratch0(), r0);
+    }
+    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+  }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  HMathMinMax::Operation operation = instr->hydrogen()->operation();
+  Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Register left_reg = ToRegister(left);
+    Register right_reg = EmitLoadRegister(right, ip);
+    Register result_reg = ToRegister(instr->result());
+    Label return_left, done;
+#if V8_TARGET_ARCH_PPC64
+    if (instr->hydrogen_value()->representation().IsSmi()) {
+#endif
+      __ cmp(left_reg, right_reg);
+#if V8_TARGET_ARCH_PPC64
+    } else {
+      __ cmpw(left_reg, right_reg);
+    }
+#endif
+    if (CpuFeatures::IsSupported(ISELECT)) {
+      __ isel(cond, result_reg, left_reg, right_reg);
+    } else {
+      __ b(cond, &return_left);
+      __ Move(result_reg, right_reg);
+      __ b(&done);
+      __ bind(&return_left);
+      __ Move(result_reg, left_reg);
+      __ bind(&done);
+    }
+  } else {
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    DoubleRegister left_reg = ToDoubleRegister(left);
+    DoubleRegister right_reg = ToDoubleRegister(right);
+    DoubleRegister result_reg = ToDoubleRegister(instr->result());
+    Label check_nan_left, check_zero, return_left, return_right, done;
+    __ fcmpu(left_reg, right_reg);
+    __ bunordered(&check_nan_left);
+    __ beq(&check_zero);
+    __ b(cond, &return_left);
+    __ b(&return_right);
+
+    __ bind(&check_zero);
+    __ fcmpu(left_reg, kDoubleRegZero);
+    __ bne(&return_left);  // left == right != 0.
+
+    // At this point, both left and right are either 0 or -0.
+    // N.B. The following works because +0 + -0 == +0.
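+    // e.g. min(-0, +0) == -((+0) + (-0)) == -(+0) == -0, while
+    // max(-0, +0) == (-0) + (+0) == +0.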
+    if (operation == HMathMinMax::kMathMin) {
+      // For min we want logical-or of sign bit: -(-L + -R)
+      __ fneg(left_reg, left_reg);
+      __ fsub(result_reg, left_reg, right_reg);
+      __ fneg(result_reg, result_reg);
+    } else {
+      // For max we want logical-and of sign bit: (L + R)
+      __ fadd(result_reg, left_reg, right_reg);
+    }
+    __ b(&done);
+
+    __ bind(&check_nan_left);
+    __ fcmpu(left_reg, left_reg);
+    __ bunordered(&return_left);  // left == NaN.
+
+    __ bind(&return_right);
+    if (!right_reg.is(result_reg)) {
+      __ fmr(result_reg, right_reg);
+    }
+    __ b(&done);
+
+    __ bind(&return_left);
+    if (!left_reg.is(result_reg)) {
+      __ fmr(result_reg, left_reg);
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->left());
+  DoubleRegister right = ToDoubleRegister(instr->right());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ fadd(result, left, right);
+      break;
+    case Token::SUB:
+      __ fsub(result, left, right);
+      break;
+    case Token::MUL:
+      __ fmul(result, left, right);
+      break;
+    case Token::DIV:
+      __ fdiv(result, left, right);
+      break;
+    case Token::MOD: {
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ MovToFloatParameters(left, right);
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      // Move the result in the double result register.
+      __ MovFromFloatResult(result);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r4));
+  DCHECK(ToRegister(instr->right()).is(r3));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+
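+  // If both destinations coincide (or the condition is always taken), a
+  // single unconditional jump suffices.  If either destination is the next
+  // block to be emitted, fall through to it and branch only for the other
+  // side; otherwise emit a conditional branch plus an unconditional one.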
+  if (right_block == left_block || cond == al) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
+  } else if (right_block == next_block) {
+    __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
+  } else {
+    __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
+    __ b(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) {
+  int true_block = instr->TrueDestination(chunk_);
+  __ b(cond, chunk_->GetAssemblyLabel(true_block), cr);
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
+  int false_block = instr->FalseDestination(chunk_);
+  __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  DoubleRegister dbl_scratch = double_scratch0();
+  const uint32_t crZOrNaNBits =
+      (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
+       1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
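+  // After an fcmpu against zero, cr7's EQ bit is set for +/-0 and its FU
+  // (unordered) bit for NaN; crZOrNaNBits masks exactly those two bits out
+  // of the condition-register image, so (CR & mask) == 0 iff the value is
+  // neither zero nor NaN, i.e. truthy.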
+
+  if (r.IsInteger32()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    __ cmpwi(reg, Operand::Zero());
+    EmitBranch(instr, ne);
+  } else if (r.IsSmi()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    __ cmpi(reg, Operand::Zero());
+    EmitBranch(instr, ne);
+  } else if (r.IsDouble()) {
+    DCHECK(!info()->IsStub());
+    DoubleRegister reg = ToDoubleRegister(instr->value());
+    // Test the double value. Zero and NaN are false.
+    __ fcmpu(reg, kDoubleRegZero, cr7);
+    __ mfcr(r0);
+    __ andi(r0, r0, Operand(crZOrNaNBits));
+    EmitBranch(instr, eq, cr0);
+  } else {
+    DCHECK(r.IsTagged());
+    Register reg = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      DCHECK(!info()->IsStub());
+      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+      EmitBranch(instr, eq);
+    } else if (type.IsSmi()) {
+      DCHECK(!info()->IsStub());
+      __ cmpi(reg, Operand::Zero());
+      EmitBranch(instr, ne);
+    } else if (type.IsJSArray()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, al);
+    } else if (type.IsHeapNumber()) {
+      DCHECK(!info()->IsStub());
+      __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+      // Test the double value. Zero and NaN are false.
+      __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
+      __ mfcr(r0);
+      __ andi(r0, r0, Operand(crZOrNaNBits));
+      EmitBranch(instr, eq, cr0);
+    } else if (type.IsString()) {
+      DCHECK(!info()->IsStub());
+      __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+      __ cmpi(ip, Operand::Zero());
+      EmitBranch(instr, ne);
+    } else {
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+        __ beq(instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+        __ beq(instr->TrueLabel(chunk_));
+        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+        __ beq(instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ CompareRoot(reg, Heap::kNullValueRootIndex);
+        __ beq(instr->FalseLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ cmpi(reg, Operand::Zero());
+        __ beq(instr->FalseLabel(chunk_));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ TestIfSmi(reg, r0);
+        DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+      }
+
+      const Register map = scratch0();
+      if (expected.NeedsMap()) {
+        __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ TestBit(ip, Map::kIsUndetectable, r0);
+          __ bne(instr->FalseLabel(chunk_), cr0);
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
+        __ bge(instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+        __ bge(&not_string);
+        __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+        __ cmpi(ip, Operand::Zero());
+        __ bne(instr->TrueLabel(chunk_));
+        __ b(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
+        __ beq(instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+        // SIMD value -> true.
+        __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
+        __ beq(instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        Label not_heap_number;
+        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+        __ bne(&not_heap_number);
+        __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+        // Test the double value. Zero and NaN are false.
+        __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
+        __ mfcr(r0);
+        __ andi(r0, r0, Operand(crZOrNaNBits));
+        __ bne(instr->FalseLabel(chunk_), cr0);
+        __ b(instr->TrueLabel(chunk_));
+        __ bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+      }
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  if (!IsNextEmittedBlock(block)) {
+    __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op) {
+  Condition cond = kNoCondition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
+    case Token::LT:
+      cond = lt;
+      break;
+    case Token::GT:
+      cond = gt;
+      break;
+    case Token::LTE:
+      cond = le;
+      break;
+    case Token::GTE:
+      cond = ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  bool is_unsigned =
+      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+  Condition cond = TokenToCondition(instr->op());
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = EvalComparison(instr->op(), left_val, right_val)
+                         ? instr->TrueDestination(chunk_)
+                         : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Compare left and right operands as doubles and load the
+      // resulting flags into the normal status register.
+      __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
+      // If a NaN is involved, i.e. the result is unordered,
+      // jump to the false block label.
+      __ bunordered(instr->FalseLabel(chunk_));
+    } else {
+      if (right->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(right));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          if (is_unsigned) {
+            __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+          } else {
+            __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+          }
+        } else {
+          if (is_unsigned) {
+            __ Cmplwi(ToRegister(left), Operand(value), r0);
+          } else {
+            __ Cmpwi(ToRegister(left), Operand(value), r0);
+          }
+        }
+      } else if (left->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(left));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          if (is_unsigned) {
+            __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+          } else {
+            __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+          }
+        } else {
+          if (is_unsigned) {
+            __ Cmplwi(ToRegister(right), Operand(value), r0);
+          } else {
+            __ Cmpwi(ToRegister(right), Operand(value), r0);
+          }
+        }
+        // We commuted the operands, so commute the condition.
+        cond = CommuteCondition(cond);
+      } else if (instr->hydrogen_value()->representation().IsSmi()) {
+        if (is_unsigned) {
+          __ cmpl(ToRegister(left), ToRegister(right));
+        } else {
+          __ cmp(ToRegister(left), ToRegister(right));
+        }
+      } else {
+        if (is_unsigned) {
+          __ cmplw(ToRegister(left), ToRegister(right));
+        } else {
+          __ cmpw(ToRegister(left), ToRegister(right));
+        }
+      }
+    }
+    EmitBranch(instr, cond);
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+
+  __ cmp(left, right);
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+  if (instr->hydrogen()->representation().IsTagged()) {
+    Register input_reg = ToRegister(instr->object());
+    __ mov(ip, Operand(factory()->the_hole_value()));
+    __ cmp(input_reg, ip);
+    EmitBranch(instr, eq);
+    return;
+  }
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->object());
+  __ fcmpu(input_reg, input_reg);
+  EmitFalseBranch(instr, ordered);
+
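+  // The value is some NaN.  The hole is a NaN with a fixed bit pattern, so
+  // comparing the upper 32 bits against kHoleNanUpper32 identifies it.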
+  Register scratch = scratch0();
+  __ MovDoubleHighToInt(scratch, input_reg);
+  __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+  Representation rep = instr->hydrogen()->value()->representation();
+  DCHECK(!rep.IsInteger32());
+  Register scratch = ToRegister(instr->temp());
+
+  if (rep.IsDouble()) {
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    __ fcmpu(value, kDoubleRegZero);
+    EmitFalseBranch(instr, ne);
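+    // The value compares equal to zero, so only the sign bit can
+    // distinguish -0.0 from +0.0; branch on the raw bits being negative.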
+#if V8_TARGET_ARCH_PPC64
+    __ MovDoubleToInt64(scratch, value);
+#else
+    __ MovDoubleHighToInt(scratch, value);
+#endif
+    __ cmpi(scratch, Operand::Zero());
+    EmitBranch(instr, lt);
+  } else {
+    Register value = ToRegister(instr->value());
+    __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
+                instr->FalseLabel(chunk()), DO_SMI_CHECK);
+#if V8_TARGET_ARCH_PPC64
+    __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
+    __ li(ip, Operand(1));
+    __ rotrdi(ip, ip, 1);  // ip = 0x80000000_00000000
+    __ cmp(scratch, ip);
+#else
+    __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+    __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+    Label skip;
+    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+    __ cmp(scratch, r0);
+    __ bne(&skip);
+    __ cmpi(ip, Operand::Zero());
+    __ bind(&skip);
+#endif
+    EmitBranch(instr, eq);
+  }
+}
+
+
+Condition LCodeGen::EmitIsString(Register input, Register temp1,
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+  return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp1 = ToRegister(instr->temp());
+
+  SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+                              ? OMIT_SMI_CHECK
+                              : INLINE_SMI_CHECK;
+  Condition true_cond =
+      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
+
+  EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Register input_reg = EmitLoadRegister(instr->value(), ip);
+  __ TestIfSmi(input_reg, r0);
+  EmitBranch(instr, eq, cr0);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  __ TestBit(temp, Map::kIsUndetectable, r0);
+  EmitBranch(instr, ne, cr0);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r4));
+  DCHECK(ToRegister(instr->right()).is(r3));
+
+  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+  __ cmpi(r3, Operand::Zero());
+
+  EmitBranch(instr, ComputeCompareCondition(instr->op()));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  DCHECK(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return ge;
+  if (from == FIRST_TYPE) return le;
+  UNREACHABLE();
+  return eq;
+}
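+// TestType and BranchCondition work as a pair: TestType returns the
+// boundary of the [from, to] instance-type range and BranchCondition the
+// comparison against it (eq for an exact type, ge for a range open at
+// LAST_TYPE, le for a range starting at FIRST_TYPE).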
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+
+  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+  EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ AssertString(input);
+
+  __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
+  __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
+  __ and_(r0, scratch, r0, SetRC);
+  EmitBranch(instr, eq, cr0);
+}
+
+
+// Branches to a label or falls through with the answer in flags.  Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
+                               Handle<String> class_name, Register input,
+                               Register temp, Register temp2) {
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
+
+  __ JumpIfSmi(input, is_false);
+
+  __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+    __ beq(is_true);
+  } else {
+    __ beq(is_false);
+  }
+
+  // Check if the constructor in the map is a function.
+  Register instance_type = ip;
+  __ GetMapConstructor(temp, temp, temp2, instance_type);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
+  if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
+    __ bne(is_true);
+  } else {
+    __ bne(is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(temp,
+           FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are internalized it is sufficient to use an
+  // identity comparison.
+  __ Cmpi(temp, Operand(class_name), r0);
+  // End with the answer in flags.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = scratch0();
+  Register temp2 = ToRegister(instr->temp());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+                  class_name, input, temp, temp2);
+
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+  __ Cmpi(temp, Operand(instr->map()), r0);
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+  DCHECK(ToRegister(instr->result()).is(r3));
+  InstanceOfStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+    LHasInPrototypeChainAndBranch* instr) {
+  Register const object = ToRegister(instr->object());
+  Register const object_map = scratch0();
+  Register const object_instance_type = ip;
+  Register const object_prototype = object_map;
+  Register const prototype = ToRegister(instr->prototype());
+
+  // The {object} must be a spec object.  It's sufficient to know that {object}
+  // is not a smi, since all other non-spec objects have {null} prototypes and
+  // will be ruled out below.
+  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+    __ TestIfSmi(object, r0);
+    EmitFalseBranch(instr, eq, cr0);
+  }
+
+  // Loop through the {object}'s prototype chain looking for the {prototype}.
+  __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+  Label loop;
+  __ bind(&loop);
+
+  // Deoptimize if the object needs to be access checked.
+  __ lbz(object_instance_type,
+         FieldMemOperand(object_map, Map::kBitFieldOffset));
+  __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
+  DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
+  // Deoptimize for proxies.
+  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
+  DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+  __ LoadP(object_prototype,
+           FieldMemOperand(object_map, Map::kPrototypeOffset));
+  __ cmp(object_prototype, prototype);
+  EmitTrueBranch(instr, eq);
+  __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+  EmitFalseBranch(instr, eq);
+  __ LoadP(object_map,
+           FieldMemOperand(object_prototype, HeapObject::kMapOffset));
+  __ b(&loop);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Token::Value op = instr->op();
+
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // This instruction also signals no smi code inlined
+  __ cmpi(r3, Operand::Zero());
+
+  Condition condition = ComputeCompareCondition(op);
+  if (CpuFeatures::IsSupported(ISELECT)) {
+    __ LoadRoot(r4, Heap::kTrueValueRootIndex);
+    __ LoadRoot(r5, Heap::kFalseValueRootIndex);
+    __ isel(condition, ToRegister(instr->result()), r4, r5);
+  } else {
+    Label true_value, done;
+
+    __ b(condition, &true_value);
+
+    __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+    __ b(&done);
+
+    __ bind(&true_value);
+    __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in r3.  Since we're leaving
+    // the code managed by the register allocator and tearing down the frame,
+    // it's safe to write to the context register.
+    __ push(r3);
+    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntime(Runtime::kTraceExit);
+  }
+  if (info()->saves_caller_doubles()) {
+    RestoreCallerDoubles();
+  }
+  if (instr->has_constant_parameter_count()) {
+    int parameter_count = ToInteger32(instr->constant_parameter_count());
+    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
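+    // The +1 accounts for the receiver, which is popped along with the
+    // declared parameters.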
+    if (NeedsEagerFrame()) {
+      masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+    } else if (sp_delta != 0) {
+      __ addi(sp, sp, Operand(sp_delta));
+    }
+  } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
+    Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi.
+    if (NeedsEagerFrame()) {
+      masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+    }
+    __ SmiToPtrArrayOffset(r0, reg);
+    __ add(sp, sp, r0);
+  }
+
+  __ blr();
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = LoadDescriptor::SlotRegister();
+  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(r3));
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Move(vector_register, vector);
+  // No need to allocate this register.
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = ToRegister(instr->temp_slot());
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Move(vector_register, vector);
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+                                         SLOPPY, PREMONOMORPHIC).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      __ cmp(result, ip);
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+    } else {
+      if (CpuFeatures::IsSupported(ISELECT)) {
+        Register scratch = scratch0();
+        __ mov(scratch, Operand(factory()->undefined_value()));
+        __ cmp(result, ip);
+        __ isel(eq, result, scratch, result);
+      } else {
+        Label skip;
+        __ cmp(result, ip);
+        __ bne(&skip);
+        __ mov(result, Operand(factory()->undefined_value()));
+        __ bind(&skip);
+      }
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
+  MemOperand target = ContextMemOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadP(scratch, target);
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(scratch, ip);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+    } else {
+      __ bne(&skip_assignment);
+    }
+  }
+
+  __ StoreP(value, target, r0);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+                                ? OMIT_SMI_CHECK
+                                : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context, target.offset(), value, scratch,
+                              GetLinkRegisterState(), kSaveFPRegs,
+                              EMIT_REMEMBERED_SET, check_needed);
+  }
+
+  __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+  Register object = ToRegister(instr->object());
+
+  if (access.IsExternalMemory()) {
+    Register result = ToRegister(instr->result());
+    MemOperand operand = MemOperand(object, offset);
+    __ LoadRepresentation(result, operand, access.representation(), r0);
+    return;
+  }
+
+  if (instr->hydrogen()->representation().IsDouble()) {
+    DCHECK(access.IsInobject());
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ lfd(result, FieldMemOperand(object, offset));
+    return;
+  }
+
+  Register result = ToRegister(instr->result());
+  if (!access.IsInobject()) {
+    __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    object = result;
+  }
+
+  Representation representation = access.representation();
+
+#if V8_TARGET_ARCH_PPC64
+  // 64-bit Smi optimization
+  if (representation.IsSmi() &&
+      instr->hydrogen()->representation().IsInteger32()) {
+    // Read int value directly from upper half of the smi.
+    offset = SmiWordOffset(offset);
+    representation = Representation::Integer32();
+  }
+#endif
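+  // On PPC64 the 32-bit payload of a smi occupies one word of the 8-byte
+  // cell; SmiWordOffset selects that word (which one is endianness
+  // dependent), so a plain 32-bit load reads the value without untagging.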
+
+  __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
+                        r0);
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  // Name is always in r5.
+  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(
+          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
+          instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register scratch = scratch0();
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // Get the prototype or initial map from the function.
+  __ LoadP(result,
+           FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(result, ip);
+  DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+
+  // If the function does not have an initial map, we're done.
+  if (CpuFeatures::IsSupported(ISELECT)) {
+    // Get the prototype from the initial map (optimistic).
+    __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
+    __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+    __ isel(eq, result, ip, result);
+  } else {
+    Label done;
+    __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+    __ bne(&done);
+
+    // Get the prototype from the initial map.
+    __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+    // All done.
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+  // There are two words between the frame pointer and the last argument.
+  // Subtracting from length accounts for one of them; add one more.
+  if (instr->length()->IsConstantOperand()) {
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    if (instr->index()->IsConstantOperand()) {
+      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+      int index = (const_length - const_index) + 1;
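+      // E.g. with const_length 3 and const_index 0, index is 4, so the
+      // load reads the word 4 * kPointerSize above `arguments`.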
+      __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
+    } else {
+      Register index = ToRegister(instr->index());
+      __ subfic(result, index, Operand(const_length + 1));
+      __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
+      __ LoadPX(result, MemOperand(arguments, result));
+    }
+  } else if (instr->index()->IsConstantOperand()) {
+    Register length = ToRegister(instr->length());
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int loc = const_index - 1;
+    if (loc != 0) {
+      __ subi(result, length, Operand(loc));
+      __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
+      __ LoadPX(result, MemOperand(arguments, result));
+    } else {
+      __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
+      __ LoadPX(result, MemOperand(arguments, result));
+    }
+  } else {
+    Register length = ToRegister(instr->length());
+    Register index = ToRegister(instr->index());
+    __ sub(result, length, index);
+    __ addi(result, result, Operand(1));
+    __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
+    __ LoadPX(result, MemOperand(arguments, result));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    if (key_is_constant) {
+      __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
+             r0);
+    } else {
+      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
+      __ add(scratch0(), external_pointer, r0);
+    }
+    if (elements_kind == FLOAT32_ELEMENTS) {
+      __ lfs(result, MemOperand(scratch0(), base_offset));
+    } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
+      __ lfd(result, MemOperand(scratch0(), base_offset));
+    }
+  } else {
+    Register result = ToRegister(instr->result());
+    MemOperand mem_operand =
+        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+                            constant_key, element_size_shift, base_offset);
+    switch (elements_kind) {
+      case INT8_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadByte(result, mem_operand, r0);
+        } else {
+          __ lbzx(result, mem_operand);
+        }
+        __ extsb(result, result);
+        break;
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadByte(result, mem_operand, r0);
+        } else {
+          __ lbzx(result, mem_operand);
+        }
+        break;
+      case INT16_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadHalfWordArith(result, mem_operand, r0);
+        } else {
+          __ lhax(result, mem_operand);
+        }
+        break;
+      case UINT16_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadHalfWord(result, mem_operand, r0);
+        } else {
+          __ lhzx(result, mem_operand);
+        }
+        break;
+      case INT32_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadWordArith(result, mem_operand, r0);
+        } else {
+          __ lwax(result, mem_operand);
+        }
+        break;
+      case UINT32_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadWord(result, mem_operand, r0);
+        } else {
+          __ lwzx(result, mem_operand);
+        }
+        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+          __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+          __ cmplw(result, r0);
+          DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+  Register elements = ToRegister(instr->elements());
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+
+  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
+  if (!key_is_constant) {
+    __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
+    __ add(scratch, elements, r0);
+    elements = scratch;
+  }
+  if (!is_int16(base_offset)) {
+    __ Add(scratch, elements, base_offset, r0);
+    base_offset = 0;
+    elements = scratch;
+  }
+  __ lfd(result, MemOperand(elements, base_offset));
+
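+  // The hole in a double array is a NaN with a fixed bit pattern, so
+  // checking the upper (exponent) word against kHoleNanUpper32 suffices.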
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    if (is_int16(base_offset + Register::kExponentOffset)) {
+      __ lwz(scratch,
+             MemOperand(elements, base_offset + Register::kExponentOffset));
+    } else {
+      __ addi(scratch, elements, Operand(base_offset));
+      __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
+    }
+    __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
+    DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+  HLoadKeyed* hinstr = instr->hydrogen();
+  Register elements = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  if (instr->key()->IsConstantOperand()) {
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    Register key = ToRegister(instr->key());
+    // Even though the HLoadKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bound check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (hinstr->key()->representation().IsSmi()) {
+      __ SmiToPtrArrayOffset(r0, key);
+    } else {
+      __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
+    }
+    __ add(scratch, elements, r0);
+  }
+
+  bool requires_hole_check = hinstr->RequiresHoleCheck();
+  Representation representation = hinstr->representation();
+
+#if V8_TARGET_ARCH_PPC64
+  // 64-bit Smi optimization
+  if (representation.IsInteger32() &&
+      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+    DCHECK(!requires_hole_check);
+    // Read int value directly from upper half of the smi.
+    offset = SmiWordOffset(offset);
+  }
+#endif
+
+  __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
+                        r0);
+
+  // Check for the hole value.
+  if (requires_hole_check) {
+    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
+      __ TestIfSmi(result, r0);
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+    } else {
+      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+      __ cmp(result, scratch);
+      DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+    }
+  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
+    Label done;
+    __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, scratch);
+    __ bne(&done);
+    if (info()->IsStub()) {
+      // A stub can safely convert the hole to undefined only if the array
+      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // it needs to bail out.
+      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
+      __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
+      __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
+      DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+    }
+    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+  if (instr->is_fixed_typed_array()) {
+    DoLoadKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->representation().IsDouble()) {
+    DoLoadKeyedFixedDoubleArray(instr);
+  } else {
+    DoLoadKeyedFixedArray(instr);
+  }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
+                                         bool key_is_constant, bool key_is_smi,
+                                         int constant_key,
+                                         int element_size_shift,
+                                         int base_offset) {
+  Register scratch = scratch0();
+
+  if (key_is_constant) {
+    return MemOperand(base, (constant_key << element_size_shift) + base_offset);
+  }
+
+  bool needs_shift =
+      (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
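+  // A smi key already carries an implicit left shift of
+  // kSmiTagSize + kSmiShiftSize from its tagging, so no extra shift is
+  // needed when that matches the element size shift.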
+
+  if (!(base_offset || needs_shift)) {
+    return MemOperand(base, key);
+  }
+
+  if (needs_shift) {
+    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+    key = scratch;
+  }
+
+  if (base_offset) {
+    __ Add(scratch, key, base_offset, r0);
+  }
+
+  return MemOperand(base, scratch);
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+                        isolate(), instr->hydrogen()->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
+    __ subi(result, sp, Operand(2 * kPointerSize));
+  } else {
+    // Check if the calling frame is an arguments adaptor frame.
+    __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ LoadP(result,
+             MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+
+    // Result is the frame pointer for the frame if not adapted, and the
+    // frame pointer of the real frame below the adaptor frame if adapted.
+    if (CpuFeatures::IsSupported(ISELECT)) {
+      __ isel(eq, result, scratch, fp);
+    } else {
+      Label done, adapted;
+      __ beq(&adapted);
+      __ mr(result, fp);
+      __ b(&done);
+
+      __ bind(&adapted);
+      __ mr(result, scratch);
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elem = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  __ cmp(fp, elem);
+  __ mov(result, Operand(scope()->num_parameters()));
+  __ beq(&done);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(result,
+           MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, result_in_receiver;
+
+  if (!instr->hydrogen()->known_function()) {
+    // Do not transform the receiver to object for strict mode
+    // functions or builtins.
+    __ LoadP(scratch,
+             FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    __ lwz(scratch,
+           FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+    __ andi(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
+                                 (1 << SharedFunctionInfo::kNativeBit)));
+    __ bne(&result_in_receiver, cr0);
+  }
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+  __ cmp(receiver, scratch);
+  __ beq(&global_object);
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ cmp(receiver, scratch);
+  __ beq(&global_object);
+
+  // Deoptimize if the receiver is not a JS object.
+  __ TestIfSmi(receiver, r0);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+  __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
+  DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+
+  __ b(&result_in_receiver);
+  __ bind(&global_object);
+  __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
+  __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+  __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+
+  if (result.is(receiver)) {
+    __ bind(&result_in_receiver);
+  } else {
+    Label result_ok;
+    __ b(&result_ok);
+    __ bind(&result_in_receiver);
+    __ mr(result, receiver);
+    __ bind(&result_ok);
+  }
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  DCHECK(receiver.is(r3));  // Used for parameter count.
+  DCHECK(function.is(r4));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  __ cmpli(length, Operand(kArgumentsLimit));
+  DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ push(receiver);
+  __ mr(receiver, length);
+  // The arguments start one pointer size past elements.
+  __ addi(elements, elements, Operand(1 * kPointerSize));
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ cmpi(length, Operand::Zero());
+  __ beq(&invoke);
+  __ mtctr(length);
+  __ bind(&loop);
+  __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
+  __ LoadPX(scratch, MemOperand(elements, r0));
+  __ push(scratch);
+  __ addi(length, length, Operand(-1));
+  __ bdnz(&loop);
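+  // bdnz decrements CTR and branches while it is non-zero, so the loop
+  // body runs exactly `length` times, pushing the arguments from last to
+  // first.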
+
+  __ bind(&invoke);
+  DCHECK(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+  // The number of arguments is stored in receiver which is r3, as expected
+  // by InvokeFunction.
+  ParameterCount actual(receiver);
+  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+                    safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->value();
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort(kDoPushArgumentNotImplementedForDoubleType);
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, ip);
+    __ push(argument_reg);
+  }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
+  Register result = ToRegister(instr->result());
+  if (info()->IsOptimizing()) {
+    __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in cp.
+    DCHECK(result.is(cp));
+  }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  __ Move(scratch0(), instr->hydrogen()->pairs());
+  __ push(scratch0());
+  __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
+  __ push(scratch0());
+  CallRuntime(Runtime::kDeclareGlobals, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count, int arity,
+                                 LInstruction* instr) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+
+  Register function_reg = r4;
+
+  LPointerMap* pointers = instr->pointer_map();
+
+  if (can_invoke_directly) {
+    // Change context.
+    __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+
+    // Always initialize new target and number of actual arguments.
+    __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+    __ mov(r3, Operand(arity));
+
+    bool is_self_call = function.is_identical_to(info()->closure());
+
+    // Invoke function.
+    if (is_self_call) {
+      __ CallSelf();
+    } else {
+      __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+      __ CallJSEntry(ip);
+    }
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+  }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  DCHECK(instr->context() != NULL);
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Deoptimize if not a heap number.
+  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+  __ cmp(scratch, ip);
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+
+  Label done;
+  Register exponent = scratch0();
+  scratch = no_reg;
+  __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it.
+  __ cmpwi(exponent, Operand::Zero());
+  // Move the input to the result if necessary.
+  __ Move(result, input);
+  __ bge(&done);
+
+  // Input is negative. Reverse its sign.
+  // Preserve the value of all registers.
+  {
+    PushSafepointRegistersScope scope(this);
+
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(r4) ? r3 : r4;
+    Register tmp2 = input.is(r5) ? r3 : r5;
+    Register tmp3 = input.is(r6) ? r3 : r6;
+    Register tmp4 = input.is(r7) ? r3 : r7;
+
+    // exponent: floating point exponent value.
+
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ b(&allocated);
+
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
+
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+                            instr->context());
+    // Set the pointer to the new heap number in tmp1.
+    if (!tmp1.is(r3)) __ mr(tmp1, r3);
+    // Restore input_reg after call to runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
+    STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+    __ clrlwi(exponent, exponent, Operand(1));  // clear sign bit
+    __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+    __ StoreToSafepointRegisterSlot(tmp1, result);
+  }
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitMathAbs(LMathAbs* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Label done;
+  __ cmpi(input, Operand::Zero());
+  __ Move(result, input);
+  __ bge(&done);
+  __ li(r0, Operand::Zero());  // clear xer
+  __ mtxer(r0);
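+  // XER is cleared first because the summary-overflow bit is sticky; after
+  // neg with SetOE, cr0 then reflects overflow from this negation only,
+  // which is possible only for the most negative representable value.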
+  __ neg(result, result, SetOE, SetRC);
+  // Deoptimize on overflow.
+  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+  __ bind(&done);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Label done;
+  __ cmpwi(input, Operand::Zero());
+  __ Move(result, input);
+  __ bge(&done);
+
+  // Deoptimize on overflow: negating kMinInt (0x80000000) is not
+  // representable as an int32.
+  __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+  __ cmpw(input, r0);
+  DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+
+  __ neg(result, result);
+  __ bind(&done);
+}
+#endif
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMathAbs* instr_;
+  };
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsDouble()) {
+    DoubleRegister input = ToDoubleRegister(instr->value());
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ fabs(result, input);
+#if V8_TARGET_ARCH_PPC64
+  } else if (r.IsInteger32()) {
+    EmitInteger32MathAbs(instr);
+  } else if (r.IsSmi()) {
+#else
+  } else if (r.IsSmiOrInteger32()) {
+#endif
+    EmitMathAbs(instr);
+  } else {
+    // Representation is tagged.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input = ToRegister(instr->value());
+    // Smi check.
+    __ JumpIfNotSmi(input, deferred->entry());
+    // If smi, handle it directly.
+    EmitMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register input_high = scratch0();
+  Register scratch = ip;
+  Label done, exact;
+
+  __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
+                   &exact);
+  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+  __ bind(&exact);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    __ cmpi(result, Operand::Zero());
+    __ bne(&done);
+    __ cmpwi(input_high, Operand::Zero());
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
+  DoubleRegister input_plus_dot_five = double_scratch1;
+  Register scratch1 = scratch0();
+  Register scratch2 = ip;
+  DoubleRegister dot_five = double_scratch0();
+  Label convert, done;
+
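+  // Math.round(x) is computed as floor(x + 0.5).  |x| <= 0.5 is handled
+  // inline below: NaN deoptimizes (the fcmpu yields "unordered"), exactly
+  // +0.5 rounds to 1, [-0.5, -0] rounds to -0 (deoptimizing when -0 must be
+  // preserved), and everything else in the range rounds to +0.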
+  __ LoadDoubleLiteral(dot_five, 0.5, r0);
+  __ fabs(double_scratch1, input);
+  __ fcmpu(double_scratch1, dot_five);
+  DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
+  // If input is in [-0.5, -0], the result is -0.
+  // If input is in [+0, +0.5[, the result is +0.
+  // If the input is +0.5, the result is 1.
+  __ bgt(&convert);  // Out of [-0.5, +0.5].
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+#if V8_TARGET_ARCH_PPC64
+    __ MovDoubleToInt64(scratch1, input);
+#else
+    __ MovDoubleHighToInt(scratch1, input);
+#endif
+    __ cmpi(scratch1, Operand::Zero());
+    // [-0.5, -0].
+    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+  }
+  __ fcmpu(input, dot_five);
+  if (CpuFeatures::IsSupported(ISELECT)) {
+    __ li(result, Operand(1));
+    __ isel(lt, result, r0, result);
+    __ b(&done);
+  } else {
+    Label return_zero;
+    __ bne(&return_zero);
+    __ li(result, Operand(1));  // +0.5.
+    __ b(&done);
+    // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
+    // flag kBailoutOnMinusZero.
+    __ bind(&return_zero);
+    __ li(result, Operand::Zero());
+    __ b(&done);
+  }
+
+  __ bind(&convert);
+  __ fadd(input_plus_dot_five, input, dot_five);
+  // Reuse dot_five (double_scratch0) as we no longer need this value.
+  __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
+                   double_scratch0(), &done, &done);
+  DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  DoubleRegister output_reg = ToDoubleRegister(instr->result());
+  __ frsp(output_reg, input_reg);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ fsqrt(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister temp = double_scratch0();
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label skip, done;
+
+  __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
+  __ fcmpu(input, temp);
+  __ bne(&skip);
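+  // The input is -Infinity: the spec demands +Infinity, which the sqrt path
+  // would not produce, so negate the -Infinity literal held in temp.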
+  __ fneg(result, temp);
+  __ b(&done);
+
+  // Add +0 to convert -0 to +0.
+  __ bind(&skip);
+  __ fadd(result, input, kDoubleRegZero);
+  __ fsqrt(result, result);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
+         ToDoubleRegister(instr->right()).is(d2));
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(d1));
+  DCHECK(ToDoubleRegister(instr->result()).is(d3));
+
+  if (exponent_type.IsSmi()) {
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    DCHECK(!r10.is(tagged_exponent));
+    __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(r10, ip);
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+    __ bind(&no_deopt);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(isolate(), MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+    __ CallStub(&stub);
+  }
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+  DoubleRegister double_scratch2 = double_scratch0();
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
+                                double_scratch2, temp1, temp2, scratch0());
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
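+  // Math.log is not generated inline; call the C implementation through an
+  // external reference (one double argument, double result).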
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
+                   1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
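+  // cntlzw counts the leading zero bits of the low 32 bits of the input,
+  // which is exactly the Math.clz32 semantics.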
+  __ cntlzw_(result, input);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(r4));
+  DCHECK(instr->HasPointerMap());
+
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(r4, no_reg, count, CALL_FUNCTION, generator);
+  } else {
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
+                      instr->arity(), instr);
+  }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
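+    // A tail call reuses the caller's stack frame, so the target is jumped
+    // to rather than called and no safepoint needs to be recorded.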
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ Jump(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ JumpToJSEntry(ip);
+    }
+  } else {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      __ Call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ CallJSEntry(ip);
+    }
+    generator.AfterCall();
+  }
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+  DCHECK(ToRegister(instr->function()).is(r4));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  // Change context.
+  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+  // Always initialize new target and number of actual arguments.
+  __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+  __ mov(r3, Operand(instr->arity()));
+
+  bool is_self_call = false;
+  if (instr->hydrogen()->function()->IsConstant()) {
+    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+    Handle<JSFunction> jsfun =
+        Handle<JSFunction>::cast(fun_const->handle(isolate()));
+    is_self_call = jsfun.is_identical_to(info()->closure());
+  }
+
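+  // A self-call can target the current code object directly instead of
+  // reloading the code entry field from the function.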
+  if (is_self_call) {
+    __ CallSelf();
+  } else {
+    __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+    __ CallJSEntry(ip);
+  }
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(r4));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  int arity = instr->arity();
+  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    Register slot_register = ToRegister(instr->temp_slot());
+    Register vector_register = ToRegister(instr->temp_vector());
+    DCHECK(slot_register.is(r6));
+    DCHECK(vector_register.is(r5));
+
+    AllowDeferredHandleDereference vector_structure_check;
+    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+    int index = vector->GetIndex(instr->hydrogen()->slot());
+
+    __ Move(vector_register, vector);
+    __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
+
+    Handle<Code> ic =
+        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ mov(r3, Operand(arity));
+    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(r4));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  __ mov(r3, Operand(instr->arity()));
+  if (instr->arity() == 1) {
+    // The allocation site is only needed when there is a length argument;
+    // that case may bail out to the runtime, which determines the correct
+    // elements kind from the site.
+    __ Move(r5, instr->hydrogen()->site());
+  } else {
+    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  }
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+      // A non-zero length argument creates holes, so look at the first
+      // argument and fall back to the holey stub when it is non-zero.
+      __ LoadP(r8, MemOperand(sp, 0));
+      __ cmpi(r8, Operand::Zero());
+      __ beq(&packed_case);
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      __ b(&done);
+      __ bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    __ bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  __ addi(code_object, code_object,
+          Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(code_object,
+            FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+    __ Add(result, base, ToInteger32(offset), r0);
+  } else {
+    Register offset = ToRegister(instr->offset());
+    __ add(result, base, offset);
+  }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  HStoreNamedField* hinstr = instr->hydrogen();
+  Representation representation = instr->representation();
+
+  Register object = ToRegister(instr->object());
+  Register scratch = scratch0();
+  HObjectAccess access = hinstr->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    Register value = ToRegister(instr->value());
+    MemOperand operand = MemOperand(object, offset);
+    __ StoreRepresentation(value, operand, representation, r0);
+    return;
+  }
+
+  __ AssertNotSmi(object);
+
+#if V8_TARGET_ARCH_PPC64
+  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+         IsInteger32(LConstantOperand::cast(instr->value())));
+#else
+  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+         IsSmi(LConstantOperand::cast(instr->value())));
+#endif
+  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DCHECK(!hinstr->has_transition());
+    DCHECK(!hinstr->NeedsWriteBarrier());
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    __ stfd(value, FieldMemOperand(object, offset));
+    return;
+  }
+
+  if (hinstr->has_transition()) {
+    Handle<Map> transition = hinstr->transition_map();
+    AddDeprecationDependency(transition);
+    __ mov(scratch, Operand(transition));
+    __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
+    if (hinstr->NeedsWriteBarrierForMap()) {
+      Register temp = ToRegister(instr->temp());
+      // Update the write barrier for the map field.
+      __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
+                           kSaveFPRegs);
+    }
+  }
+
+  // Do the store.
+  Register record_dest = object;
+  Register record_value = no_reg;
+  Register record_scratch = scratch;
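+  // record_dest/record_value/record_scratch describe the completed store for
+  // the write barrier; they are redirected below when the value is written
+  // to the out-of-object properties array.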
+#if V8_TARGET_ARCH_PPC64
+  if (FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    __ stfd(value, FieldMemOperand(object, offset));
+    if (hinstr->NeedsWriteBarrier()) {
+      record_value = ToRegister(instr->value());
+    }
+  } else {
+    if (representation.IsSmi() &&
+        hinstr->value()->representation().IsInteger32()) {
+      DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+      // 64-bit Smi optimization
+      // Store int value directly to upper half of the smi.
+      offset = SmiWordOffset(offset);
+      representation = Representation::Integer32();
+    }
+#endif
+    if (access.IsInobject()) {
+      Register value = ToRegister(instr->value());
+      MemOperand operand = FieldMemOperand(object, offset);
+      __ StoreRepresentation(value, operand, representation, r0);
+      record_value = value;
+    } else {
+      Register value = ToRegister(instr->value());
+      __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+      MemOperand operand = FieldMemOperand(scratch, offset);
+      __ StoreRepresentation(value, operand, representation, r0);
+      record_dest = scratch;
+      record_value = value;
+      record_scratch = object;
+    }
+#if V8_TARGET_ARCH_PPC64
+  }
+#endif
+
+  if (hinstr->NeedsWriteBarrier()) {
+    __ RecordWriteField(record_dest, offset, record_value, record_scratch,
+                        GetLinkRegisterState(), kSaveFPRegs,
+                        EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
+                        hinstr->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+  }
+
+  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
+  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Representation representation = instr->hydrogen()->length()->representation();
+  DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
+  DCHECK(representation.IsSmiOrInteger32());
+
+  Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
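+  // cc is the condition on which the check fails (and we deoptimize),
+  // phrased for a (length, index) comparison; it is commuted below when the
+  // operands are emitted in the opposite order.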
+  if (instr->length()->IsConstantOperand()) {
+    int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
+    Register index = ToRegister(instr->index());
+    if (representation.IsSmi()) {
+      __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
+    } else {
+      __ Cmplwi(index, Operand(length), r0);
+    }
+    cc = CommuteCondition(cc);
+  } else if (instr->index()->IsConstantOperand()) {
+    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
+    Register length = ToRegister(instr->length());
+    if (representation.IsSmi()) {
+      __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
+    } else {
+      __ Cmplwi(length, Operand(index), r0);
+    }
+  } else {
+    Register index = ToRegister(instr->index());
+    Register length = ToRegister(instr->length());
+    if (representation.IsSmi()) {
+      __ cmpl(length, index);
+    } else {
+      __ cmplw(length, index);
+    }
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+    Label done;
+    __ b(NegateCondition(cc), &done);
+    __ stop("eliminated bounds check failed");
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int base_offset = instr->base_offset();
+
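+  // The element address is external_pointer + (key << element_size_shift)
+  // + base_offset; constant keys are folded in at compile time.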
+  if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
+    Register address = scratch0();
+    DoubleRegister value(ToDoubleRegister(instr->value()));
+    if (key_is_constant) {
+      if (constant_key != 0) {
+        __ Add(address, external_pointer, constant_key << element_size_shift,
+               r0);
+      } else {
+        address = external_pointer;
+      }
+    } else {
+      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
+      __ add(address, external_pointer, r0);
+    }
+    if (elements_kind == FLOAT32_ELEMENTS) {
+      __ frsp(double_scratch0(), value);
+      __ stfs(double_scratch0(), MemOperand(address, base_offset));
+    } else {  // Storing doubles, not floats.
+      __ stfd(value, MemOperand(address, base_offset));
+    }
+  } else {
+    Register value(ToRegister(instr->value()));
+    MemOperand mem_operand =
+        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+                            constant_key, element_size_shift, base_offset);
+    switch (elements_kind) {
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+      case INT8_ELEMENTS:
+        if (key_is_constant) {
+          __ StoreByte(value, mem_operand, r0);
+        } else {
+          __ stbx(value, mem_operand);
+        }
+        break;
+      case INT16_ELEMENTS:
+      case UINT16_ELEMENTS:
+        if (key_is_constant) {
+          __ StoreHalfWord(value, mem_operand, r0);
+        } else {
+          __ sthx(value, mem_operand);
+        }
+        break;
+      case INT32_ELEMENTS:
+      case UINT32_ELEMENTS:
+        if (key_is_constant) {
+          __ StoreWord(value, mem_operand, r0);
+        } else {
+          __ stwx(value, mem_operand);
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  DoubleRegister value = ToDoubleRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = no_reg;
+  Register scratch = scratch0();
+  DoubleRegister double_scratch = double_scratch0();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+
+  // Calculate the effective address of the slot in the array to store the
+  // double value.
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
+  if (!key_is_constant) {
+    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+    __ add(scratch, elements, scratch);
+    elements = scratch;
+  }
+  if (!is_int16(base_offset)) {
+    __ Add(scratch, elements, base_offset, r0);
+    base_offset = 0;
+    elements = scratch;
+  }
+
+  if (instr->NeedsCanonicalization()) {
+    // Turn potential sNaN value into qNaN.
+    __ CanonicalizeNaN(double_scratch, value);
+    __ stfd(double_scratch, MemOperand(elements, base_offset));
+  } else {
+    __ stfd(value, MemOperand(elements, base_offset));
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+  HStoreKeyed* hinstr = instr->hydrogen();
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    DCHECK(!hinstr->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    // Even though the HStoreKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (hinstr->key()->representation().IsSmi()) {
+      __ SmiToPtrArrayOffset(scratch, key);
+    } else {
+      __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
+    }
+    __ add(scratch, elements, scratch);
+  }
+
+  Representation representation = hinstr->value()->representation();
+
+#if V8_TARGET_ARCH_PPC64
+  // 64-bit Smi optimization
+  if (representation.IsInteger32()) {
+    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+    // Store int value directly to upper half of the smi.
+    offset = SmiWordOffset(offset);
+  }
+#endif
+
+  __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
+                         r0);
+
+  if (hinstr->NeedsWriteBarrier()) {
+    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
+                                ? OMIT_SMI_CHECK
+                                : INLINE_SMI_CHECK;
+    // Compute address of modified element and store it into key register.
+    __ Add(key, store_base, offset, r0);
+    __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
+                   EMIT_REMEMBERED_SET, check_needed,
+                   hinstr->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // Dispatch by storage: typed (external) arrays, fast double arrays, or
+  // fast tagged/smi arrays.
+  if (instr->is_fixed_typed_array()) {
+    DoStoreKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+    DoStoreKeyedFixedDoubleArray(instr);
+  } else {
+    DoStoreKeyedFixedArray(instr);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+  class DeferredMaybeGrowElements final : public LDeferredCode {
+   public:
+    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMaybeGrowElements* instr_;
+  };
+
+  Register result = r3;
+  DeferredMaybeGrowElements* deferred =
+      new (zone()) DeferredMaybeGrowElements(this, instr);
+  LOperand* key = instr->key();
+  LOperand* current_capacity = instr->current_capacity();
+
+  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+  DCHECK(key->IsConstantOperand() || key->IsRegister());
+  DCHECK(current_capacity->IsConstantOperand() ||
+         current_capacity->IsRegister());
+
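+  // Growth (the deferred path) is required when key >= current capacity.
+  // Either operand may be a compile-time constant, in which case the
+  // comparison is folded accordingly.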
+  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    if (constant_key >= constant_capacity) {
+      // Deferred case.
+      __ b(deferred->entry());
+    }
+  } else if (key->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0);
+    __ ble(deferred->entry());
+  } else if (current_capacity->IsConstantOperand()) {
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0);
+    __ bge(deferred->entry());
+  } else {
+    __ cmpw(ToRegister(key), ToRegister(current_capacity));
+    __ bge(deferred->entry());
+  }
+
+  if (instr->elements()->IsRegister()) {
+    __ Move(result, ToRegister(instr->elements()));
+  } else {
+    __ LoadP(result, ToMemOperand(instr->elements()));
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = r3;
+  __ li(result, Operand::Zero());
+
+  // We have to call a stub.
+  {
+    PushSafepointRegistersScope scope(this);
+    if (instr->object()->IsRegister()) {
+      __ Move(result, ToRegister(instr->object()));
+    } else {
+      __ LoadP(result, ToMemOperand(instr->object()));
+    }
+
+    LOperand* key = instr->key();
+    if (key->IsConstantOperand()) {
+      __ LoadSmiLiteral(r6, ToSmi(LConstantOperand::cast(key)));
+    } else {
+      __ SmiTag(r6, ToRegister(key));
+    }
+
+    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+                               instr->hydrogen()->kind());
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(
+        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    __ StoreToSafepointRegisterSlot(result, result);
+  }
+
+  // Deopt on smi, which means the elements array changed to dictionary mode.
+  __ TestIfSmi(result, r0);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = instr->from_kind();
+  ElementsKind to_kind = instr->to_kind();
+
+  Label not_applicable;
+  __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ Cmpi(scratch, Operand(from_map), r0);
+  __ bne(&not_applicable);
+
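+  // A simple transition (e.g. smi-only -> tagged) only requires rewriting
+  // the map word; all other transitions go through the
+  // TransitionElementsKindStub.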
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ mov(new_map_reg, Operand(to_map));
+    __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
+              r0);
+    // Write barrier.
+    __ RecordWriteForMap(object_reg, new_map_reg, scratch,
+                         GetLinkRegisterState(), kDontSaveFPRegs);
+  } else {
+    DCHECK(ToRegister(instr->context()).is(cp));
+    DCHECK(object_reg.is(r3));
+    PushSafepointRegistersScope scope(this);
+    __ Move(r4, to_map);
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kLazyDeopt);
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+  __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r4));
+  DCHECK(ToRegister(instr->right()).is(r3));
+  StringAddStub stub(isolate(), instr->hydrogen()->flags(),
+                     instr->hydrogen()->pretenure_flag());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt final : public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new (zone()) DeferredStringCharCodeAt(this, instr);
+
+  StringCharLoadGenerator::Generate(
+      masm(), ToRegister(instr->string()), ToRegister(instr->index()),
+      ToRegister(instr->result()), deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ li(result, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  if (instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
+    __ push(scratch);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+                          instr->context());
+  __ AssertSmi(r3);
+  __ SmiUntag(r3);
+  __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode final : public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override {
+      codegen()->DoDeferredStringCharFromCode(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new (zone()) DeferredStringCharFromCode(this, instr);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  DCHECK(!char_code.is(result));
+
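+  // Fast path: look up one-byte char codes in the single character string
+  // cache; an undefined cache entry falls through to the deferred runtime
+  // call.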
+  __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
+  __ bgt(deferred->entry());
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+  __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
+  __ add(result, result, r0);
+  __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(result, ip);
+  __ beq(deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ li(result, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  DCHECK(output->IsDoubleRegister());
+  if (input->IsStackSlot()) {
+    Register scratch = scratch0();
+    __ LoadP(scratch, ToMemOperand(input));
+    __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
+  } else {
+    __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
+  }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+  __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI final : public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+                                       instr_->temp2(), SIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagI* instr_;
+  };
+
+  Register src = ToRegister(instr->value());
+  Register dst = ToRegister(instr->result());
+
+  DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
+#if V8_TARGET_ARCH_PPC64
+  __ SmiTag(dst, src);
+#else
+  __ SmiTagCheckOverflow(dst, src, r0);
+  __ BranchOnOverflow(deferred->entry());
+#endif
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+  class DeferredNumberTagU final : public LDeferredCode {
+   public:
+    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+                                       instr_->temp2(), UNSIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagU* instr_;
+  };
+
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
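+  // Values above Smi::kMaxValue cannot be tagged as a smi; box them in a
+  // heap number on the deferred path.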
+  __ Cmpli(input, Operand(Smi::kMaxValue), r0);
+  __ bgt(deferred->entry());
+  __ SmiTag(result, input);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+                                     LOperand* temp1, LOperand* temp2,
+                                     IntegerSignedness signedness) {
+  Label done, slow;
+  Register src = ToRegister(value);
+  Register dst = ToRegister(instr->result());
+  Register tmp1 = scratch0();
+  Register tmp2 = ToRegister(temp1);
+  Register tmp3 = ToRegister(temp2);
+  DoubleRegister dbl_scratch = double_scratch0();
+
+  if (signedness == SIGNED_INT32) {
+    // There was overflow, so bits 30 and 31 of the original integer
+    // disagree. Try to allocate a heap number in new space and store
+    // the value in there. If that fails, call the runtime system.
+    if (dst.is(src)) {
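+      // dst holds the overflowed SmiTag result.  Untagging sign-extends from
+      // bit 30 which, precisely because the tag overflowed, is the complement
+      // of the original sign bit; flipping bit 31 recovers the input value.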
+      __ SmiUntag(src, dst);
+      __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
+    }
+    __ ConvertIntToDouble(src, dbl_scratch);
+  } else {
+    __ ConvertUnsignedIntToDouble(src, dbl_scratch);
+  }
+
+  if (FLAG_inline_new) {
+    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
+    __ b(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ li(dst, Operand::Zero());
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    // NumberTagI and NumberTagD use the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // They only call Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(r3, dst);
+  }
+
+  // Done. Store the value held in dbl_scratch into the allocated heap
+  // number.
+  __ bind(&done);
+  __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD final : public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+
+  DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+  } else {
+    __ b(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ li(reg, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                               Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(r3, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  HChange* hchange = instr->hydrogen();
+  Register input = ToRegister(instr->value());
+  Register output = ToRegister(instr->result());
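+  // A uint32 input only fits in a smi if it does not exceed the smi payload
+  // range; otherwise deoptimize on overflow.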
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ TestUnsignedSmiCandidate(input, r0);
+    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
+  }
+#if !V8_TARGET_ARCH_PPC64
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      !hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ SmiTagCheckOverflow(output, input, r0);
+    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+  } else {
+#endif
+    __ SmiTag(output, input);
+#if !V8_TARGET_ARCH_PPC64
+  }
+#endif
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  if (instr->needs_check()) {
+    // If the input is a HeapObject, its tag bit is set and the value of
+    // scratch will be non-zero.
+    __ andi(scratch, input, Operand(kHeapObjectTag));
+    __ SmiUntag(result, input);
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+  } else {
+    __ SmiUntag(result, input);
+  }
+}
+
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+                                DoubleRegister result_reg,
+                                NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+  Register scratch = scratch0();
+  DCHECK(!result_reg.is(double_scratch0()));
+
+  Label convert, load_smi, done;
+
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+
+    // Heap number map check.
+    __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(scratch, ip);
+    if (can_convert_undefined_to_nan) {
+      __ bne(&convert);
+    } else {
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+    }
+    // load heap number
+    __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
+#if V8_TARGET_ARCH_PPC64
+      __ MovDoubleToInt64(scratch, result_reg);
+      // Rotate left by one so that -0.0 (only the sign bit set) becomes 1,
+      // allowing a simple equality compare.
+      __ rldicl(scratch, scratch, 1, 0);
+      __ cmpi(scratch, Operand(1));
+#else
+      __ MovDoubleToInt64(scratch, ip, result_reg);
+      __ cmpi(ip, Operand::Zero());
+      __ bne(&done);
+      __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
+#endif
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+    }
+    __ b(&done);
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+      // Convert undefined (and hole) to NaN.
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      __ cmp(input_reg, ip);
+      DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+      __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+      __ b(&done);
+    }
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+  }
+  // Smi to double register conversion
+  __ bind(&load_smi);
+  // scratch: untagged value of input_reg
+  __ ConvertIntToDouble(scratch, result_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Register input_reg = ToRegister(instr->value());
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->temp());
+  DoubleRegister double_scratch = double_scratch0();
+  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
+
+  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+  Label done;
+
+  // Heap number map check.
+  __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+  __ cmp(scratch1, ip);
+
+  if (instr->truncating()) {
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    Label no_heap_number, check_bools, check_false;
+    __ bne(&no_heap_number);
+    __ mr(scratch2, input_reg);
+    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ b(&done);
+
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
+    __ bind(&no_heap_number);
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(input_reg, ip);
+    __ bne(&check_bools);
+    __ li(input_reg, Operand::Zero());
+    __ b(&done);
+
+    __ bind(&check_bools);
+    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+    __ cmp(input_reg, ip);
+    __ bne(&check_false);
+    __ li(input_reg, Operand(1));
+    __ b(&done);
+
+    __ bind(&check_false);
+    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+    __ cmp(input_reg, ip);
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+    __ li(input_reg, Operand::Zero());
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+
+    __ lfd(double_scratch2,
+           FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // preserve heap number pointer in scratch2 for minus zero check below
+      __ mr(scratch2, input_reg);
+    }
+    __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
+                             double_scratch);
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ cmpi(input_reg, Operand::Zero());
+      __ bne(&done);
+      __ lwz(scratch1,
+             FieldMemOperand(scratch2, HeapNumber::kValueOffset +
+                                           Register::kExponentOffset));
+      __ cmpwi(scratch1, Operand::Zero());
+      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI final : public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  DCHECK(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
+
+    // Branch to deferred code if the input is a HeapObject.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+
+    __ SmiUntag(input_reg);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DoubleRegister result_reg = ToDoubleRegister(result);
+
+  HValue* value = instr->hydrogen()->value();
+  NumberUntagDMode mode = value->representation().IsSmi()
+                              ? NUMBER_CANDIDATE_IS_SMI
+                              : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+  EmitNumberUntagD(instr, input_reg, result_reg, mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+  DoubleRegister double_scratch = double_scratch0();
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+                             double_scratch);
+    // Deoptimize if the input wasn't an int32 (inside a double).
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ cmpi(result_reg, Operand::Zero());
+      __ bne(&done);
+#if V8_TARGET_ARCH_PPC64
+      __ MovDoubleToInt64(scratch1, double_input);
+#else
+      __ MovDoubleHighToInt(scratch1, double_input);
+#endif
+      __ cmpi(scratch1, Operand::Zero());
+      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+  DoubleRegister double_scratch = double_scratch0();
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+                             double_scratch);
+    // Deoptimize if the input wasn't an int32 (inside a double).
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ cmpi(result_reg, Operand::Zero());
+      __ bne(&done);
+#if V8_TARGET_ARCH_PPC64
+      __ MovDoubleToInt64(scratch1, double_input);
+#else
+      __ MovDoubleHighToInt(scratch1, double_input);
+#endif
+      __ cmpi(scratch1, Operand::Zero());
+      DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+      __ bind(&done);
+    }
+  }
+#if V8_TARGET_ARCH_PPC64
+  __ SmiTag(result_reg);
+#else
+  __ SmiTagCheckOverflow(result_reg, r0);
+  DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+#endif
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->value();
+  __ TestIfSmi(ToRegister(input), r0);
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ TestIfSmi(ToRegister(input), r0);
+    DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+  }
+}
+
+
+void LCodeGen::DoCheckArrayBufferNotNeutered(
+    LCheckArrayBufferNotNeutered* instr) {
+  Register view = ToRegister(instr->view());
+  Register scratch = scratch0();
+
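+  // Load the backing JSArrayBuffer and deoptimize if its WasNeutered bit is
+  // set: views on detached buffers must not be accessed.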
+  __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
+  __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+  __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
+  DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    __ cmpli(scratch, Operand(first));
+
+    // If there is only one type in the interval, check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+    } else {
+      DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmpli(scratch, Operand(last));
+        DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
+      __ andi(r0, scratch, Operand(mask));
+      DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
+                   cr0);
+    } else {
+      __ andi(scratch, scratch, Operand(mask));
+      __ cmpi(scratch, Operand(tag));
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+  Register reg = ToRegister(instr->value());
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
+  AllowDeferredHandleDereference smi_check;
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<Cell> cell = isolate()->factory()->NewCell(object);
+    __ mov(ip, Operand(cell));
+    __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
+    __ cmp(reg, ip);
+  } else {
+    __ Cmpi(reg, Operand(object), r0);
+  }
+  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  Register temp = ToRegister(instr->temp());
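+  // Ask the runtime to migrate the object to its updated map.  On failure
+  // the runtime returns a smi, which triggers the deoptimization below.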
+  {
+    PushSafepointRegistersScope scope(this);
+    __ push(object);
+    __ li(cp, Operand::Zero());
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(instr->pointer_map(), 1,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(r3, temp);
+  }
+  __ TestIfSmi(temp, r0);
+  DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  class DeferredCheckMaps final : public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+        : LDeferredCode(codegen), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    void Generate() override {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->IsStabilityCheck()) {
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }
+
+  Register object = ToRegister(instr->value());
+  Register map_reg = ToRegister(instr->temp());
+
+  __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    deferred = new (zone()) DeferredCheckMaps(this, instr, object);
+    __ bind(deferred->check_maps());
+  }
+
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+  Label success;
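+  // Compare against all but the last map, branching to success on the
+  // first match; the final comparison either falls through to the deferred
+  // migration path or deoptimizes with kWrongMap.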
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
+    __ CompareMap(map_reg, map, &success);
+    __ beq(&success);
+  }
+
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
+  __ CompareMap(map_reg, map, &success);
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    __ bne(deferred->entry());
+  } else {
+    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+  }
+
+  __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  Label is_smi, done, heap_number;
+
+  // Both smi and heap number cases are handled.
+  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
+
+  // Check for heap number
+  __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
+  __ beq(&heap_number);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+  __ li(result_reg, Operand::Zero());
+  __ b(&done);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
+  __ b(&done);
+
+  // smi
+  __ bind(&is_smi);
+  __ ClampUint8(result_reg, result_reg);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->value());
+  Register result_reg = ToRegister(instr->result());
+
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ MovDoubleHighToInt(result_reg, value_reg);
+  } else {
+    __ MovDoubleLowToInt(result_reg, value_reg);
+  }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
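+  // On PPC64 the two 32-bit halves are combined in a single GPR (using r0
+  // as scratch) before one transfer to the FPR; 32-bit PPC moves the halves
+  // separately.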
+#if V8_TARGET_ARCH_PPC64
+  __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
+#else
+  __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
+#endif
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate final : public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  // Allocate memory for the object.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ LoadIntLiteral(scratch, size - kHeapObjectTag);
+    } else {
+      __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
+    }
+    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
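+    // Fill the object backwards with the one-pointer filler map; the
+    // decremented offset in scratch doubles as the loop counter.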
+    Label loop;
+    __ bind(&loop);
+    __ subi(scratch, scratch, Operand(kPointerSize));
+    __ StorePX(scratch2, MemOperand(result, scratch));
+    __ cmpi(scratch, Operand::Zero());
+    __ bge(&loop);
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ LoadSmiLiteral(result, Smi::FromInt(0));
+
+  PushSafepointRegistersScope scope(this);
+  if (instr->size()->IsRegister()) {
+    Register size = ToRegister(instr->size());
+    DCHECK(!size.is(result));
+    __ SmiTag(size);
+    __ push(size);
+  } else {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
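+    // On 32-bit targets a large int32 size may not be representable as a
+    // Smi, so guard the push; on PPC64 any int32 fits.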
+#if !V8_TARGET_ARCH_PPC64
+    if (size >= 0 && size <= Smi::kMaxValue) {
+#endif
+      __ Push(Smi::FromInt(size));
+#if !V8_TARGET_ARCH_PPC64
+    } else {
+      // We should never get here at runtime => abort
+      __ stop("invalid allocation size");
+      return;
+    }
+#endif
+  }
+
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ Push(Smi::FromInt(flags));
+
+  CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  DCHECK(ToRegister(instr->value()).is(r3));
+  __ push(r3);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  DCHECK(ToRegister(instr->value()).is(r6));
+  DCHECK(ToRegister(instr->result()).is(r3));
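+  // Fast path: a Smi is always typeof "number"; only non-Smis need the
+  // TypeofStub call.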
+  Label end, do_call;
+  Register value_register = ToRegister(instr->value());
+  __ JumpIfNotSmi(value_register, &do_call);
+  __ mov(r3, Operand(isolate()->factory()->number_string()));
+  __ b(&end);
+  __ bind(&do_call);
+  TypeofStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&end);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+
+  Condition final_branch_condition =
+      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
+                   instr->type_literal());
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(instr, final_branch_condition);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
+                                 Register input, Handle<String> type_name) {
+  Condition final_branch_condition = kNoCondition;
+  Register scratch = scratch0();
+  Factory* factory = isolate()->factory();
+  if (String::Equals(type_name, factory->number_string())) {
+    __ JumpIfSmi(input, true_label);
+    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->string_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
+    final_branch_condition = lt;
+
+  } else if (String::Equals(type_name, factory->symbol_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->boolean_string())) {
+    __ CompareRoot(input, Heap::kTrueValueRootIndex);
+    __ beq(true_label);
+    __ CompareRoot(input, Heap::kFalseValueRootIndex);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->undefined_string())) {
+    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
+    __ beq(true_label);
+    __ JumpIfSmi(input, false_label);
+    // Check for undetectable objects => true.
+    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+    __ cmpi(r0, Operand::Zero());
+    final_branch_condition = ne;
+
+  } else if (String::Equals(type_name, factory->function_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
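+    // "function" requires a map that is callable but not undetectable:
+    // mask both bits and require exactly the callable bit.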
+    __ andi(scratch, scratch,
+            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    __ cmpi(scratch, Operand(1 << Map::kIsCallable));
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->object_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    __ beq(true_label);
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
+    __ blt(false_label);
+    // Check for callable or undetectable objects => false.
+    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ andi(r0, scratch,
+            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    __ cmpi(r0, Operand::Zero());
+    final_branch_condition = eq;
+
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
+  } else if (String::Equals(type_name, factory->type##_string())) {  \
+    __ JumpIfSmi(input, false_label);                                \
+    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
+    __ CompareRoot(scratch, Heap::k##Type##MapRootIndex);            \
+    final_branch_condition = eq;
+  SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+    // clang-format on
+
+  } else {
+    __ b(false_label);
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= Assembler::kInstrSize;
+      }
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
+  }
+
+  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  LoadContextFromDeferred(instr->context());
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck final : public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStackCheck* instr_;
+  };
+
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
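+    // Unsigned-compare sp against the stack limit; skip the StackCheck
+    // builtin while sp is still above the limit.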
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmpl(sp, ip);
+    __ bge(&done);
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(cp));
+    CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
+             instr);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new (zone()) DeferredStackCheck(this, instr);
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmpl(sp, ip);
+    __ blt(deferred_stack_check->entry());
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting call and the safepoint in
+    // the deferred code.
+  }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  DCHECK(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  __ TestIfSmi(r3, r0);
+  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+
+  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+  __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
+  DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
+
+  Label use_cache, call_runtime;
+  Register null_value = r8;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
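+  // Take the fast path if the map (and each map up the prototype chain to
+  // null) has a valid enum cache; otherwise call into the runtime.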
+  __ CheckEnumCache(null_value, &call_runtime);
+
+  __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ b(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(r3);
+  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
+
+  __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+  __ cmp(r4, ip);
+  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  Label load_cache, done;
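+  // An EnumLength of zero means there is nothing cached to enumerate, so
+  // return the empty fixed array instead of loading the descriptor cache.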
+  __ EnumLength(result, map);
+  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
+  __ bne(&load_cache);
+  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+  __ b(&done);
+
+  __ bind(&load_cache);
+  __ LoadInstanceDescriptors(map, result);
+  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+  __ cmpi(result, Operand::Zero());
+  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  Register map = ToRegister(instr->map());
+  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  __ cmp(map, scratch0());
+  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result, Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ Push(object, index);
+  __ li(cp, Operand::Zero());
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(instr->pointer_map(), 2,
+                               Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble final : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
+                              Register result, Register object, Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {}
+    void Generate() override {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  DeferredLoadMutableDouble* deferred;
+  deferred = new (zone())
+      DeferredLoadMutableDouble(this, instr, result, object, index);
+
+  Label out_of_object, done;
+
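+  // The low bit of the Smi-encoded index flags a mutable HeapNumber field,
+  // which must be boxed via the deferred runtime call; the remaining bits
+  // hold the signed field index (negative for out-of-object properties).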
+  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
+  __ bne(deferred->entry(), cr0);
+  __ ShiftRightArithImm(index, index, 1);
+
+  __ cmpi(index, Operand::Zero());
+  __ blt(&out_of_object);
+
+  __ SmiToPtrArrayOffset(r0, index);
+  __ add(scratch, object, r0);
+  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+  __ b(&done);
+
+  __ bind(&out_of_object);
+  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // Index is equal to the negated out-of-object property index plus 1.
+  __ SmiToPtrArrayOffset(r0, index);
+  __ sub(scratch, result, r0);
+  __ LoadP(result,
+           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+  Register context = ToRegister(instr->context());
+  __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+  Handle<ScopeInfo> scope_info = instr->scope_info();
+  __ Push(scope_info);
+  __ push(ToRegister(instr->function()));
+  CallRuntime(Runtime::kPushBlockContext, instr);
+  RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/ppc/lithium-codegen-ppc.h b/src/crankshaft/ppc/lithium-codegen-ppc.h
new file mode 100644
index 0000000..b0f016d
--- /dev/null
+++ b/src/crankshaft/ppc/lithium-codegen-ppc.h
@@ -0,0 +1,353 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
+#define V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
+
+#include "src/ast/scopes.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
+#include "src/crankshaft/ppc/lithium-ppc.h"
+#include "src/deoptimizer.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen : public LCodeGenBase {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : LCodeGenBase(chunk, assembler, info),
+        jump_table_(4, info->zone()),
+        scope_(info->scope()),
+        deferred_(8, info->zone()),
+        frame_is_built_(false),
+        safepoints_(info->zone()),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 || info()->is_non_deferred_calling() ||
+           !info()->IsStub() || info()->requires_frame();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
+  LinkRegisterStatus GetLinkRegisterState() const {
+    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+  }
+
+  // Support for converting LOperands to assembler types.
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LConstantOperand must be an Integer32 or Smi
+  void EmitLoadIntegerConstant(LConstantOperand* const_op, Register dst);
+
+  // LOperand must be a double register.
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  intptr_t ToRepresentation(LConstantOperand* op,
+                            const Representation& r) const;
+  int32_t ToInteger32(LConstantOperand* op) const;
+  Smi* ToSmi(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+  MemOperand ToHighMemOperand(LOperand* op) const;
+
+  bool IsInteger32(LConstantOperand* op) const;
+  bool IsSmi(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+                             LOperand* temp1, LOperand* temp2,
+                             IntegerSignedness signedness);
+
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocate(LAllocate* instr);
+  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
+                                   Register object, Register index);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  MemOperand PrepareKeyedOperand(Register key, Register base,
+                                 bool key_is_constant, bool key_is_tagged,
+                                 int constant_key, int element_size_shift,
+                                 int base_offset);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  LanguageMode language_mode() const { return info()->language_mode(); }
+
+  Scope* scope() const { return scope_; }
+
+  Register scratch0() { return kLithiumScratch; }
+  DoubleRegister double_scratch0() { return kScratchDoubleReg; }
+
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true, Label* if_false,
+                       Handle<String> class_name, Register input,
+                       Register temporary, Register temporary2);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  void GenerateBodyInstructionPre(LInstruction* instr) override;
+  bool GeneratePrologue();
+  bool GenerateDeferredCode();
+  bool GenerateJumpTable();
+  bool GenerateSafepointTable();
+
+  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+  void GenerateOsrPrologue();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+                       LInstruction* instr, SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* function, int num_arguments,
+                   LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+  void CallRuntime(Runtime::FunctionId id, int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, function->nargs, instr);
+  }
+
+  void LoadContextFromDeferred(LOperand* context);
+  void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+                               LInstruction* instr, LOperand* context);
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in r4.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count, int arity,
+                         LInstruction* instr);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason,
+                    Deoptimizer::BailoutType bailout_type, CRegister cr = cr7);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason, CRegister cr = cr7);
+
+  void AddToTranslation(LEnvironment* environment, Translation* translation,
+                        LOperand* op, bool is_tagged, bool is_uint32,
+                        int* object_index_pointer,
+                        int* dematerialized_index_pointer);
+
+  Register ToRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
+
+  MemOperand BuildSeqStringOperand(Register string, LOperand* index,
+                                   String::Encoding encoding);
+
+  void EmitMathAbs(LMathAbs* instr);
+#if V8_TARGET_ARCH_PPC64
+  void EmitInteger32MathAbs(LMathAbs* instr);
+#endif
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+                       int arguments, Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments,
+                                    Safepoint::DeoptMode mode);
+
+  void RecordAndWritePosition(int position) override;
+
+  static Condition TokenToCondition(Token::Value op);
+  void EmitGoto(int block);
+
+  // EmitBranch expects to be the last instruction of a block.
+  template <class InstrType>
+  void EmitBranch(InstrType instr, Condition condition, CRegister cr = cr7);
+  template <class InstrType>
+  void EmitTrueBranch(InstrType instr, Condition condition, CRegister cr = cr7);
+  template <class InstrType>
+  void EmitFalseBranch(InstrType instr, Condition condition,
+                       CRegister cr = cr7);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+                        DoubleRegister result, NumberUntagDMode mode);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input,
+                         Handle<String> type_name);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+                         SmiCheck check_needed);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object, Register result, Register source,
+                    int* offset, AllocationSiteMode mode);
+
+  void EnsureSpaceForLazyDeopt(int space_needed) override;
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+  template <class T>
+  void EmitVectorStoreICRegisters(T* instr);
+
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+  Scope* const scope_;
+  ZoneList<LDeferredCode*> deferred_;
+  bool frame_is_built_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
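+  // While a PushSafepointRegistersScope is active, register state is saved
+  // via StoreRegistersStateStub and restored on scope exit, so safepoints
+  // recorded inside the scope can refer to register slots.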
+  class PushSafepointRegistersScope final BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+      StoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->CallStub(&stub);
+    }
+
+    ~PushSafepointRegistersScope() {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      RestoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->CallStub(&stub);
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode : public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() {}
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  int instruction_index_;
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_PPC_LITHIUM_CODEGEN_PPC_H_
diff --git a/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc b/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc
new file mode 100644
index 0000000..4e24980
--- /dev/null
+++ b/src/crankshaft/ppc/lithium-gap-resolver-ppc.cc
@@ -0,0 +1,287 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
+
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+static const Register kSavedValueRegister = {11};
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner),
+      moves_(32, owner->zone()),
+      root_index_(0),
+      in_cycle_(false),
+      saved_destination_(NULL) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  DCHECK(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+      PerformMove(i);
+      if (in_cycle_) {
+        RestoreValue();
+      }
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      DCHECK(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+
+  // We can only find a cycle, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+  // the starting move, we break the cycle.  All moves are then unblocked,
+  // and the starting move is completed by writing the spilled value to
+  // its destination.  All other moves from the spilled source have been
+  // completed prior to breaking the cycle.
+  // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) can require the value saved in the scratch
+  // register to itself be spilled to the stack, to free up that register.
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack-allocated local.  Multiple moves can
+  // be pending because this function is recursive.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move it must be moves_[root_index_]
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    DCHECK(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+  // We save in a register the value that should end up in the source of
+  // moves_[root_index_].  After performing all moves in the tree rooted
+  // in that move, we save the value to that source.
+  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  DCHECK(!in_cycle_);
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+  if (source->IsRegister()) {
+    __ mr(kSavedValueRegister, cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    __ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ fmr(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ lfd(kScratchDoubleReg, cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+  // This move will be done by restoring the saved value to the destination.
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+  DCHECK(in_cycle_);
+  DCHECK(saved_destination_ != NULL);
+
+  // Spilled value is in kSavedValueRegister or kScratchDoubleReg.
+  if (saved_destination_->IsRegister()) {
+    __ mr(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+  } else if (saved_destination_->IsStackSlot()) {
+    __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+  } else if (saved_destination_->IsDoubleRegister()) {
+    __ fmr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
+  } else if (saved_destination_->IsDoubleStackSlot()) {
+    __ stfd(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
+  } else {
+    UNREACHABLE();
+  }
+
+  in_cycle_ = false;
+  saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+
+  if (source->IsRegister()) {
+    Register source_register = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mr(cgen_->ToRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      __ StoreP(source_register, cgen_->ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ LoadP(cgen_->ToRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
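+      // While breaking a cycle kSavedValueRegister may hold the spilled
+      // value, so route stack-to-stack moves through ip instead.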
+      if (in_cycle_) {
+        __ LoadP(ip, source_operand);
+        __ StoreP(ip, destination_operand);
+      } else {
+        __ LoadP(kSavedValueRegister, source_operand);
+        __ StoreP(kSavedValueRegister, destination_operand);
+      }
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      if (cgen_->IsInteger32(constant_source)) {
+        cgen_->EmitLoadIntegerConstant(constant_source, dst);
+      } else {
+        __ Move(dst, cgen_->ToHandle(constant_source));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      DoubleRegister result = cgen_->ToDoubleRegister(destination);
+      double v = cgen_->ToDouble(constant_source);
+      __ LoadDoubleLiteral(result, v, ip);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      if (cgen_->IsInteger32(constant_source)) {
+        cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister);
+      } else {
+        __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
+      }
+      __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ fmr(cgen_->ToDoubleRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ stfd(source_register, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ lfd(cgen_->ToDoubleRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+// kScratchDoubleReg was used to break the cycle,
+// but kSavedValueRegister is free.
+#if V8_TARGET_ARCH_PPC64
+        __ ld(kSavedValueRegister, source_operand);
+        __ std(kSavedValueRegister, destination_operand);
+#else
+        MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
+        MemOperand destination_high_operand =
+            cgen_->ToHighMemOperand(destination);
+        __ lwz(kSavedValueRegister, source_operand);
+        __ stw(kSavedValueRegister, destination_operand);
+        __ lwz(kSavedValueRegister, source_high_operand);
+        __ stw(kSavedValueRegister, destination_high_operand);
+#endif
+      } else {
+        __ lfd(kScratchDoubleReg, source_operand);
+        __ stfd(kScratchDoubleReg, destination_operand);
+      }
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+#undef __
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/ppc/lithium-gap-resolver-ppc.h b/src/crankshaft/ppc/lithium-gap-resolver-ppc.h
new file mode 100644
index 0000000..6eeea5e
--- /dev/null
+++ b/src/crankshaft/ppc/lithium-gap-resolver-ppc.h
@@ -0,0 +1,58 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+#define V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver final BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
diff --git a/src/crankshaft/ppc/lithium-ppc.cc b/src/crankshaft/ppc/lithium-ppc.cc
new file mode 100644
index 0000000..63aead7
--- /dev/null
+++ b/src/crankshaft/ppc/lithium-ppc.cc
@@ -0,0 +1,2587 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/ppc/lithium-ppc.h"
+
+#include <sstream>
+
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/ppc/lithium-codegen-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                           \
+  void L##type::CompileToNative(LCodeGen* generator) { \
+    generator->Do##type(this);                         \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Inputs operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
+  DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD:
+      return "add-d";
+    case Token::SUB:
+      return "sub-d";
+    case Token::MUL:
+      return "mul-d";
+    case Token::DIV:
+      return "div-d";
+    case Token::MOD:
+      return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD:
+      return "add-t";
+    case Token::SUB:
+      return "sub-t";
+    case Token::MUL:
+      return "mul-t";
+    case Token::MOD:
+      return "mod-t";
+    case Token::DIV:
+      return "div-t";
+    case Token::BIT_AND:
+      return "bit-and-t";
+    case Token::BIT_OR:
+      return "bit-or-t";
+    case Token::BIT_XOR:
+      return "bit-xor-t";
+    case Token::ROR:
+      return "ror-t";
+    case Token::SHL:
+      return "shl-t";
+    case Token::SAR:
+      return "sar-t";
+    case Token::SHR:
+      return "shr-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
+              true_block_id(), false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(), true_block_id(),
+              false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+
+void LCallFunction::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  function()->PrintTo(stream);
+  if (hydrogen()->HasVectorAndSlot()) {
+    stream->Add(" (type-feedback-vector ");
+    temp_vector()->PrintTo(stream);
+    stream->Add(" ");
+    temp_slot()->PrintTo(stream);
+    stream->Add(")");
+  }
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  std::ostringstream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.str().c_str());
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d]", base_offset());
+  } else {
+    stream->Add("]");
+  }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d] <- ", base_offset());
+  } else {
+    stream->Add("] <- ");
+  }
+
+  if (value() == NULL) {
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
+           hydrogen()->value()->representation().IsDouble());
+    stream->Add("<the hole(nan)>");
+  } else {
+    value()->PrintTo(stream);
+  }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Skip a slot if allocating for a double-width slot.
+  if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
+  return spill_slot_count_++;
+}
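+
+// A worked example of the indexing above (a reading aid, not a new
+// contract): starting from spill_slot_count_ == 0, a general slot, then a
+// double slot, then another general slot get indices 0, 2 and 3; slot 1
+// is the extra word reserved for the double-width value.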
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  int index = GetNextSpillIndex(kind);
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    DCHECK(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+  DCHECK(is_unused());
+  chunk_ = new (zone()) LPlatformChunk(info(), graph());
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new (zone())
+      LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value,
+             new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value, new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                              LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new (zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new (zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new (zone())
+             LUnallocated(LUnallocated::NONE, LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : Use(value, new (zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
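+
+// Summary of the Use* helpers above (descriptive only; the definitions
+// above are authoritative):
+//   Use(v)                - no constraint (NONE); the allocator decides.
+//   UseAtStart(v)         - as Use, but the value is only live at the
+//                           start of the instruction.
+//   UseRegister(v)        - must be in a register (MUST_HAVE_REGISTER).
+//   UseRegisterAtStart(v) - like UseRegister, but its register may be
+//                           reused for the result once the instruction
+//                           has started.
+//   UseTempRegister(v)    - a register the instruction may clobber
+//                           (WRITABLE_REGISTER).
+//   UseFixed(v, reg), UseFixedDouble(v, dreg)
+//                         - pinned to a specific register.
+//   UseOrConstant*, UseRegisterOrConstant*, UseConstant, UseAny
+//                         - additionally allow a constant operand when the
+//                           value is a constant.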
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr, int index) {
+  return Define(instr,
+                new (zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new (zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+                                         Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  instr->set_environment(CreateEnvironment(
+      hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  // If the instruction does not have observable side effects, lazy
+  // deoptimization after the call will try to deoptimize to the point
+  // before the call. Thus we still need to attach an environment to the
+  // call even if the call sequence cannot deoptimize eagerly.
+  bool needs_environment = (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+                           !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
+  }
+
+  return instr;
+}
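+
+// Typical usage, as seen throughout this file: pin the result to the
+// return register and then mark the instruction, e.g.
+//   return MarkAsCall(DefineFixed(result, r3), instr);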
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  DCHECK(!instr->HasPointerMap());
+  instr->set_pointer_map(new (zone()) LPointerMap(zone()));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+  LUnallocated* operand =
+      new (zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new (zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new (zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new (zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+
+    HValue* right_value = instr->right();
+    LOperand* right = NULL;
+    int constant_value = 0;
+    bool does_deopt = false;
+    if (right_value->IsConstant()) {
+      HConstant* constant = HConstant::cast(right_value);
+      right = chunk_->DefineConstantOperand(constant);
+      constant_value = constant->Integer32Value() & 0x1f;
+      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+      // truncated to smi.
+      if (instr->representation().IsSmi() && constant_value > 0) {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
+    } else {
+      right = UseRegisterAtStart(right_value);
+    }
+
+    // In addition, a logical shift (SHR) by a constant 0 deoptimizes if
+    // the result cannot be truncated to int32.
+    if (op == Token::SHR && constant_value == 0) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    }
+
+    LInstruction* result =
+        DefineAsRegister(new (zone()) LShiftI(op, left, right, does_deopt));
+    return does_deopt ? AssignEnvironment(result) : result;
+  } else {
+    return DoArithmeticT(op, instr);
+  }
+}
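+
+// Illustrative cases of the deopt logic above: for SHR by a constant 0,
+// an input such as 0x80000000 is outside signed int32 range, so the shift
+// deoptimizes unless every use truncates to uint32 (kUint32). For a
+// smi-representation SHL by a nonzero constant, the result may no longer
+// fit a smi, so it deoptimizes unless every use truncates to smi.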
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
+  if (op == Token::MOD) {
+    LOperand* left = UseFixedDouble(instr->left(), d1);
+    LOperand* right = UseFixedDouble(instr->right(), d2);
+    LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+    // We call a C function for double modulo. It can't trigger a GC. We
+    // need to use a fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    return MarkAsCall(DefineFixedDouble(result, d1), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+    return DefineAsRegister(result);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left_operand = UseFixed(left, r4);
+  LOperand* right_operand = UseFixed(right, r3);
+  LArithmeticT* result =
+      new (zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  DCHECK(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    DCHECK(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    DCHECK(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      DCHECK(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    DCHECK(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment, it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new (zone()) LDummy());
+    } else {
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new (zone())
+                               LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new (zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    HBasicBlock* successor;
+    if (current->IsControlInstruction() &&
+        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+        successor != NULL) {
+      instr = new (zone()) LGoto(successor);
+    } else {
+      instr = current->CompileToLithium(this);
+    }
+  }
+
+  argument_count_ += current->argument_delta();
+  DCHECK(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    AddInstruction(instr, current);
+  }
+
+  current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+                                   HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+  instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+  // Make sure that the lithium instruction has either no fixed register
+  // constraints in temps or the result OR no uses that are only used at
+  // start. If this invariant doesn't hold, the register allocator can decide
+  // to insert a split of a range immediately before the instruction due to an
+  // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+  // the same register that is used for the use-at-start.
+  // See https://code.google.com/p/chromium/issues/detail?id=201590
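+  //
+  // Illustration (hypothetical registers): suppose this instruction's
+  // output is fixed to r3 while an unrelated value currently lives in r3.
+  // That value's range is split just before the instruction, and since
+  // the allocator sees no interference with a use-at-start input here, it
+  // may park the split child in the input's register, clobbering the
+  // input while it is still needed.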
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
+    int fixed = 0;
+    int used_at_start = 0;
+    for (UseIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->IsUsedAtStart()) ++used_at_start;
+    }
+    if (instr->Output() != NULL) {
+      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+    }
+    for (TempIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->HasFixedPolicy()) ++fixed;
+    }
+    DCHECK(fixed == 0 || used_at_start == 0);
+  }
+#endif
+
+  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+    instr = AssignPointerMap(instr);
+  }
+  if (FLAG_stress_environments && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+  chunk_->AddInstruction(instr, current_block_);
+
+  if (instr->IsCall() || instr->IsPrologue()) {
+    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+    if (hydrogen_val->HasObservableSideEffects()) {
+      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+      sim->ReplayEnvironment(current_block_->last_environment());
+      hydrogen_value_for_lazy_bailout = sim;
+    }
+    LInstruction* bailout = AssignEnvironment(new (zone()) LLazyBailout());
+    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+    chunk_->AddInstruction(bailout, current_block_);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+  return new (zone()) LPrologue();
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new (zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+                   type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+  LInstruction* branch = new (zone()) LBranch(UseRegister(value));
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
+  }
+  return branch;
+}
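+
+// For instance (illustrative): branching on an untagged int32 or on a
+// value statically typed as boolean is an "easy case" and needs no
+// environment. A tagged value with incomplete type feedback can see an
+// unexpected type at run time, so the branch gets an environment to
+// deoptimize with.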
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new (zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new (zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* value = UseRegister(instr->value());
+  return DefineAsRegister(new (zone()) LArgumentsLength(value));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new (zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* left =
+      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+  LOperand* right =
+      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+    HHasInPrototypeChainAndBranch* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* prototype = UseRegister(instr->prototype());
+  LHasInPrototypeChainAndBranch* result =
+      new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegisterAtStart(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new (zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), r4);
+  LOperand* receiver = UseFixed(instr->receiver(), r3);
+  LOperand* length = UseFixed(instr->length(), r5);
+  LOperand* elements = UseFixed(instr->elements(), r6);
+  LApplyArguments* result =
+      new (zone()) LApplyArguments(function, receiver, length, elements);
+  return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+  int argc = instr->OperandCount();
+  for (int i = 0; i < argc; ++i) {
+    LOperand* argument = Use(instr->argument(i));
+    AddInstruction(new (zone()) LPushArgument(argument), instr);
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+    HStoreCodeEntry* store_code_entry) {
+  LOperand* function = UseRegister(store_code_entry->function());
+  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+  return new (zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(new (zone())
+                          LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses() ? NULL
+                            : DefineAsRegister(new (zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new (zone()) LContext, cp);
+  }
+
+  return DefineAsRegister(new (zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(new (zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(HCallJSFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), r4);
+
+  LCallJSFunction* result = new (zone()) LCallJSFunction(function);
+
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
+  CallInterfaceDescriptor descriptor = instr->descriptor();
+
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  // Target
+  ops.Add(target, zone());
+  // Context
+  LOperand* op = UseFixed(instr->OperandAt(1), cp);
+  ops.Add(op, zone());
+  // Other register parameters
+  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+       i < instr->OperandCount(); i++) {
+    op =
+        UseFixed(instr->OperandAt(i),
+                 descriptor.GetRegisterParameter(
+                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result =
+      new (zone()) LCallWithDescriptor(descriptor, ops, zone());
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), r4);
+  LInvokeFunction* result = new (zone()) LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, r3), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFloor* result = new (zone()) LMathFloor(input);
+  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = TempDoubleRegister();
+  LMathRound* result = new (zone()) LMathRound(input, temp);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+  Representation r = instr->value()->representation();
+  LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+                          ? NULL
+                          : UseFixed(instr->context(), cp);
+  LOperand* input = UseRegister(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LMathAbs(context, input));
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), d1);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathClz32* result = new (zone()) LMathClz32(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LOperand* double_temp = TempDoubleRegister();
+  LMathExp* result = new (zone()) LMathExp(input, double_temp, temp1, temp2);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathSqrt* result = new (zone()) LMathSqrt(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathPowHalf* result = new (zone()) LMathPowHalf(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* constructor = UseFixed(instr->constructor(), r4);
+  LCallNewArray* result = new (zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), r4);
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(r6);
+    vector = FixedTemp(r5);
+  }
+
+  LCallFunction* call =
+      new (zone()) LCallFunction(context, function, slot, vector);
+  return MarkAsCall(DefineFixed(call, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    return DefineAsRegister(new (zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LDivByPowerOf2I(dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+       divisor != 1 && divisor != -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
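+
+// Examples of the conditions above (assuming kBailoutOnMinusZero is not
+// set): "x / 4" where every use truncates to int32 needs no environment;
+// "x / -1" with kCanOverflow needs one, since kMinInt / -1 overflows;
+// "x / 8" whose exact result is observed needs one, because a nonzero
+// remainder must deoptimize.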
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LDivByConstI(dividend, divisor));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LDivI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+      (!instr->IsMathFloorOfDiv() &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LFlooringDivByPowerOf2I(dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp =
+      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive)))
+          ? NULL
+          : TempRegister();
+  LInstruction* result = DefineAsRegister(
+      new (zone()) LFlooringDivByConstI(dividend, divisor, temp));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
+    return DoFlooringDivByConstI(instr);
+  } else {
+    return DoFlooringDivI(instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineSameAsFirst(new (zone()) LModByPowerOf2I(dividend, divisor));
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LModByConstI(dividend, divisor));
+  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LModI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
+  } else {
+    return DoArithmeticT(Token::MOD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    HValue* left = instr->BetterLeftOperand();
+    HValue* right = instr->BetterRightOperand();
+    LOperand* left_op;
+    LOperand* right_op;
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+    int32_t constant_value = 0;
+    if (right->IsConstant()) {
+      HConstant* constant = HConstant::cast(right);
+      constant_value = constant->Integer32Value();
+      // The constants -1, 0 and 1 can stay constant operands even if the
+      // result can overflow; other constants may only stay constant
+      // operands when overflow is impossible.
+      if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+        left_op = UseRegisterAtStart(left);
+        right_op = UseConstant(right);
+      } else {
+        if (bailout_on_minus_zero) {
+          left_op = UseRegister(left);
+        } else {
+          left_op = UseRegisterAtStart(left);
+        }
+        right_op = UseRegister(right);
+      }
+    } else {
+      if (bailout_on_minus_zero) {
+        left_op = UseRegister(left);
+      } else {
+        left_op = UseRegisterAtStart(left);
+      }
+      right_op = UseRegister(right);
+    }
+    LMulI* mul = new (zone()) LMulI(left_op, right_op);
+    if (right_op->IsConstantOperand()
+            ? ((can_overflow && constant_value == -1) ||
+               (bailout_on_minus_zero && constant_value <= 0))
+            : (can_overflow || bailout_on_minus_zero)) {
+      AssignEnvironment(mul);
+    }
+    return DefineAsRegister(mul);
+
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
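+
+// Example of the constant handling above: "x * 3" without kCanOverflow
+// keeps 3 as a constant operand, while "x * 3" with kCanOverflow moves it
+// into a register; only -1, 0 and 1 remain constant operands when an
+// overflow check is required.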
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+    if (instr->left()->IsConstant() &&
+        !instr->CheckFlag(HValue::kCanOverflow)) {
+      // If lhs is constant, do reverse subtraction instead.
+      return DoRSub(instr);
+    }
+
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LSubI* sub = new (zone()) LSubI(left, right);
+    LInstruction* result = DefineAsRegister(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+
+  // Note: The lhs of the subtraction becomes the rhs of the
+  // reverse-subtraction.
+  LOperand* left = UseRegisterAtStart(instr->right());
+  LOperand* right = UseOrConstantAtStart(instr->left());
+  LRSubI* rsb = new (zone()) LRSubI(left, right);
+  LInstruction* result = DefineAsRegister(rsb);
+  return result;
+}
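+
+// Example: for "2 - x" (constant lhs, no overflow check) DoSub above
+// dispatches here, yielding LRSubI(x, 2); the operands are swapped so a
+// reverse-subtract instruction can compute 2 - x directly.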
+
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+  LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+  LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+  LOperand* addend_op = UseRegisterAtStart(addend);
+  return DefineSameAsFirst(
+      new (zone()) LMultiplyAddD(addend_op, multiplier_op, multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
+  LOperand* minuend_op = UseRegisterAtStart(minuend);
+  LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+  LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+
+  return DefineSameAsFirst(
+      new (zone()) LMultiplySubD(minuend_op, multiplier_op, multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    LAddI* add = new (zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsExternal()) {
+    DCHECK(instr->IsConsistentExternalRepresentation());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LAddI* add = new (zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    left = UseRegisterAtStart(instr->left());
+    right = UseRegisterAtStart(instr->right());
+  }
+  return DefineAsRegister(new (zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  DCHECK(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), d1);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), d2)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+  LPower* result = new (zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, d3), instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r4);
+  LOperand* right = UseFixed(instr->right(), r3);
+  LCmpT* result = new (zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new (zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new (zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new (zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+    HCompareMinusZeroAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  LOperand* scratch = TempRegister();
+  return new (zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new (zone()) LIsStringAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new (zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r4);
+  LOperand* right = UseFixed(instr->right(), r3);
+  LStringCompareAndBranch* result =
+      new (zone()) LStringCompareAndBranch(context, left, right);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LHasInstanceTypeAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new (zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new (zone())
+      LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+  return new (zone()) LClassOfTestAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+  LOperand* map = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new (zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new (zone()) LSeqStringGetChar(string, index));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = FLAG_debug_code
+                        ? UseRegisterAtStart(instr->index())
+                        : UseRegisterOrConstantAtStart(instr->index());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+  return new (zone()) LSeqStringSetChar(context, string, index, value);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+                         ? UseRegisterOrConstantAtStart(instr->length())
+                         : UseRegisterAtStart(instr->length());
+  LInstruction* result = new (zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+    HBoundsCheckBaseIndexInformation* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception). There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { return NULL; }
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  HValue* val = instr->value();
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(val);
+      return DefineSameAsFirst(new (zone()) LDummyUse(value));
+    }
+    from = Representation::Tagged();
+  }
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result =
+          DefineAsRegister(new (zone()) LNumberUntagD(value));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
+        return DefineSameAsFirst(new (zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(
+          DefineSameAsFirst(new (zone()) LCheckSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new (zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempDoubleRegister();
+        LInstruction* result =
+            DefineSameAsFirst(new (zone()) LTaggedToI(value, temp1, temp2));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegister(val);
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new (zone()) LNumberTagD(value, temp1, temp2);
+      return AssignPointerMap(Define(result, result_temp));
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      return AssignEnvironment(
+          DefineAsRegister(new (zone()) LDoubleToSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new (zone()) LDoubleToI(value));
+      if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+      return result;
+    }
+  } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      if (!instr->CheckFlag(HValue::kCanOverflow)) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new (zone()) LSmiTag(value));
+      } else if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagU* result = new (zone()) LNumberTagU(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      } else {
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagI* result = new (zone()) LNumberTagI(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new (zone()) LSmiTag(value));
+      if (instr->CheckFlag(HValue::kCanOverflow)) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(to.IsDouble());
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new (zone()) LUint32ToDouble(UseRegister(val)));
+      } else {
+        return DefineAsRegister(new (zone()) LInteger32ToDouble(Use(val)));
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
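+// The DoCheck* handlers below emit a type or value check and, where the
+// property isn't statically known, attach a deoptimization environment
+// so a failing check bails out to unoptimized code.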
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new (zone()) LCheckNonSmi(value);
+  if (!instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new (zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
+    HCheckArrayBufferNotNeutered* instr) {
+  LOperand* view = UseRegisterAtStart(instr->value());
+  LCheckArrayBufferNotNeutered* result =
+      new (zone()) LCheckArrayBufferNotNeutered(view);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new (zone()) LCheckInstanceType(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new (zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LInstruction* result =
+      AssignEnvironment(new (zone()) LCheckMaps(value, temp));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return DefineAsRegister(new (zone()) LClampDToUint8(reg));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new (zone()) LClampIToUint8(reg));
+  } else {
+    DCHECK(input_rep.IsSmiOrTagged());
+    LClampTToUint8* result =
+        new (zone()) LClampTToUint8(reg, TempDoubleRegister());
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+  HValue* value = instr->value();
+  DCHECK(value->representation().IsDouble());
+  return DefineAsRegister(new (zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+  LOperand* lo = UseRegister(instr->lo());
+  LOperand* hi = UseRegister(instr->hi());
+  return DefineAsRegister(new (zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new (zone())
+      LReturn(UseFixed(instr->value(), r3), context, parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new (zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new (zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new (zone()) LConstantD);
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new (zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new (zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+  LLoadGlobalGeneric* result =
+      new (zone()) LLoadGlobalGeneric(context, global_object, vector);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LLoadContextSlot(context));
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  LInstruction* result = new (zone()) LStoreContextSlot(context, value);
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new (zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r3);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new (zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new (zone()) LLoadRoot);
+}
+
+
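+// Keyed loads take two forms: loads from a FixedArray/FixedDoubleArray
+// backing store and loads from a typed array's external backing store.
+// A deopt environment is attached only where the loaded value may be
+// invalid, e.g. a hole, or a uint32 value that doesn't fit in int32.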
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LInstruction* result = NULL;
+
+  if (!instr->is_fixed_typed_array()) {
+    LOperand* obj = NULL;
+    if (instr->representation().IsDouble()) {
+      obj = UseRegister(instr->elements());
+    } else {
+      obj = UseRegisterAtStart(instr->elements());
+    }
+    result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
+  } else {
+    DCHECK((instr->representation().IsInteger32() &&
+            !IsDoubleOrFloatElementsKind(elements_kind)) ||
+           (instr->representation().IsDouble() &&
+            IsDoubleOrFloatElementsKind(elements_kind)));
+    LOperand* backing_store = UseRegister(instr->elements());
+    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+    result = DefineAsRegister(
+        new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
+  }
+
+  bool needs_environment;
+  if (instr->is_fixed_typed_array()) {
+    // See LCodeGen::DoLoadKeyedExternalArray.
+    needs_environment = elements_kind == UINT32_ELEMENTS &&
+                        !instr->CheckFlag(HInstruction::kUint32);
+  } else {
+    // See LCodeGen::DoLoadKeyedFixedDoubleArray and
+    // LCodeGen::DoLoadKeyedFixedArray.
+    needs_environment =
+        instr->RequiresHoleCheck() ||
+        (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
+  }
+
+  if (needs_environment) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LInstruction* result = DefineFixed(
+      new (zone()) LLoadKeyedGeneric(context, object, key, vector), r3);
+  return MarkAsCall(result, instr);
+}
+
+
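+// For keyed stores into tagged elements, a store that needs a write
+// barrier uses UseTempRegister for its operands: the record-write code
+// clobbers them, so they must not be live past this instruction.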
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  if (!instr->is_fixed_typed_array()) {
+    DCHECK(instr->elements()->representation().IsTagged());
+    bool needs_write_barrier = instr->NeedsWriteBarrier();
+    LOperand* object = NULL;
+    LOperand* key = NULL;
+    LOperand* val = NULL;
+
+    if (instr->value()->representation().IsDouble()) {
+      object = UseRegisterAtStart(instr->elements());
+      val = UseRegister(instr->value());
+      key = UseRegisterOrConstantAtStart(instr->key());
+    } else {
+      if (needs_write_barrier) {
+        object = UseTempRegister(instr->elements());
+        val = UseTempRegister(instr->value());
+        key = UseTempRegister(instr->key());
+      } else {
+        object = UseRegisterAtStart(instr->elements());
+        val = UseRegisterAtStart(instr->value());
+        key = UseRegisterOrConstantAtStart(instr->key());
+      }
+    }
+
+    return new (zone()) LStoreKeyed(object, key, val, nullptr);
+  }
+
+  DCHECK((instr->value()->representation().IsInteger32() &&
+          !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+         (instr->value()->representation().IsDouble() &&
+          IsDoubleOrFloatElementsKind(instr->elements_kind())));
+  DCHECK(instr->elements()->representation().IsExternal());
+  LOperand* val = UseRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LOperand* backing_store = UseRegister(instr->elements());
+  LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+  return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
+
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreKeyedGeneric* result =
+      new (zone()) LStoreKeyedGeneric(context, obj, key, val, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new (zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+    return result;
+  } else {
+    LOperand* object = UseFixed(instr->object(), r3);
+    LOperand* context = UseFixed(instr->context(), cp);
+    LTransitionElementsKind* result =
+        new (zone()) LTransitionElementsKind(object, context, NULL);
+    return MarkAsCall(result, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new (zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
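+// Growing elements both calls into the runtime (hence the pointer map)
+// and can deoptimize if the growth fails (hence the environment).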
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = Use(instr->object());
+  LOperand* elements = Use(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+  LMaybeGrowElements* result = new (zone())
+      LMaybeGrowElements(context, object, elements, key, current_capacity);
+  DefineFixed(result, r3);
+  return AssignPointerMap(AssignEnvironment(result));
+}
+
+
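+// Operand policies below depend on which write barriers will be
+// emitted: barrier code clobbers its registers, so those operands
+// must use temps.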
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool is_in_object = instr->access().IsInobject();
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  bool needs_write_barrier_for_map =
+      instr->has_transition() && instr->NeedsWriteBarrierForMap();
+
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = is_in_object ? UseRegister(instr->object())
+                       : UseTempRegister(instr->object());
+  } else {
+    obj = needs_write_barrier_for_map ? UseRegister(instr->object())
+                                      : UseRegisterAtStart(instr->object());
+  }
+
+  LOperand* val;
+  if (needs_write_barrier) {
+    val = UseTempRegister(instr->value());
+  } else if (instr->field_representation().IsDouble()) {
+    val = UseRegisterAtStart(instr->value());
+  } else {
+    val = UseRegister(instr->value());
+  }
+
+  // We need a temporary register for the write barrier of the map field.
+  LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+  return new (zone()) LStoreNamedField(obj, val, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreNamedGeneric* result =
+      new (zone()) LStoreNamedGeneric(context, obj, val, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r4);
+  LOperand* right = UseFixed(instr->right(), r3);
+  return MarkAsCall(
+      DefineFixed(new (zone()) LStringAdd(context, left, right), r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new (zone()) LStringCharCodeAt(context, string, index);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new (zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = UseRegisterOrConstant(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  DCHECK(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new (zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new (zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor.GetRegisterParameter(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+
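+// OSR values are modeled as spill slots matching their unoptimized-frame
+// locations. If a local's slot index exceeds what LUnallocated can
+// encode, Retry() aborts this compilation attempt.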
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk()->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Retry(kTooManySpillSlotsNeededForOSR);
+      spill_index = 0;
+    }
+  }
+  return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new (zone()) LCallStub(context), r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* args = UseRegister(instr->arguments());
+  LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new (zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), r3);
+  LToFastProperties* result = new (zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* value = UseFixed(instr->value(), r6);
+  LTypeof* result = new (zone()) LTypeof(context, value);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new (zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+  return NULL;
+}
+
+
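+// Function-entry stack checks are marked as calls, while back-edge
+// checks keep registers live and instead carry a safepoint plus a
+// deopt environment for servicing interrupts mid-loop.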
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), cp);
+    return MarkAsCall(new (zone()) LStackCheck(context), instr);
+  } else {
+    DCHECK(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new (zone()) LStackCheck(context)));
+  }
+}
+
+
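+// Inlining is modeled mostly in the environment chain: entering an
+// inlined function copies the outer environment and emits no code;
+// leaving restores it, emitting at most an LDrop for pushed arguments.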
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(
+      instr->closure(), instr->arguments_count(), instr->function(), undefined,
+      instr->inlining_kind());
+  // Only replay binding of the arguments object if it wasn't removed
+  // from the graph.
+  if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->BindContext(instr->closure_context());
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedFunction(instr->shared());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new (zone()) LDrop(argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer =
+      current_block_->last_environment()->DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+
+  return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseFixed(instr->enumerable(), r3);
+  LForInPrepareMap* result = new (zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(
+      DefineAsRegister(new (zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new (zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  LLoadFieldByIndex* load = new (zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->context());
+  return new (zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+    HAllocateBlockContext* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LAllocateBlockContext* result =
+      new (zone()) LAllocateBlockContext(context, function);
+  return MarkAsCall(DefineFixed(result, cp), instr);
+}
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/ppc/lithium-ppc.h b/src/crankshaft/ppc/lithium-ppc.h
new file mode 100644
index 0000000..e86edc9
--- /dev/null
+++ b/src/crankshaft/ppc/lithium-ppc.h
@@ -0,0 +1,2667 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
+#define V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
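+// X-macro listing every concrete Lithium instruction for this port. It
+// stamps out the Opcode enum, the Is##type() predicates and the
+// CompileToNative dispatch, so adding an instruction means adding a
+// class below and one line here.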
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckArrayBufferNotNeutered)             \
+  V(CheckInstanceType)                       \
+  V(CheckNonSmi)                             \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToI)                               \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInPrototypeChainAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadRoot)                                \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(MaybeGrowElements)                       \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(MultiplyAddD)                            \
+  V(MultiplySubD)                            \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(Prologue)                                \
+  V(PushArgument)                            \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(RSubI)                                   \
+  V(TaggedToI)                               \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
+
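+// Boilerplate generated into each concrete instruction: its opcode, its
+// code-generator dispatch, its mnemonic for printing, and a checked
+// downcast from LInstruction.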
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const final { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) final;              \
+  const char* Mnemonic() const final { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
+  }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+  H##type* hydrogen() const { return H##type::cast(hydrogen_value()); }
+
+
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) {}
+
+  virtual ~LInstruction() {}
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+// Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+// Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
+
+  // Also queried by the register allocator.
+  bool IsMarkedAsCall() const { return IsCall(); }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
+ private:
+  // Iterator support.
+  friend class InputIterator;
+
+  friend class TempIterator;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  class IsCallBits : public BitField<bool, 0, 1> {};
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template <int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  bool HasResult() const final { return R != 0 && result() != NULL; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const override { return results_[0]; }
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template <int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  int InputCount() final { return I; }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return T; }
+  LOperand* TempAt(int i) final { return temps_[i]; }
+};
+
+
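+// A gap holds up to four parallel move lists (before/start/end/after).
+// The register allocator inserts moves here and the gap resolver later
+// lowers each parallel move into an ordered sequence of machine moves.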
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block) : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use the DECLARE_CONCRETE_INSTRUCTION macro here because of
+  // subclasses.
+  bool IsGap() const override { return true; }
+  void PrintDataTo(StringStream* stream) override;
+  static LGap* cast(LInstruction* instr) {
+    DCHECK(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new (zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LInstructionGap final : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  void PrintDataTo(StringStream* stream) override;
+  bool IsControl() const override { return true; }
+
+  int block_id() const { return block_->block_id(); }
+
+ private:
+  HBasicBlock* block_;
+};
+
+
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
+class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDummy final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() {}
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) { inputs_[0] = value; }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool IsControl() const override { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel final : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallStub(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
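+// Base class for conditional branches with a true and a false successor.
+// Labels are resolved lazily through the chunk, which also redirects
+// jumps targeting labels of dead blocks to their replacements.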
+template <int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) {}
+
+  bool IsControl() const final { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+
+  Label* false_label_;
+  Label* true_label_;
+};
+
+
+class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function, LOperand* receiver, LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LModI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivByConstI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LDivI(LOperand* dividend, LOperand* divisor) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+                LOperand* multiplicand) {
+    inputs_[0] = addend;
+    inputs_[1] = multiplier;
+    inputs_[2] = multiplicand;
+  }
+
+  LOperand* addend() { return inputs_[0]; }
+  LOperand* multiplier() { return inputs_[1]; }
+  LOperand* multiplicand() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
+// Instruction for computing minuend - multiplier * multiplicand.
+class LMultiplySubD final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LMultiplySubD(LOperand* minuend, LOperand* multiplier,
+                LOperand* multiplicand) {
+    inputs_[0] = minuend;
+    inputs_[1] = multiplier;
+    inputs_[2] = multiplicand;
+  }
+
+  LOperand* minuend() { return inputs_[0]; }
+  LOperand* multiplier() { return inputs_[1]; }
+  LOperand* multiplicand() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
+};
+
+
+class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const { return hydrogen()->representation().IsDouble(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFloor(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRound(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathFround final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathAbs(LOperand* context, LOperand* value) {
+    inputs_[1] = context;
+    inputs_[0] = value;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathLog(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathClz32(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
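+// Note: the constructor eagerly calls InitializeMathExpData() so the
+// lookup tables used by the generated fast exp code exist before any
+// LMathExp code runs.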
+class LMathExp final : public LTemplateInstruction<1, 1, 3> {
+ public:
+  LMathExp(LOperand* value, LOperand* double_temp, LOperand* temp1,
+           LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = double_temp;
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSqrt(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranch(LOperand* object) { inputs_[0] = object; }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+                               "cmp-minus-zero-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsStringAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasInstanceTypeAndBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LClassOfTestAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() const { return inputs_[0]; }
+  LOperand* left() const { return inputs_[1]; }
+  LOperand* right() const { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+    inputs_[0] = object;
+    inputs_[1] = prototype;
+  }
+
+  LOperand* object() const { return inputs_[0]; }
+  LOperand* prototype() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+                               "has-in-prototype-chain-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
+};
+
+
+class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
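+// can_deopt_ below marks shifts whose result may not be representable as an
+// int32 -- e.g. a logical shift right by zero of a value with the sign bit
+// set -- in which case the instruction needs a deoptimization environment.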
+class LShiftI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LRSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LRSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+  uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
+};
+
+
+class LConstantE final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+
+class LConstantT final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+
+class LBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpMapAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMapEnumLength(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSeqStringGetChar(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  LOperand* string() const { return inputs_[0]; }
+  LOperand* index() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LSeqStringSetChar(LOperand* context, LOperand* string, LOperand* index,
+                    LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+  }
+
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op, LOperand* context, LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+  Token::Value op() const { return op_; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn final : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    DCHECK(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = backing_store_owner;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* backing_store_owner() { return inputs_[2]; }
+  ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                    LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStoreContextSlot final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) {}
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
+class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallJSFunction(LOperand* function) { inputs_[0] = function; }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        inputs_(descriptor.GetRegisterParameterCount() +
+                    kImplicitRegisterParameterCount,
+                zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() +
+               kImplicitRegisterParameterCount ==
+           operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  const CallInterfaceDescriptor descriptor() { return descriptor_; }
+
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  // The target and context are passed as implicit parameters that are not
+  // explicitly listed in the descriptor.
+  static const int kImplicitRegisterParameterCount = 2;
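+  // Consequently a descriptor with N register parameters yields N + 2
+  // inputs, with the call target always at index 0 (see target() above).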
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  CallInterfaceDescriptor descriptor_;
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  int InputCount() final { return inputs_.length(); }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return 0; }
+  LOperand* TempAt(int i) final { return NULL; }
+};
+
+
+class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+                LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToSmi(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToI(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTaggedToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  bool needs_check() const { return needs_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+    inputs_[0] = object;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Representation representation() const {
+    return hydrogen()->field_representation();
+  }
+};
+
+
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+                     LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
+ public:
+  LStoreKeyed(LOperand* object, LOperand* key, LOperand* value,
+              LOperand* backing_store_owner) {
+    inputs_[0] = object;
+    inputs_[1] = key;
+    inputs_[2] = value;
+    inputs_[3] = backing_store_owner;
+  }
+
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* backing_store_owner() { return inputs_[3]; }
+  ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
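+  // Values produced by double arithmetic are exempted below, presumably
+  // because the FPU already returns canonical NaNs for +, -, * and /; only
+  // doubles from other sources may need canonicalization before storing.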
+  bool NeedsCanonicalization() {
+    if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+        hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+      return false;
+    }
+    return hydrogen()->NeedsCanonicalization();
+  }
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
+ public:
+  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                     LOperand* value, LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    inputs_[3] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LTransitionElementsKind(LOperand* object, LOperand* context,
+                          LOperand* new_map_temp) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = new_map_temp;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object, LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+                     LOperand* key, LOperand* current_capacity) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = elements;
+    inputs_[3] = key;
+    inputs_[4] = current_capacity;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* elements() { return inputs_[2]; }
+  LOperand* key() { return inputs_[3]; }
+  LOperand* current_capacity() { return inputs_[4]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
+class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckValue(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckArrayBufferNotNeutered final
+    : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
+
+  LOperand* view() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
+                               "check-array-buffer-not-neutered")
+  DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
+};
+
+
+class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckInstanceType(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampDToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleBits(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LConstructDouble(LOperand* hi, LOperand* lo) {
+    inputs_[0] = hi;
+    inputs_[1] = lo;
+  }
+
+  LOperand* hi() { return inputs_[0]; }
+  LOperand* lo() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
+            LOperand* temp2) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry() {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) { inputs_[0] = map; }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() { return HForInCacheArray::cast(this->hydrogen_value())->idx(); }
+};
+
+
+class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStoreFrameContext(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAllocateBlockContext(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk final : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) {}
+
+  int GetNextSpillIndex(RegisterKind kind);
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
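+// Translates the platform-independent hydrogen (HIR) graph into the lithium
+// (LIR) instructions declared above, assigning operands and environments as
+// it goes.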
+class LChunkBuilder final : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(info, graph),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        allocator_(allocator) {}
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+  LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
+  LInstruction* DoRSub(HSub* instr);
+
+  static bool HasMagicNumberForDivisor(int32_t divisor);
+
+  LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+  LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
+  LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+  LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathExp(HUnaryMathOperation* instr);
+  LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+  LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+  LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+  LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
+  LInstruction* DoDivI(HDiv* instr);
+  LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
+  LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction, so the register allocator will not reuse its register
+  // for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only at
+  // the instruction start; the register allocator is free to assign the same
+  // register to some other operand used inside the instruction (i.e. a
+  // temporary or the output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
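+  // (For example, an instruction whose output is allowed to alias its first
+  // input would typically take that input via UseRegisterAtStart and define
+  // its result with DefineSameAsFirst below.)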
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand in a constant operand.
+  MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+  // An input operand in register, stack slot or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, Register reg);
+  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+                                  DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly, and we do not attach an environment to such
+  // instructions.
+  LInstruction* MarkAsCall(
+      LInstruction* instr, HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+  void VisitInstruction(HInstruction* current);
+  void AddInstruction(LInstruction* instr, HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op, HBinaryOperation* instr);
+
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  LAllocator* allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_PPC_LITHIUM_PPC_H_
diff --git a/src/crankshaft/typing.cc b/src/crankshaft/typing.cc
new file mode 100644
index 0000000..df50f81
--- /dev/null
+++ b/src/crankshaft/typing.cc
@@ -0,0 +1,805 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/typing.h"
+
+#include "src/ast/scopes.h"
+#include "src/frames.h"
+#include "src/frames-inl.h"
+#include "src/ostreams.h"
+#include "src/parsing/parser.h"  // for CompileTimeValue; TODO(rossberg): move
+#include "src/splay-tree-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
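+// AstTyper walks the AST and narrows expression bounds using the type
+// feedback the runtime has recorded, so that graph construction can start
+// from observed rather than generic types.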
+AstTyper::AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
+                   Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root)
+    : isolate_(isolate),
+      zone_(zone),
+      closure_(closure),
+      scope_(scope),
+      osr_ast_id_(osr_ast_id),
+      root_(root),
+      oracle_(isolate, zone, handle(closure->shared()->code()),
+              handle(closure->shared()->feedback_vector()),
+              handle(closure->context()->native_context())),
+      store_(zone) {
+  InitializeAstVisitor(isolate);
+}
+
+
+#ifdef OBJECT_PRINT
+static void PrintObserved(Variable* var, Object* value, Type* type) {
+  OFStream os(stdout);
+  os << "  observed " << (var->IsParameter() ? "param" : "local") << "  ";
+  var->name()->Print(os);
+  os << " : " << Brief(value) << " -> ";
+  type->PrintTo(os);
+  os << std::endl;
+}
+#endif  // OBJECT_PRINT
+
+
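+// The value currently on the stack yields a precise lower bound via
+// Type::NowOf; the upper bound stays Any because other executions may store
+// differently-typed values in the same slot.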
+Effect AstTyper::ObservedOnStack(Object* value) {
+  Type* lower = Type::NowOf(value, zone());
+  return Effect(Bounds(lower, Type::Any(zone())));
+}
+
+
+void AstTyper::ObserveTypesAtOsrEntry(IterationStatement* stmt) {
+  if (stmt->OsrEntryId() != osr_ast_id_) return;
+
+  DisallowHeapAllocation no_gc;
+  JavaScriptFrameIterator it(isolate_);
+  JavaScriptFrame* frame = it.frame();
+
+  // Assert that the frame on the stack belongs to the function we want to OSR.
+  DCHECK_EQ(*closure_, frame->function());
+
+  int params = scope_->num_parameters();
+  int locals = scope_->StackLocalCount();
+
+  // Use sequential composition to achieve the desired narrowing.
+  // The receiver is a parameter with index -1.
+  store_.Seq(parameter_index(-1), ObservedOnStack(frame->receiver()));
+  for (int i = 0; i < params; i++) {
+    store_.Seq(parameter_index(i), ObservedOnStack(frame->GetParameter(i)));
+  }
+
+  for (int i = 0; i < locals; i++) {
+    store_.Seq(stack_local_index(i), ObservedOnStack(frame->GetExpression(i)));
+  }
+
+#ifdef OBJECT_PRINT
+  if (FLAG_trace_osr && FLAG_print_scopes) {
+    PrintObserved(scope_->receiver(), frame->receiver(),
+                  store_.LookupBounds(parameter_index(-1)).lower);
+
+    for (int i = 0; i < params; i++) {
+      PrintObserved(scope_->parameter(i), frame->GetParameter(i),
+                    store_.LookupBounds(parameter_index(i)).lower);
+    }
+
+    ZoneList<Variable*> local_vars(locals, zone());
+    ZoneList<Variable*> context_vars(scope_->ContextLocalCount(), zone());
+    ZoneList<Variable*> global_vars(scope_->ContextGlobalCount(), zone());
+    scope_->CollectStackAndContextLocals(&local_vars, &context_vars,
+                                         &global_vars);
+    for (int i = 0; i < locals; i++) {
+      PrintObserved(local_vars.at(i),
+                    frame->GetExpression(i),
+                    store_.LookupBounds(stack_local_index(i)).lower);
+    }
+  }
+#endif  // OBJECT_PRINT
+}
+
+
+#define RECURSE(call)                \
+  do {                               \
+    DCHECK(!HasStackOverflow());     \
+    call;                            \
+    if (HasStackOverflow()) return;  \
+  } while (false)
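+// RECURSE wraps every nested visit: if the visit detected a stack overflow,
+// the enclosing visitor method returns immediately. The do/while(false)
+// wrapper makes the macro usable as a single statement, e.g. in an unbraced
+// if.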
+
+
+void AstTyper::Run() {
+  RECURSE(VisitDeclarations(scope_->declarations()));
+  RECURSE(VisitStatements(root_->body()));
+}
+
+
+void AstTyper::VisitStatements(ZoneList<Statement*>* stmts) {
+  for (int i = 0; i < stmts->length(); ++i) {
+    Statement* stmt = stmts->at(i);
+    RECURSE(Visit(stmt));
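+    // Anything after an unconditional jump is unreachable, so stop typing.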
+    if (stmt->IsJump()) break;
+  }
+}
+
+
+void AstTyper::VisitBlock(Block* stmt) {
+  RECURSE(VisitStatements(stmt->statements()));
+  if (stmt->labels() != NULL) {
+    store_.Forget();  // Control may transfer here via 'break l'.
+  }
+}
+
+
+void AstTyper::VisitExpressionStatement(ExpressionStatement* stmt) {
+  RECURSE(Visit(stmt->expression()));
+}
+
+
+void AstTyper::VisitEmptyStatement(EmptyStatement* stmt) {
+}
+
+
+void AstTyper::VisitSloppyBlockFunctionStatement(
+    SloppyBlockFunctionStatement* stmt) {
+  RECURSE(Visit(stmt->statement()));
+}
+
+
+void AstTyper::VisitIfStatement(IfStatement* stmt) {
+  // Collect type feedback.
+  if (!stmt->condition()->ToBooleanIsTrue() &&
+      !stmt->condition()->ToBooleanIsFalse()) {
+    stmt->condition()->RecordToBooleanTypeFeedback(oracle());
+  }
+
+  RECURSE(Visit(stmt->condition()));
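+  // Type each arm in its own effect scope, then merge the two with Alt
+  // (either arm may have executed) and apply the merged effects to the
+  // store sequentially.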
+  Effects then_effects = EnterEffects();
+  RECURSE(Visit(stmt->then_statement()));
+  ExitEffects();
+  Effects else_effects = EnterEffects();
+  RECURSE(Visit(stmt->else_statement()));
+  ExitEffects();
+  then_effects.Alt(else_effects);
+  store_.Seq(then_effects);
+}
+
+
+void AstTyper::VisitContinueStatement(ContinueStatement* stmt) {
+  // TODO(rossberg): is it worth having a non-termination effect?
+}
+
+
+void AstTyper::VisitBreakStatement(BreakStatement* stmt) {
+  // TODO(rossberg): is it worth having a non-termination effect?
+}
+
+
+void AstTyper::VisitReturnStatement(ReturnStatement* stmt) {
+  // Collect type feedback.
+  // TODO(rossberg): we only need this for inlining into test contexts...
+  stmt->expression()->RecordToBooleanTypeFeedback(oracle());
+
+  RECURSE(Visit(stmt->expression()));
+  // TODO(rossberg): is it worth having a non-termination effect?
+}
+
+
+void AstTyper::VisitWithStatement(WithStatement* stmt) {
+  RECURSE(Visit(stmt->expression()));
+  RECURSE(Visit(stmt->statement()));
+}
+
+
+void AstTyper::VisitSwitchStatement(SwitchStatement* stmt) {
+  RECURSE(Visit(stmt->tag()));
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  Effects local_effects(zone());
+  bool complex_effects = false;  // True for label effects or fall-through.
+
+  for (int i = 0; i < clauses->length(); ++i) {
+    CaseClause* clause = clauses->at(i);
+
+    Effects clause_effects = EnterEffects();
+
+    if (!clause->is_default()) {
+      Expression* label = clause->label();
+      // Collect type feedback.
+      Type* tag_type;
+      Type* label_type;
+      Type* combined_type;
+      oracle()->CompareType(clause->CompareId(),
+                            &tag_type, &label_type, &combined_type);
+      NarrowLowerType(stmt->tag(), tag_type);
+      NarrowLowerType(label, label_type);
+      clause->set_compare_type(combined_type);
+
+      RECURSE(Visit(label));
+      if (!clause_effects.IsEmpty()) complex_effects = true;
+    }
+
+    ZoneList<Statement*>* stmts = clause->statements();
+    RECURSE(VisitStatements(stmts));
+    ExitEffects();
+    if (stmts->is_empty() || stmts->last()->IsJump()) {
+      local_effects.Alt(clause_effects);
+    } else {
+      complex_effects = true;
+    }
+  }
+
+  if (complex_effects) {
+    store_.Forget();  // Reached this in unknown state.
+  } else {
+    store_.Seq(local_effects);
+  }
+}
+
+
+void AstTyper::VisitCaseClause(CaseClause* clause) {
+  UNREACHABLE();
+}
+
+
+void AstTyper::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  // Collect type feedback.
+  if (!stmt->cond()->ToBooleanIsTrue()) {
+    stmt->cond()->RecordToBooleanTypeFeedback(oracle());
+  }
+
+  // TODO(rossberg): refine the unconditional Forget (here and elsewhere) by
+  // computing the set of variables assigned in only some of the origins of the
+  // control transfer (such as the loop body here).
+  store_.Forget();  // Control may transfer here via looping or 'continue'.
+  ObserveTypesAtOsrEntry(stmt);
+  RECURSE(Visit(stmt->body()));
+  RECURSE(Visit(stmt->cond()));
+  store_.Forget();  // Control may transfer here via 'break'.
+}
+
+
+void AstTyper::VisitWhileStatement(WhileStatement* stmt) {
+  // Collect type feedback.
+  if (!stmt->cond()->ToBooleanIsTrue()) {
+    stmt->cond()->RecordToBooleanTypeFeedback(oracle());
+  }
+
+  store_.Forget();  // Control may transfer here via looping or 'continue'.
+  RECURSE(Visit(stmt->cond()));
+  ObserveTypesAtOsrEntry(stmt);
+  RECURSE(Visit(stmt->body()));
+  store_.Forget();  // Control may transfer here via termination or 'break'.
+}
+
+
+void AstTyper::VisitForStatement(ForStatement* stmt) {
+  if (stmt->init() != NULL) {
+    RECURSE(Visit(stmt->init()));
+  }
+  store_.Forget();  // Control may transfer here via looping.
+  if (stmt->cond() != NULL) {
+    // Collect type feedback.
+    stmt->cond()->RecordToBooleanTypeFeedback(oracle());
+
+    RECURSE(Visit(stmt->cond()));
+  }
+  ObserveTypesAtOsrEntry(stmt);
+  RECURSE(Visit(stmt->body()));
+  if (stmt->next() != NULL) {
+    store_.Forget();  // Control may transfer here via 'continue'.
+    RECURSE(Visit(stmt->next()));
+  }
+  store_.Forget();  // Control may transfer here via termination or 'break'.
+}
+
+
+void AstTyper::VisitForInStatement(ForInStatement* stmt) {
+  // Collect type feedback.
+  stmt->set_for_in_type(static_cast<ForInStatement::ForInType>(
+      oracle()->ForInType(stmt->ForInFeedbackSlot())));
+
+  RECURSE(Visit(stmt->enumerable()));
+  store_.Forget();  // Control may transfer here via looping or 'continue'.
+  ObserveTypesAtOsrEntry(stmt);
+  RECURSE(Visit(stmt->body()));
+  store_.Forget();  // Control may transfer here via 'break'.
+}
+
+
+void AstTyper::VisitForOfStatement(ForOfStatement* stmt) {
+  RECURSE(Visit(stmt->iterable()));
+  store_.Forget();  // Control may transfer here via looping or 'continue'.
+  RECURSE(Visit(stmt->body()));
+  store_.Forget();  // Control may transfer here via 'break'.
+}
+
+
+void AstTyper::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  Effects try_effects = EnterEffects();
+  RECURSE(Visit(stmt->try_block()));
+  ExitEffects();
+  Effects catch_effects = EnterEffects();
+  store_.Forget();  // Control may transfer here via 'throw'.
+  RECURSE(Visit(stmt->catch_block()));
+  ExitEffects();
+  try_effects.Alt(catch_effects);
+  store_.Seq(try_effects);
+  // At this point, only variables that were reassigned in the catch block are
+  // still remembered.
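+  // E.g. (assumed JS): in `try { x = f(); } catch (e) { x = 1; }` the
+  // assignment in the catch clause is the only effect guaranteed on the
+  // exceptional path, because the throw may precede any effect of the
+  // try block.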
+}
+
+
+void AstTyper::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  RECURSE(Visit(stmt->try_block()));
+  store_.Forget();  // Control may transfer here via 'throw'.
+  RECURSE(Visit(stmt->finally_block()));
+}
+
+
+void AstTyper::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  store_.Forget();  // May do whatever.
+}
+
+
+void AstTyper::VisitFunctionLiteral(FunctionLiteral* expr) {}
+
+
+void AstTyper::VisitClassLiteral(ClassLiteral* expr) {}
+
+
+void AstTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+}
+
+
+void AstTyper::VisitDoExpression(DoExpression* expr) {
+  RECURSE(VisitBlock(expr->block()));
+  RECURSE(VisitVariableProxy(expr->result()));
+  NarrowType(expr, expr->result()->bounds());
+}
+
+
+void AstTyper::VisitConditional(Conditional* expr) {
+  // Collect type feedback.
+  expr->condition()->RecordToBooleanTypeFeedback(oracle());
+
+  RECURSE(Visit(expr->condition()));
+  Effects then_effects = EnterEffects();
+  RECURSE(Visit(expr->then_expression()));
+  ExitEffects();
+  Effects else_effects = EnterEffects();
+  RECURSE(Visit(expr->else_expression()));
+  ExitEffects();
+  then_effects.Alt(else_effects);
+  store_.Seq(then_effects);
+
+  NarrowType(expr, Bounds::Either(
+      expr->then_expression()->bounds(),
+      expr->else_expression()->bounds(), zone()));
+}
+
+
+void AstTyper::VisitVariableProxy(VariableProxy* expr) {
+  Variable* var = expr->var();
+  if (var->IsStackAllocated()) {
+    NarrowType(expr, store_.LookupBounds(variable_index(var)));
+  }
+}
+
+
+void AstTyper::VisitLiteral(Literal* expr) {
+  Type* type = Type::Constant(expr->value(), zone());
+  NarrowType(expr, Bounds(type));
+}
+
+
+void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
+  // TODO(rossberg): Reintroduce RegExp type.
+  NarrowType(expr, Bounds(Type::Object(zone())));
+}
+
+
+void AstTyper::VisitObjectLiteral(ObjectLiteral* expr) {
+  ZoneList<ObjectLiteral::Property*>* properties = expr->properties();
+  for (int i = 0; i < properties->length(); ++i) {
+    ObjectLiteral::Property* prop = properties->at(i);
+
+    // Collect type feedback.
+    if ((prop->kind() == ObjectLiteral::Property::MATERIALIZED_LITERAL &&
+        !CompileTimeValue::IsCompileTimeValue(prop->value())) ||
+        prop->kind() == ObjectLiteral::Property::COMPUTED) {
+      if (!prop->is_computed_name() &&
+          prop->key()->AsLiteral()->value()->IsInternalizedString() &&
+          prop->emit_store()) {
+        // Record type feedback for the property.
+        FeedbackVectorSlot slot = prop->GetSlot();
+        SmallMapList maps;
+        oracle()->CollectReceiverTypes(slot, &maps);
+        prop->set_receiver_type(maps.length() == 1 ? maps.at(0)
+                                                   : Handle<Map>::null());
+      }
+    }
+
+    RECURSE(Visit(prop->value()));
+  }
+
+  NarrowType(expr, Bounds(Type::Object(zone())));
+}
+
+
+void AstTyper::VisitArrayLiteral(ArrayLiteral* expr) {
+  ZoneList<Expression*>* values = expr->values();
+  for (int i = 0; i < values->length(); ++i) {
+    Expression* value = values->at(i);
+    RECURSE(Visit(value));
+  }
+
+  NarrowType(expr, Bounds(Type::Object(zone())));
+}
+
+
+void AstTyper::VisitAssignment(Assignment* expr) {
+  // Collect type feedback.
+  Property* prop = expr->target()->AsProperty();
+  if (prop != NULL) {
+    FeedbackVectorSlot slot = expr->AssignmentSlot();
+    expr->set_is_uninitialized(oracle()->StoreIsUninitialized(slot));
+    if (!expr->IsUninitialized()) {
+      SmallMapList* receiver_types = expr->GetReceiverTypes();
+      if (prop->key()->IsPropertyName()) {
+        Literal* lit_key = prop->key()->AsLiteral();
+        DCHECK(lit_key != NULL && lit_key->value()->IsString());
+        Handle<String> name = Handle<String>::cast(lit_key->value());
+        oracle()->AssignmentReceiverTypes(slot, name, receiver_types);
+      } else {
+        KeyedAccessStoreMode store_mode;
+        IcCheckType key_type;
+        oracle()->KeyedAssignmentReceiverTypes(slot, receiver_types,
+                                               &store_mode, &key_type);
+        expr->set_store_mode(store_mode);
+        expr->set_key_type(key_type);
+      }
+    }
+  }
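+  // The two feedback paths above correspond to named stores such as
+  // `o.x = v`, which record receiver maps for the property name, and keyed
+  // stores such as `o[i] = v`, which additionally record the store mode and
+  // key type (assumed JS forms, for illustration).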
+
+  Expression* rhs =
+      expr->is_compound() ? expr->binary_operation() : expr->value();
+  RECURSE(Visit(expr->target()));
+  RECURSE(Visit(rhs));
+  NarrowType(expr, rhs->bounds());
+
+  VariableProxy* proxy = expr->target()->AsVariableProxy();
+  if (proxy != NULL && proxy->var()->IsStackAllocated()) {
+    store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
+  }
+}
+
+
+void AstTyper::VisitYield(Yield* expr) {
+  RECURSE(Visit(expr->generator_object()));
+  RECURSE(Visit(expr->expression()));
+
+  // We don't know anything about the result type.
+}
+
+
+void AstTyper::VisitThrow(Throw* expr) {
+  RECURSE(Visit(expr->exception()));
+  // TODO(rossberg): is it worth having a non-termination effect?
+
+  NarrowType(expr, Bounds(Type::None(zone())));
+}
+
+
+void AstTyper::VisitProperty(Property* expr) {
+  // Collect type feedback.
+  FeedbackVectorSlot slot = expr->PropertyFeedbackSlot();
+  expr->set_inline_cache_state(oracle()->LoadInlineCacheState(slot));
+
+  if (!expr->IsUninitialized()) {
+    if (expr->key()->IsPropertyName()) {
+      Literal* lit_key = expr->key()->AsLiteral();
+      DCHECK(lit_key != NULL && lit_key->value()->IsString());
+      Handle<String> name = Handle<String>::cast(lit_key->value());
+      oracle()->PropertyReceiverTypes(slot, name, expr->GetReceiverTypes());
+    } else {
+      bool is_string;
+      IcCheckType key_type;
+      oracle()->KeyedPropertyReceiverTypes(slot, expr->GetReceiverTypes(),
+                                           &is_string, &key_type);
+      expr->set_is_string_access(is_string);
+      expr->set_key_type(key_type);
+    }
+  }
+
+  RECURSE(Visit(expr->obj()));
+  RECURSE(Visit(expr->key()));
+
+  // We don't know anything about the result type.
+}
+
+
+void AstTyper::VisitCall(Call* expr) {
+  // Collect type feedback.
+  RECURSE(Visit(expr->expression()));
+  bool is_uninitialized = true;
+  if (expr->IsUsingCallFeedbackICSlot(isolate_)) {
+    FeedbackVectorSlot slot = expr->CallFeedbackICSlot();
+    is_uninitialized = oracle()->CallIsUninitialized(slot);
+    if (!expr->expression()->IsProperty() &&
+        oracle()->CallIsMonomorphic(slot)) {
+      expr->set_target(oracle()->GetCallTarget(slot));
+      Handle<AllocationSite> site = oracle()->GetCallAllocationSite(slot);
+      expr->set_allocation_site(site);
+    }
+  }
+
+  expr->set_is_uninitialized(is_uninitialized);
+
+  ZoneList<Expression*>* args = expr->arguments();
+  for (int i = 0; i < args->length(); ++i) {
+    Expression* arg = args->at(i);
+    RECURSE(Visit(arg));
+  }
+
+  VariableProxy* proxy = expr->expression()->AsVariableProxy();
+  if (proxy != NULL && proxy->var()->is_possibly_eval(isolate_)) {
+    store_.Forget();  // Eval could do whatever to local variables.
+  }
+
+  // We don't know anything about the result type.
+}
+
+
+void AstTyper::VisitCallNew(CallNew* expr) {
+  // Collect type feedback.
+  FeedbackVectorSlot allocation_site_feedback_slot =
+      expr->CallNewFeedbackSlot();
+  expr->set_allocation_site(
+      oracle()->GetCallNewAllocationSite(allocation_site_feedback_slot));
+  bool monomorphic =
+      oracle()->CallNewIsMonomorphic(expr->CallNewFeedbackSlot());
+  expr->set_is_monomorphic(monomorphic);
+  if (monomorphic) {
+    expr->set_target(oracle()->GetCallNewTarget(expr->CallNewFeedbackSlot()));
+  }
+
+  RECURSE(Visit(expr->expression()));
+  ZoneList<Expression*>* args = expr->arguments();
+  for (int i = 0; i < args->length(); ++i) {
+    Expression* arg = args->at(i);
+    RECURSE(Visit(arg));
+  }
+
+  NarrowType(expr, Bounds(Type::None(zone()), Type::Receiver(zone())));
+}
+
+
+void AstTyper::VisitCallRuntime(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  for (int i = 0; i < args->length(); ++i) {
+    Expression* arg = args->at(i);
+    RECURSE(Visit(arg));
+  }
+
+  // We don't know anything about the result type.
+}
+
+
+void AstTyper::VisitUnaryOperation(UnaryOperation* expr) {
+  // Collect type feedback.
+  if (expr->op() == Token::NOT) {
+    // TODO(rossberg): only do in test or value context.
+    expr->expression()->RecordToBooleanTypeFeedback(oracle());
+  }
+
+  RECURSE(Visit(expr->expression()));
+
+  switch (expr->op()) {
+    case Token::NOT:
+    case Token::DELETE:
+      NarrowType(expr, Bounds(Type::Boolean(zone())));
+      break;
+    case Token::VOID:
+      NarrowType(expr, Bounds(Type::Undefined(zone())));
+      break;
+    case Token::TYPEOF:
+      NarrowType(expr, Bounds(Type::InternalizedString(zone())));
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void AstTyper::VisitCountOperation(CountOperation* expr) {
+  // Collect type feedback.
+  FeedbackVectorSlot slot = expr->CountSlot();
+  KeyedAccessStoreMode store_mode;
+  IcCheckType key_type;
+  oracle()->GetStoreModeAndKeyType(slot, &store_mode, &key_type);
+  oracle()->CountReceiverTypes(slot, expr->GetReceiverTypes());
+  expr->set_store_mode(store_mode);
+  expr->set_key_type(key_type);
+  expr->set_type(oracle()->CountType(expr->CountBinOpFeedbackId()));
+  // TODO(rossberg): merge the count type with the generic expression type.
+
+  RECURSE(Visit(expr->expression()));
+
+  NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
+
+  VariableProxy* proxy = expr->expression()->AsVariableProxy();
+  if (proxy != NULL && proxy->var()->IsStackAllocated()) {
+    store_.Seq(variable_index(proxy->var()), Effect(expr->bounds()));
+  }
+}
+
+
+void AstTyper::VisitBinaryOperation(BinaryOperation* expr) {
+  // Collect type feedback.
+  Type* type;
+  Type* left_type;
+  Type* right_type;
+  Maybe<int> fixed_right_arg = Nothing<int>();
+  Handle<AllocationSite> allocation_site;
+  oracle()->BinaryType(expr->BinaryOperationFeedbackId(),
+      &left_type, &right_type, &type, &fixed_right_arg,
+      &allocation_site, expr->op());
+  NarrowLowerType(expr, type);
+  NarrowLowerType(expr->left(), left_type);
+  NarrowLowerType(expr->right(), right_type);
+  expr->set_allocation_site(allocation_site);
+  expr->set_fixed_right_arg(fixed_right_arg);
+  if (expr->op() == Token::OR || expr->op() == Token::AND) {
+    expr->left()->RecordToBooleanTypeFeedback(oracle());
+  }
+
+  switch (expr->op()) {
+    case Token::COMMA:
+      RECURSE(Visit(expr->left()));
+      RECURSE(Visit(expr->right()));
+      NarrowType(expr, expr->right()->bounds());
+      break;
+    case Token::OR:
+    case Token::AND: {
+      Effects left_effects = EnterEffects();
+      RECURSE(Visit(expr->left()));
+      ExitEffects();
+      Effects right_effects = EnterEffects();
+      RECURSE(Visit(expr->right()));
+      ExitEffects();
+      left_effects.Alt(right_effects);
+      store_.Seq(left_effects);
+
+      NarrowType(expr, Bounds::Either(
+          expr->left()->bounds(), expr->right()->bounds(), zone()));
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_AND: {
+      RECURSE(Visit(expr->left()));
+      RECURSE(Visit(expr->right()));
+      Type* upper = Type::Union(
+          expr->left()->bounds().upper, expr->right()->bounds().upper, zone());
+      if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
+      Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
+      NarrowType(expr, Bounds(lower, upper));
+      break;
+    }
+    case Token::BIT_XOR:
+    case Token::SHL:
+    case Token::SAR:
+      RECURSE(Visit(expr->left()));
+      RECURSE(Visit(expr->right()));
+      NarrowType(expr,
+          Bounds(Type::SignedSmall(zone()), Type::Signed32(zone())));
+      break;
+    case Token::SHR:
+      RECURSE(Visit(expr->left()));
+      RECURSE(Visit(expr->right()));
+      // TODO(rossberg): The upper bound would be Unsigned32, but since there
+      // is no 'positive Smi' type for the lower bound, we use the smallest
+      // union of Smi and Unsigned32 as upper bound instead.
+      NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
+      break;
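+    // Worked example for the ADD case below (assumed operand bounds): with
+    // l = [SignedSmall..Number] and r = [String..String], one side is
+    // certainly a string, so lower == upper == String; with
+    // l = r = [SignedSmall..Number], the result bounds stay
+    // [SignedSmall..Number].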
+    case Token::ADD: {
+      RECURSE(Visit(expr->left()));
+      RECURSE(Visit(expr->right()));
+      Bounds l = expr->left()->bounds();
+      Bounds r = expr->right()->bounds();
+      Type* lower =
+          !l.lower->IsInhabited() || !r.lower->IsInhabited() ?
+              Type::None(zone()) :
+          l.lower->Is(Type::String()) || r.lower->Is(Type::String()) ?
+              Type::String(zone()) :
+          l.lower->Is(Type::Number()) && r.lower->Is(Type::Number()) ?
+              Type::SignedSmall(zone()) : Type::None(zone());
+      Type* upper =
+          l.upper->Is(Type::String()) || r.upper->Is(Type::String()) ?
+              Type::String(zone()) :
+          l.upper->Is(Type::Number()) && r.upper->Is(Type::Number()) ?
+              Type::Number(zone()) : Type::NumberOrString(zone());
+      NarrowType(expr, Bounds(lower, upper));
+      break;
+    }
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      RECURSE(Visit(expr->left()));
+      RECURSE(Visit(expr->right()));
+      NarrowType(expr, Bounds(Type::SignedSmall(zone()), Type::Number(zone())));
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void AstTyper::VisitCompareOperation(CompareOperation* expr) {
+  // Collect type feedback.
+  Type* left_type;
+  Type* right_type;
+  Type* combined_type;
+  oracle()->CompareType(expr->CompareOperationFeedbackId(),
+      &left_type, &right_type, &combined_type);
+  NarrowLowerType(expr->left(), left_type);
+  NarrowLowerType(expr->right(), right_type);
+  expr->set_combined_type(combined_type);
+
+  RECURSE(Visit(expr->left()));
+  RECURSE(Visit(expr->right()));
+
+  NarrowType(expr, Bounds(Type::Boolean(zone())));
+}
+
+
+void AstTyper::VisitSpread(Spread* expr) { RECURSE(Visit(expr->expression())); }
+
+
+void AstTyper::VisitEmptyParentheses(EmptyParentheses* expr) {
+  UNREACHABLE();
+}
+
+
+void AstTyper::VisitThisFunction(ThisFunction* expr) {
+}
+
+
+void AstTyper::VisitSuperPropertyReference(SuperPropertyReference* expr) {}
+
+
+void AstTyper::VisitSuperCallReference(SuperCallReference* expr) {}
+
+
+void AstTyper::VisitRewritableAssignmentExpression(
+    RewritableAssignmentExpression* expr) {
+  Visit(expr->expression());
+}
+
+
+void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
+  for (int i = 0; i < decls->length(); ++i) {
+    Declaration* decl = decls->at(i);
+    RECURSE(Visit(decl));
+  }
+}
+
+
+void AstTyper::VisitVariableDeclaration(VariableDeclaration* declaration) {
+}
+
+
+void AstTyper::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
+  RECURSE(Visit(declaration->fun()));
+}
+
+
+void AstTyper::VisitImportDeclaration(ImportDeclaration* declaration) {
+}
+
+
+void AstTyper::VisitExportDeclaration(ExportDeclaration* declaration) {
+}
+
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/crankshaft/typing.h b/src/crankshaft/typing.h
new file mode 100644
index 0000000..40b538a
--- /dev/null
+++ b/src/crankshaft/typing.h
@@ -0,0 +1,85 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_TYPING_H_
+#define V8_CRANKSHAFT_TYPING_H_
+
+#include "src/allocation.h"
+#include "src/ast/ast.h"
+#include "src/ast/scopes.h"
+#include "src/effects.h"
+#include "src/type-info.h"
+#include "src/types.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+
+class AstTyper: public AstVisitor {
+ public:
+  AstTyper(Isolate* isolate, Zone* zone, Handle<JSFunction> closure,
+           Scope* scope, BailoutId osr_ast_id, FunctionLiteral* root);
+  void Run();
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+
+ private:
+  Effect ObservedOnStack(Object* value);
+  void ObserveTypesAtOsrEntry(IterationStatement* stmt);
+
+  static const int kNoVar = INT_MIN;
+  typedef v8::internal::Effects<int, kNoVar> Effects;
+  typedef v8::internal::NestedEffects<int, kNoVar> Store;
+
+  Isolate* isolate_;
+  Zone* zone_;
+  Handle<JSFunction> closure_;
+  Scope* scope_;
+  BailoutId osr_ast_id_;
+  FunctionLiteral* root_;
+  TypeFeedbackOracle oracle_;
+  Store store_;
+
+  Zone* zone() const { return zone_; }
+  TypeFeedbackOracle* oracle() { return &oracle_; }
+
+  void NarrowType(Expression* e, Bounds b) {
+    e->set_bounds(Bounds::Both(e->bounds(), b, zone()));
+  }
+  void NarrowLowerType(Expression* e, Type* t) {
+    e->set_bounds(Bounds::NarrowLower(e->bounds(), t, zone()));
+  }
+
+  Effects EnterEffects() {
+    store_ = store_.Push();
+    return store_.Top();
+  }
+  void ExitEffects() { store_ = store_.Pop(); }
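+  // Typical pairing (see typing.cc): EnterEffects() pushes a fresh effect
+  // frame before visiting one control-flow alternative, ExitEffects() pops
+  // it afterwards, and sibling frames are then combined with Alt and
+  // sequenced into the store with Seq.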
+
+  int parameter_index(int index) { return -index - 2; }
+  int stack_local_index(int index) { return index; }
+
+  int variable_index(Variable* var) {
+    // Stack locals have the range [0 .. l]
+    // Parameters have the range [-1 .. p]
+    // We map this to [-p-2 .. -1, 0 .. l]
+    return var->IsStackLocal() ? stack_local_index(var->index()) :
+           var->IsParameter() ? parameter_index(var->index()) : kNoVar;
+  }
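+  // For instance, parameter_index(0) == -2 and the receiver (index -1) maps
+  // to -1, so parameters land in [-p-2 .. -1], disjoint from the stack
+  // locals in [0 .. l] and from kNoVar.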
+
+  void VisitDeclarations(ZoneList<Declaration*>* declarations) override;
+  void VisitStatements(ZoneList<Statement*>* statements) override;
+
+#define DECLARE_VISIT(type) void Visit##type(type* node) override;
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  DISALLOW_COPY_AND_ASSIGN(AstTyper);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_TYPING_H_
diff --git a/src/crankshaft/unique.h b/src/crankshaft/unique.h
new file mode 100644
index 0000000..54abfa7
--- /dev/null
+++ b/src/crankshaft/unique.h
@@ -0,0 +1,362 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_UNIQUE_H_
+#define V8_CRANKSHAFT_UNIQUE_H_
+
+#include <ostream>  // NOLINT(readability/streams)
+
+#include "src/assert-scope.h"
+#include "src/base/functional.h"
+#include "src/handles.h"
+#include "src/utils.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+
+template <typename T>
+class UniqueSet;
+
+
+// Represents a handle to an object on the heap, but with the additional
+// ability of checking for equality and hashing without accessing the heap.
+//
+// Creating a Unique<T> requires first dereferencing the handle to obtain
+// the address of the object, which is used as the hashcode and the basis for
+// comparison. The object can be moved later by the GC, but comparison
+// and hashing use the old address of the object, without dereferencing it.
+//
+// Careful! Comparison of two Uniques is only correct if both were created
+// in the same "era" of GC or if at least one is a non-movable object.
+template <typename T>
+class Unique final {
+ public:
+  Unique<T>() : raw_address_(NULL) {}
+
+  // TODO(titzer): make private and introduce a uniqueness scope.
+  explicit Unique(Handle<T> handle) {
+    if (handle.is_null()) {
+      raw_address_ = NULL;
+    } else {
+      // This is a best-effort check to prevent comparing Unique<T>s created
+      // in different GC eras; we require heap allocation to be disallowed at
+      // creation time.
+      // NOTE: we currently consider maps to be non-movable, so no special
+      // assurance is required for creating a Unique<Map>.
+      // TODO(titzer): other immortal immovable objects are also fine.
+      DCHECK(!AllowHeapAllocation::IsAllowed() || handle->IsMap());
+      raw_address_ = reinterpret_cast<Address>(*handle);
+      DCHECK_NOT_NULL(raw_address_);  // Non-null should imply non-zero address.
+    }
+    handle_ = handle;
+  }
+
+  // Constructor for handling automatic upcasting.
+  // E.g. Unique<JSFunction> can be passed when Unique<Object> is expected.
+  template <class S> Unique(Unique<S> uniq) {
+#ifdef DEBUG
+    T* a = NULL;
+    S* b = NULL;
+    a = b;  // Fake assignment to enforce type checks.
+    USE(a);
+#endif
+    raw_address_ = uniq.raw_address_;
+    handle_ = uniq.handle_;
+  }
+
+  template <typename U>
+  inline bool operator==(const Unique<U>& other) const {
+    DCHECK(IsInitialized() && other.IsInitialized());
+    return raw_address_ == other.raw_address_;
+  }
+
+  template <typename U>
+  inline bool operator!=(const Unique<U>& other) const {
+    DCHECK(IsInitialized() && other.IsInitialized());
+    return raw_address_ != other.raw_address_;
+  }
+
+  friend inline size_t hash_value(Unique<T> const& unique) {
+    DCHECK(unique.IsInitialized());
+    return base::hash<void*>()(unique.raw_address_);
+  }
+
+  inline intptr_t Hashcode() const {
+    DCHECK(IsInitialized());
+    return reinterpret_cast<intptr_t>(raw_address_);
+  }
+
+  inline bool IsNull() const {
+    DCHECK(IsInitialized());
+    return raw_address_ == NULL;
+  }
+
+  inline bool IsKnownGlobal(void* global) const {
+    DCHECK(IsInitialized());
+    return raw_address_ == reinterpret_cast<Address>(global);
+  }
+
+  inline Handle<T> handle() const {
+    return handle_;
+  }
+
+  template <class S> static Unique<T> cast(Unique<S> that) {
+    // Allow fetching location() to unsafe-cast the handle. This is necessary
+    // since we can't concurrently safe-cast. Safe-casting requires looking at
+    // the heap which may be moving concurrently to the compiler thread.
+    AllowHandleDereference allow_deref;
+    return Unique<T>(that.raw_address_,
+                     Handle<T>(reinterpret_cast<T**>(that.handle_.location())));
+  }
+
+  inline bool IsInitialized() const {
+    return raw_address_ != NULL || handle_.is_null();
+  }
+
+  // TODO(titzer): this is a hack to migrate to Unique<T> incrementally.
+  static Unique<T> CreateUninitialized(Handle<T> handle) {
+    return Unique<T>(NULL, handle);
+  }
+
+  static Unique<T> CreateImmovable(Handle<T> handle) {
+    return Unique<T>(reinterpret_cast<Address>(*handle), handle);
+  }
+
+ private:
+  Unique(Address raw_address, Handle<T> handle)
+      : raw_address_(raw_address), handle_(handle) {}
+
+  Address raw_address_;
+  Handle<T> handle_;
+
+  friend class UniqueSet<T>;  // Uses internal details for speed.
+  template <class U>
+  friend class Unique;  // For comparing raw_address values.
+};
+
+template <typename T>
+inline std::ostream& operator<<(std::ostream& os, Unique<T> uniq) {
+  return os << Brief(*uniq.handle());
+}
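+
+
+// A minimal usage sketch (illustrative only; the handles are assumed to have
+// been created while heap allocation is disallowed, as the constructor
+// requires):
+//
+//   Unique<JSFunction> a(function_a);
+//   Unique<JSFunction> b(function_b);
+//   if (a == b) { ... }  // Same object, decided without touching the heap.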
+
+
+template <typename T>
+class UniqueSet final : public ZoneObject {
+ public:
+  // Constructor. A new set will be empty.
+  UniqueSet() : size_(0), capacity_(0), array_(NULL) { }
+
+  // Capacity constructor. A new set will be empty.
+  UniqueSet(int capacity, Zone* zone)
+      : size_(0), capacity_(capacity),
+        array_(zone->NewArray<Unique<T> >(capacity)) {
+    DCHECK(capacity <= kMaxCapacity);
+  }
+
+  // Singleton constructor.
+  UniqueSet(Unique<T> uniq, Zone* zone)
+      : size_(1), capacity_(1), array_(zone->NewArray<Unique<T> >(1)) {
+    array_[0] = uniq;
+  }
+
+  // Add a new element to this unique set. Mutates this set. O(|this|).
+  void Add(Unique<T> uniq, Zone* zone) {
+    DCHECK(uniq.IsInitialized());
+    // Keep the set sorted by the {raw_address} of the unique elements.
+    for (int i = 0; i < size_; i++) {
+      if (array_[i] == uniq) return;
+      if (array_[i].raw_address_ > uniq.raw_address_) {
+        // Insert in the middle.
+        Grow(size_ + 1, zone);
+        for (int j = size_ - 1; j >= i; j--) array_[j + 1] = array_[j];
+        array_[i] = uniq;
+        size_++;
+        return;
+      }
+    }
+    // Append the element to the end.
+    Grow(size_ + 1, zone);
+    array_[size_++] = uniq;
+  }
+
+  // Remove an element from this set. Mutates this set. O(|this|)
+  void Remove(Unique<T> uniq) {
+    for (int i = 0; i < size_; i++) {
+      if (array_[i] == uniq) {
+        while (++i < size_) array_[i - 1] = array_[i];
+        size_--;
+        return;
+      }
+    }
+  }
+
+  // Compare this set against another set. O(|this|).
+  bool Equals(const UniqueSet<T>* that) const {
+    if (that->size_ != this->size_) return false;
+    for (int i = 0; i < this->size_; i++) {
+      if (this->array_[i] != that->array_[i]) return false;
+    }
+    return true;
+  }
+
+  // Check whether this set contains the given element. O(|this|)
+  // TODO(titzer): use binary search for large sets to make this O(log|this|)
+  template <typename U>
+  bool Contains(const Unique<U> elem) const {
+    for (int i = 0; i < this->size_; ++i) {
+      Unique<T> cand = this->array_[i];
+      if (cand.raw_address_ >= elem.raw_address_) {
+        return cand.raw_address_ == elem.raw_address_;
+      }
+    }
+    return false;
+  }
+
+  // Check if this set is a subset of the given set. O(|this| + |that|).
+  bool IsSubset(const UniqueSet<T>* that) const {
+    if (that->size_ < this->size_) return false;
+    int j = 0;
+    for (int i = 0; i < this->size_; i++) {
+      Unique<T> sought = this->array_[i];
+      while (true) {
+        if (sought == that->array_[j++]) break;
+        // Fail whenever there are more elements in {this} than {that}.
+        if ((this->size_ - i) > (that->size_ - j)) return false;
+      }
+    }
+    return true;
+  }
+
+  // Returns a new set representing the intersection of this set and the other.
+  // O(|this| + |that|).
+  UniqueSet<T>* Intersect(const UniqueSet<T>* that, Zone* zone) const {
+    if (that->size_ == 0 || this->size_ == 0) return new(zone) UniqueSet<T>();
+
+    UniqueSet<T>* out = new(zone) UniqueSet<T>(
+        Min(this->size_, that->size_), zone);
+
+    int i = 0, j = 0, k = 0;
+    while (i < this->size_ && j < that->size_) {
+      Unique<T> a = this->array_[i];
+      Unique<T> b = that->array_[j];
+      if (a == b) {
+        out->array_[k++] = a;
+        i++;
+        j++;
+      } else if (a.raw_address_ < b.raw_address_) {
+        i++;
+      } else {
+        j++;
+      }
+    }
+
+    out->size_ = k;
+    return out;
+  }
+
+  // Returns a new set representing the union of this set and the other.
+  // O(|this| + |that|).
+  UniqueSet<T>* Union(const UniqueSet<T>* that, Zone* zone) const {
+    if (that->size_ == 0) return this->Copy(zone);
+    if (this->size_ == 0) return that->Copy(zone);
+
+    UniqueSet<T>* out = new(zone) UniqueSet<T>(
+        this->size_ + that->size_, zone);
+
+    int i = 0, j = 0, k = 0;
+    while (i < this->size_ && j < that->size_) {
+      Unique<T> a = this->array_[i];
+      Unique<T> b = that->array_[j];
+      if (a == b) {
+        out->array_[k++] = a;
+        i++;
+        j++;
+      } else if (a.raw_address_ < b.raw_address_) {
+        out->array_[k++] = a;
+        i++;
+      } else {
+        out->array_[k++] = b;
+        j++;
+      }
+    }
+
+    while (i < this->size_) out->array_[k++] = this->array_[i++];
+    while (j < that->size_) out->array_[k++] = that->array_[j++];
+
+    out->size_ = k;
+    return out;
+  }
+
+  // Returns a new set representing all elements from this set which are not in
+  // that set. O(|this| * |that|).
+  UniqueSet<T>* Subtract(const UniqueSet<T>* that, Zone* zone) const {
+    if (that->size_ == 0) return this->Copy(zone);
+
+    UniqueSet<T>* out = new(zone) UniqueSet<T>(this->size_, zone);
+
+    int i = 0, j = 0;
+    while (i < this->size_) {
+      Unique<T> cand = this->array_[i];
+      if (!that->Contains(cand)) {
+        out->array_[j++] = cand;
+      }
+      i++;
+    }
+
+    out->size_ = j;
+    return out;
+  }
+
+  // Makes an exact copy of this set. O(|this|).
+  UniqueSet<T>* Copy(Zone* zone) const {
+    UniqueSet<T>* copy = new(zone) UniqueSet<T>(this->size_, zone);
+    copy->size_ = this->size_;
+    memcpy(copy->array_, this->array_, this->size_ * sizeof(Unique<T>));
+    return copy;
+  }
+
+  void Clear() {
+    size_ = 0;
+  }
+
+  inline int size() const {
+    return size_;
+  }
+
+  inline Unique<T> at(int index) const {
+    DCHECK(index >= 0 && index < size_);
+    return array_[index];
+  }
+
+ private:
+  // These sets should be small, since operations are implemented with simple
+  // linear algorithms. Enforce a maximum size.
+  static const int kMaxCapacity = 65535;
+
+  uint16_t size_;
+  uint16_t capacity_;
+  Unique<T>* array_;
+
+  // Grow the size of internal storage to be at least {size} elements.
+  void Grow(int size, Zone* zone) {
+    CHECK(size < kMaxCapacity);  // Enforce maximum size.
+    if (capacity_ < size) {
+      int new_capacity = 2 * capacity_ + size;
+      if (new_capacity > kMaxCapacity) new_capacity = kMaxCapacity;
+      Unique<T>* new_array = zone->NewArray<Unique<T> >(new_capacity);
+      if (size_ > 0) {
+        memcpy(new_array, array_, size_ * sizeof(Unique<T>));
+      }
+      capacity_ = new_capacity;
+      array_ = new_array;
+    }
+  }
+};
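+
+
+// Usage sketch (illustrative; `zone`, `map_a`, `map_b` and `other` are
+// assumed to exist):
+//
+//   UniqueSet<Map>* set = new(zone) UniqueSet<Map>();
+//   set->Add(map_a, zone);
+//   set->Add(map_b, zone);  // The set stays sorted by raw address.
+//   if (set->Contains(map_a)) { ... }
+//   UniqueSet<Map>* both = set->Union(other, zone);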
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_UNIQUE_H_
diff --git a/src/crankshaft/x64/lithium-codegen-x64.cc b/src/crankshaft/x64/lithium-codegen-x64.cc
new file mode 100644
index 0000000..3f7e9ba
--- /dev/null
+++ b/src/crankshaft/x64/lithium-codegen-x64.cc
@@ -0,0 +1,5670 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/crankshaft/x64/lithium-codegen-x64.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+
+namespace v8 {
+namespace internal {
+
+
+// When invoking builtins, we need to record the safepoint in the middle of
+// the invoke instruction sequence generated by the macro assembler.
+class SafepointGenerator final : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deopt_mode_(mode) { }
+  virtual ~SafepointGenerator() {}
+
+  void BeforeCall(int call_size) const override {}
+
+  void AfterCall() const override {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  DCHECK(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateJumpTable() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  DCHECK(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+}
+
+
+#ifdef _MSC_VER
+void LCodeGen::MakeSureStackPagesMapped(int offset) {
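+  // Windows commits stack pages lazily through a guard page, so each page of
+  // the newly reserved area is touched in order here to make sure it is
+  // mapped before generated code stores into arbitrary slots within it.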
+  const int kPageSize = 4 * KB;
+  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
+    __ movp(Operand(rsp, offset), rax);
+  }
+}
+#endif
+
+
+void LCodeGen::SaveCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ Movsd(MemOperand(rsp, count * kDoubleSize),
+             XMMRegister::from_code(save_iterator.Current()));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ Movsd(XMMRegister::from_code(save_iterator.Current()),
+             MemOperand(rsp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  DCHECK(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+      __ int3();
+    }
+#endif
+  }
+
+  info()->set_prologue_offset(masm_->pc_offset());
+  if (NeedsEagerFrame()) {
+    DCHECK(!frame_is_built_);
+    frame_is_built_ = true;
+    if (info()->IsStub()) {
+      __ StubPrologue();
+    } else {
+      __ Prologue(info()->GeneratePreagedPrologue());
+    }
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ subp(rsp, Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+      MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+      __ Push(rax);
+      __ Set(rax, slots);
+      __ Set(kScratchRegister, kSlotsZapValue);
+      Label loop;
+      __ bind(&loop);
+      __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
+              kScratchRegister);
+      __ decl(rax);
+      __ j(not_zero, &loop);
+      __ Pop(rax);
+    } else {
+      __ subp(rsp, Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+      MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+    }
+
+    if (info()->saves_caller_doubles()) {
+      SaveCallerDoubles();
+    }
+  }
+  return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+  Comment(";;; Prologue begin");
+
+  // Possibly allocate a local context.
+  if (info_->num_heap_slots() > 0) {
+    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
+    // Argument to NewContext is the function, which is still in rdi.
+    int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+    if (info()->scope()->is_script_scope()) {
+      __ Push(rdi);
+      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ CallRuntime(Runtime::kNewScriptContext);
+      deopt_mode = Safepoint::kLazyDeopt;
+    } else if (slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ Push(rdi);
+      __ CallRuntime(Runtime::kNewFunctionContext);
+    }
+    RecordSafepoint(deopt_mode);
+
+    // Context is returned in rax.  It replaces the context passed to us.
+    // It's saved in the stack and kept live in rsi.
+    __ movp(rsi, rax);
+    __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
+
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ movp(rax, Operand(rbp, parameter_offset));
+        // Store it in the context.
+        int context_offset = Context::SlotOffset(var->index());
+        __ movp(Operand(rsi, context_offset), rax);
+        // Update the write barrier. This clobbers rax and rbx.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  Comment(";;; Prologue end");
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or if there
+  // are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  DCHECK(slots >= 0);
+  __ subp(rsp, Immediate(slots * kPointerSize));
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (instr->IsCall()) {
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  }
+  if (!instr->IsLazyBailout() && !instr->IsGap()) {
+    safepoints_.BumpLastLazySafepointIndex();
+  }
+}
+
+
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+  if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
+      instr->hydrogen_value()->representation().IsInteger32() &&
+      instr->result()->IsRegister()) {
+    __ AssertZeroExtended(ToRegister(instr->result()));
+  }
+
+  if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
+    // We sign extend the dehoisted key at the definition point when the pointer
+    // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
+    // points and MustSignExtendResult is always false. We can't use
+    // STATIC_ASSERT here as the pointer size is 32-bit for x32.
+    DCHECK(kPointerSize == kInt64Size);
+    if (instr->result()->IsRegister()) {
+      Register result_reg = ToRegister(instr->result());
+      __ movsxlq(result_reg, result_reg);
+    } else {
+      // Sign extend the 32bit result in the stack slots.
+      DCHECK(instr->result()->IsStackSlot());
+      Operand src = ToOperand(instr->result());
+      __ movsxlq(kScratchRegister, src);
+      __ movq(src, kScratchRegister);
+    }
+  }
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+  if (jump_table_.length() == 0) return !is_aborted();
+
+  Label needs_frame;
+  Comment(";;; -------------------- Jump table --------------------");
+  for (int i = 0; i < jump_table_.length(); i++) {
+    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+    __ bind(&table_entry->label);
+    Address entry = table_entry->address;
+    DeoptComment(table_entry->deopt_info);
+    if (table_entry->needs_frame) {
+      DCHECK(!info()->saves_caller_doubles());
+      __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
+      __ call(&needs_frame);
+    } else {
+      if (info()->saves_caller_doubles()) {
+        DCHECK(info()->IsStub());
+        RestoreCallerDoubles();
+      }
+      __ call(entry, RelocInfo::RUNTIME_ENTRY);
+    }
+    info()->LogDeoptCallPosition(masm()->pc_offset(),
+                                 table_entry->deopt_info.inlining_id);
+  }
+
+  if (needs_frame.is_linked()) {
+    __ bind(&needs_frame);
+    /* stack layout
+       4: return address  <-- rsp
+       3: garbage
+       2: garbage
+       1: garbage
+       0: garbage
+    */
+    // Reserve space for context and stub marker.
+    __ subp(rsp, Immediate(2 * kPointerSize));
+    __ Push(MemOperand(rsp, 2 * kPointerSize));  // Copy return address.
+    __ Push(kScratchRegister);  // Save entry address for ret(0)
+
+    /* stack layout
+       4: return address
+       3: garbage
+       2: garbage
+       1: return address
+       0: entry address  <-- rsp
+    */
+
+    // Remember context pointer.
+    __ movp(kScratchRegister,
+            MemOperand(rbp, StandardFrameConstants::kContextOffset));
+    // Save context pointer into the stack frame.
+    __ movp(MemOperand(rsp, 3 * kPointerSize), kScratchRegister);
+
+    // Create a stack frame.
+    __ movp(MemOperand(rsp, 4 * kPointerSize), rbp);
+    __ leap(rbp, MemOperand(rsp, 4 * kPointerSize));
+
+    // This variant of deopt can only be used with stubs. Since we don't
+    // have a function pointer to install in the stack frame that we're
+    // building, install a special marker there instead.
+    DCHECK(info()->IsStub());
+    __ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));
+
+    /* stack layout
+       4: old rbp
+       3: context pointer
+       2: stub marker
+       1: return address
+       0: entry address  <-- rsp
+    */
+    __ ret(0);
+  }
+
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  DCHECK(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(
+          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
+        frame_is_built_ = true;
+        // Build the frame in such a way that rsi isn't trashed.
+        __ pushq(rbp);  // Caller's frame pointer.
+        __ Push(Operand(rbp, StandardFrameConstants::kContextOffset));
+        __ Push(Smi::FromInt(StackFrame::STUB));
+        __ leap(rbp, Operand(rsp, 2 * kPointerSize));
+        Comment(";;; Deferred code");
+      }
+      code->Generate();
+      if (NeedsDeferredFrame()) {
+        __ bind(code->done());
+        Comment(";;; Destroy frame");
+        DCHECK(frame_is_built_);
+        frame_is_built_ = false;
+        __ movp(rsp, rbp);
+        __ popq(rbp);
+      }
+      __ jmp(code->exit());
+    }
+  }
+
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  DCHECK(is_done());
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::from_code(index);
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(int index) const {
+  return XMMRegister::from_code(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  DCHECK(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  DCHECK(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsExternalConstant(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsExternal();
+}
+
+
+bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
+  return op->IsConstantOperand() &&
+      chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
+}
+
+
+bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+                                   const Representation& r) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
+  return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return Smi::FromInt(constant->Integer32Value());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+
+ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasExternalReferenceValue());
+  return constant->ExternalReferenceValue();
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+
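+// Maps a negative parameter index to its offset from rsp when no frame was
+// built. E.g. ArgumentsOffsetWithoutFrame(-1) == kPCOnStackSize, the slot
+// immediately above the return address.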
+static int ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
+  return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) const {
+  // Does not handle registers. In X64 assembler, plain registers are not
+  // representable as an Operand.
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return Operand(rbp, StackSlotOffset(op->index()));
+  } else {
+    // No eager frame was built, so retrieve the parameter relative to the
+    // stack pointer.
+    return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+
+  WriteTranslation(environment->outer(), translation);
+  WriteTranslationFrame(environment, translation);
+
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    AddToTranslation(
+        environment, translation, value, environment->HasTaggedValueAt(i),
+        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+  }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation,
+                                LOperand* op,
+                                bool is_tagged,
+                                bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment,
+                       translation,
+                       value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer,
+                       dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    if (is_tagged) {
+      translation->StoreStackSlot(index);
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(index);
+    } else {
+      translation->StoreInt32StackSlot(index);
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    translation->StoreDoubleStackSlot(index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    XMMRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode,
+                               int argc) {
+  DCHECK(instr != NULL);
+  __ call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
+
+  // Signal that we don't inline smi code before these stubs in the
+  // optimizing code generator.
+  if (code->kind() == Code::BINARY_OP_IC ||
+      code->kind() == Code::COMPARE_IC) {
+    __ nop();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr,
+                           SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+  DCHECK(instr->HasPointerMap());
+
+  __ CallRuntime(function, num_arguments, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    if (!ToRegister(context).is(rsi)) {
+      __ movp(rsi, ToRegister(context));
+    }
+  } else if (context->IsStackSlot()) {
+    __ movp(rsi, ToOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);
+
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, environment->zone());
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            Deoptimizer::BailoutType bailout_type) {
+  LEnvironment* environment = instr->environment();
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  DCHECK(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+    return;
+  }
+
+  if (DeoptEveryNTimes()) {
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+    Label no_deopt;
+    __ pushfq();
+    __ pushq(rax);
+    Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
+    __ movl(rax, count_operand);
+    __ subl(rax, Immediate(1));
+    __ j(not_zero, &no_deopt, Label::kNear);
+    if (FLAG_trap_on_deopt) __ int3();
+    __ movl(rax, Immediate(FLAG_deopt_every_n_times));
+    __ movl(count_operand, rax);
+    __ popq(rax);
+    __ popfq();
+    DCHECK(frame_is_built_);
+    __ call(entry, RelocInfo::RUNTIME_ENTRY);
+    __ bind(&no_deopt);
+    __ movl(count_operand, rax);
+    __ popq(rax);
+    __ popfq();
+  }
+
+  if (info()->ShouldTrapOnDeopt()) {
+    Label done;
+    if (cc != no_condition) {
+      __ j(NegateCondition(cc), &done, Label::kNear);
+    }
+    __ int3();
+    __ bind(&done);
+  }
+
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
+  DCHECK(info()->IsStub() || frame_is_built_);
+  // Go through jump table if we need to handle condition, build frame, or
+  // restore caller doubles.
+  if (cc == no_condition && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
+    DeoptComment(deopt_info);
+    __ call(entry, RelocInfo::RUNTIME_ENTRY);
+    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
+  } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
+                                            !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+        jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
+    }
+    if (cc == no_condition) {
+      __ jmp(&jump_table_.last().label);
+    } else {
+      __ j(cc, &jump_table_.last().label);
+    }
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason) {
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode, int argc) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), argc, Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
+  DCHECK(kind == expected_safepoint_kind_);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+      kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
+          label->block_id(),
+          LabelType(label));
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->result()).is(rax));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
+  HMod* hmod = instr->hydrogen();
+  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
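+  // Worked example (illustrative): for divisor == +/-8 the mask is 7, and a
+  // negative dividend such as -13 takes the path below:
+  //   negl -> 13, andl 7 -> 5, negl -> -5, matching JS -13 % 8 == -5.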
+  Label dividend_is_not_negative, done;
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ testl(dividend, dividend);
+    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+    // Note that this is correct even for kMinInt operands.
+    __ negl(dividend);
+    __ andl(dividend, Immediate(mask));
+    __ negl(dividend);
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+    }
+    __ jmp(&done, Label::kNear);
+  }
+
+  __ bind(&dividend_is_not_negative);
+  __ andl(dividend, Immediate(mask));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(rax));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  __ TruncatingDiv(dividend, Abs(divisor));
+  __ imull(rdx, rdx, Immediate(Abs(divisor)));
+  __ movl(rax, dividend);
+  __ subl(rax, rdx);
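+  // The three instructions above compute
+  //   dividend - trunc(dividend / |divisor|) * |divisor|
+  // which equals dividend % divisor because the remainder's sign follows
+  // the dividend; TruncatingDiv is assumed to leave the truncated quotient
+  // in rdx via its multiply/shift magic-number sequence.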
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ j(not_zero, &remainder_not_zero, Label::kNear);
+    __ cmpl(dividend, Immediate(0));
+    DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  HMod* hmod = instr->hydrogen();
+
+  Register left_reg = ToRegister(instr->left());
+  DCHECK(left_reg.is(rax));
+  Register right_reg = ToRegister(instr->right());
+  DCHECK(!right_reg.is(rax));
+  DCHECK(!right_reg.is(rdx));
+  Register result_reg = ToRegister(instr->result());
+  DCHECK(result_reg.is(rdx));
+
+  Label done;
+  // Check for x % 0; idiv would signal a divide error. We have to
+  // deopt in this case because we can't return a NaN.
+  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ testl(right_reg, right_reg);
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for kMinInt % -1; idiv would signal a divide error. We
+  // have to deopt if we care about -0, because we can't return that.
+  if (hmod->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    __ cmpl(left_reg, Immediate(kMinInt));
+    __ j(not_zero, &no_overflow_possible, Label::kNear);
+    __ cmpl(right_reg, Immediate(-1));
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
+    } else {
+      __ j(not_equal, &no_overflow_possible, Label::kNear);
+      __ Set(result_reg, 0);
+      __ jmp(&done, Label::kNear);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  // Sign extend dividend in eax into edx:eax, since we are using only the low
+  // 32 bits of the values.
+  __ cdq();
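+  // idivl divides edx:eax by its operand, leaving the quotient in eax and
+  // the remainder in edx; the cdq above provides the sign extension.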
+
+  // If we care about -0, test if the dividend is <0 and the result is 0.
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label positive_left;
+    __ testl(left_reg, left_reg);
+    __ j(not_sign, &positive_left, Label::kNear);
+    __ idivl(right_reg);
+    __ testl(result_reg, result_reg);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+    __ jmp(&done, Label::kNear);
+    __ bind(&positive_left);
+  }
+  __ idivl(right_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
+  if (divisor == 1) return;
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 1) {
+    __ sarl(dividend, Immediate(shift));
+    return;
+  }
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+  __ negl(dividend);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+    }
+    return;
+  }
+
+  // If the negation could not overflow, simply shifting is OK.
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ sarl(dividend, Immediate(shift));
+    return;
+  }
+
+  Label not_kmin_int, done;
+  __ j(no_overflow, &not_kmin_int, Label::kNear);
+  __ movl(dividend, Immediate(kMinInt / divisor));
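+  // This path is taken when negl overflowed, i.e. the dividend was kMinInt;
+  // e.g. for divisor == -4 the quotient kMinInt / -4 == 536870912 is
+  // materialized directly because the shift below would compute it with the
+  // wrong sign.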
+  __ jmp(&done, Label::kNear);
+  __ bind(&not_kmin_int);
+  __ sarl(dividend, Immediate(shift));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(rdx));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ testl(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(dividend, Abs(divisor));
+    if (divisor < 0) __ negl(rdx);
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
+  Register temp = ToRegister(instr->temp3());
+  DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
+  Label needs_adjustment, done;
+  __ cmpl(dividend, Immediate(0));
+  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+  __ TruncatingDiv(dividend, Abs(divisor));
+  if (divisor < 0) __ negl(rdx);
+  __ jmp(&done, Label::kNear);
+  __ bind(&needs_adjustment);
+  __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(temp, Abs(divisor));
+  if (divisor < 0) __ negl(rdx);
+  __ decl(rdx);
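+  // Adjustment sketch: floor(n / d) == trunc((n +/- 1) / d) - 1 when n and
+  // d have opposite signs and d does not divide n; e.g. n == -7, d == 2
+  // gives trunc(-6 / 2) - 1 == -4 == floor(-3.5).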
+  __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  Register remainder = ToRegister(instr->temp());
+  Register result = ToRegister(instr->result());
+  DCHECK(dividend.is(rax));
+  DCHECK(remainder.is(rdx));
+  DCHECK(result.is(rax));
+  DCHECK(!divisor.is(rax));
+  DCHECK(!divisor.is(rdx));
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ testl(divisor, divisor);
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ testl(dividend, dividend);
+    __ j(not_zero, &dividend_not_zero, Label::kNear);
+    __ testl(divisor, divisor);
+    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label dividend_not_min_int;
+    __ cmpl(dividend, Immediate(kMinInt));
+    __ j(not_zero, &dividend_not_min_int, Label::kNear);
+    __ cmpl(divisor, Immediate(-1));
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+    __ bind(&dividend_not_min_int);
+  }
+
+  // Sign extend to rdx (= remainder).
+  __ cdq();
+  __ idivl(divisor);
+
+  Label done;
+  __ testl(remainder, remainder);
+  __ j(zero, &done, Label::kNear);
+  __ xorl(remainder, divisor);
+  __ sarl(remainder, Immediate(31));
+  __ addl(result, remainder);
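+  // The xor/sar/add above subtracts 1 from the truncated quotient exactly
+  // when the remainder and divisor have opposite signs: e.g. -7 / 2 yields
+  // quotient -3, remainder -1, and (-1 ^ 2) >> 31 == -1, so the result
+  // becomes -4 == floor(-3.5).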
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ testl(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    __ cmpl(dividend, Immediate(kMinInt));
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+  }
+  // Deoptimize if remainder will not be 0.
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+      divisor != 1 && divisor != -1) {
+    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+    __ testl(dividend, Immediate(mask));
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+  }
+  __ Move(result, dividend);
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (shift > 0) {
+    // The arithmetic shift is always OK, the 'if' is an optimization only.
+    if (shift > 1) __ sarl(result, Immediate(31));
+    __ shrl(result, Immediate(32 - shift));
+    __ addl(result, dividend);
+    __ sarl(result, Immediate(shift));
+  }
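+  // Rounding sketch: for a negative dividend the sar/shr pair materializes
+  // 2^shift - 1, which the add folds in so the final sar truncates toward
+  // zero; e.g. dividend == -13, shift == 3: (-13 + 7) >> 3 == -1 ==
+  // trunc(-13 / 8).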
+  if (divisor < 0) __ negl(result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(rdx));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ testl(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+
+  __ TruncatingDiv(dividend, Abs(divisor));
+  if (divisor < 0) __ negl(rdx);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    __ movl(rax, rdx);
+    __ imull(rax, rax, Immediate(divisor));
+    __ subl(rax, dividend);
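+    // Exactness check: the quotient times the divisor is recomputed and the
+    // dividend subtracted; subl sets ZF, so not_equal (not_zero) deopts
+    // whenever the division had a nonzero remainder.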
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  Register remainder = ToRegister(instr->temp());
+  DCHECK(dividend.is(rax));
+  DCHECK(remainder.is(rdx));
+  DCHECK(ToRegister(instr->result()).is(rax));
+  DCHECK(!divisor.is(rax));
+  DCHECK(!divisor.is(rdx));
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ testl(divisor, divisor);
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ testl(dividend, dividend);
+    __ j(not_zero, &dividend_not_zero, Label::kNear);
+    __ testl(divisor, divisor);
+    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label dividend_not_min_int;
+    __ cmpl(dividend, Immediate(kMinInt));
+    __ j(not_zero, &dividend_not_min_int, Label::kNear);
+    __ cmpl(divisor, Immediate(-1));
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+    __ bind(&dividend_not_min_int);
+  }
+
+  // Sign extend to rdx (= remainder).
+  __ cdq();
+  __ idivl(divisor);
+
+  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    // Deoptimize if remainder is not 0.
+    __ testl(remainder, remainder);
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register left = ToRegister(instr->left());
+  LOperand* right = instr->right();
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    if (instr->hydrogen_value()->representation().IsSmi()) {
+      __ movp(kScratchRegister, left);
+    } else {
+      __ movl(kScratchRegister, left);
+    }
+  }
+
+  bool can_overflow =
+      instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  if (right->IsConstantOperand()) {
+    int32_t right_value = ToInteger32(LConstantOperand::cast(right));
+    if (right_value == -1) {
+      __ negl(left);
+    } else if (right_value == 0) {
+      __ xorl(left, left);
+    } else if (right_value == 2) {
+      __ addl(left, left);
+    } else if (!can_overflow) {
+      // If the multiplication is known not to overflow, we can use
+      // instructions that don't set the overflow flag correctly,
+      // since the flag is never checked on this path.
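+      // lea computes base + index * scale without touching the flags,
+      // so it can stand in for imul here; e.g. case 3 below encodes
+      // left + left * 2 in a single instruction.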
+      switch (right_value) {
+        case 1:
+          // Do nothing.
+          break;
+        case 3:
+          __ leal(left, Operand(left, left, times_2, 0));
+          break;
+        case 4:
+          __ shll(left, Immediate(2));
+          break;
+        case 5:
+          __ leal(left, Operand(left, left, times_4, 0));
+          break;
+        case 8:
+          __ shll(left, Immediate(3));
+          break;
+        case 9:
+          __ leal(left, Operand(left, left, times_8, 0));
+          break;
+        case 16:
+          __ shll(left, Immediate(4));
+          break;
+        default:
+          __ imull(left, left, Immediate(right_value));
+          break;
+      }
+    } else {
+      __ imull(left, left, Immediate(right_value));
+    }
+  } else if (right->IsStackSlot()) {
+    if (instr->hydrogen_value()->representation().IsSmi()) {
+      __ SmiToInteger64(left, left);
+      __ imulp(left, ToOperand(right));
+    } else {
+      __ imull(left, ToOperand(right));
+    }
+  } else {
+    if (instr->hydrogen_value()->representation().IsSmi()) {
+      __ SmiToInteger64(left, left);
+      __ imulp(left, ToRegister(right));
+    } else {
+      __ imull(left, ToRegister(right));
+    }
+  }
+
+  if (can_overflow) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bail out if the result is supposed to be negative zero.
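+    // In JS, e.g. 0 * -3 must yield -0, which an int32 cannot represent;
+    // the copy of the original left operand saved in kScratchRegister is
+    // OR-ed with the other operand below, so a set sign bit flags -0.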
+    Label done;
+    if (instr->hydrogen_value()->representation().IsSmi()) {
+      __ testp(left, left);
+    } else {
+      __ testl(left, left);
+    }
+    __ j(not_zero, &done, Label::kNear);
+    if (right->IsConstantOperand()) {
+      // Constant can't be represented as 32-bit Smi due to immediate size
+      // limit.
+      DCHECK(SmiValuesAre32Bits()
+          ? !instr->hydrogen_value()->representation().IsSmi()
+          : SmiValuesAre31Bits());
+      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
+        DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
+        __ cmpl(kScratchRegister, Immediate(0));
+        DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+      }
+    } else if (right->IsStackSlot()) {
+      if (instr->hydrogen_value()->representation().IsSmi()) {
+        __ orp(kScratchRegister, ToOperand(right));
+      } else {
+        __ orl(kScratchRegister, ToOperand(right));
+      }
+      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    } else {
+      // Test the non-zero operand for negative sign.
+      if (instr->hydrogen_value()->representation().IsSmi()) {
+        __ orp(kScratchRegister, ToRegister(right));
+      } else {
+        __ orl(kScratchRegister, ToRegister(right));
+      }
+      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
+
+  if (right->IsConstantOperand()) {
+    int32_t right_operand =
+        ToRepresentation(LConstantOperand::cast(right),
+                         instr->hydrogen()->right()->representation());
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ andl(ToRegister(left), Immediate(right_operand));
+        break;
+      case Token::BIT_OR:
+        __ orl(ToRegister(left), Immediate(right_operand));
+        break;
+      case Token::BIT_XOR:
+        if (right_operand == int32_t(~0)) {
+          __ notl(ToRegister(left));
+        } else {
+          __ xorl(ToRegister(left), Immediate(right_operand));
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else if (right->IsStackSlot()) {
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        if (instr->IsInteger32()) {
+          __ andl(ToRegister(left), ToOperand(right));
+        } else {
+          __ andp(ToRegister(left), ToOperand(right));
+        }
+        break;
+      case Token::BIT_OR:
+        if (instr->IsInteger32()) {
+          __ orl(ToRegister(left), ToOperand(right));
+        } else {
+          __ orp(ToRegister(left), ToOperand(right));
+        }
+        break;
+      case Token::BIT_XOR:
+        if (instr->IsInteger32()) {
+          __ xorl(ToRegister(left), ToOperand(right));
+        } else {
+          __ xorp(ToRegister(left), ToOperand(right));
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    DCHECK(right->IsRegister());
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        if (instr->IsInteger32()) {
+          __ andl(ToRegister(left), ToRegister(right));
+        } else {
+          __ andp(ToRegister(left), ToRegister(right));
+        }
+        break;
+      case Token::BIT_OR:
+        if (instr->IsInteger32()) {
+          __ orl(ToRegister(left), ToRegister(right));
+        } else {
+          __ orp(ToRegister(left), ToRegister(right));
+        }
+        break;
+      case Token::BIT_XOR:
+        if (instr->IsInteger32()) {
+          __ xorl(ToRegister(left), ToRegister(right));
+        } else {
+          __ xorp(ToRegister(left), ToRegister(right));
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
+  if (right->IsRegister()) {
+    DCHECK(ToRegister(right).is(rcx));
+
+    switch (instr->op()) {
+      case Token::ROR:
+        __ rorl_cl(ToRegister(left));
+        break;
+      case Token::SAR:
+        __ sarl_cl(ToRegister(left));
+        break;
+      case Token::SHR:
+        __ shrl_cl(ToRegister(left));
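+        // A shift count of 0 (mod 32) leaves the value unchanged; a set
+        // sign bit then cannot be represented as the int32 result of >>>,
+        // hence the deopt on negative below.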
+        if (instr->can_deopt()) {
+          __ testl(ToRegister(left), ToRegister(left));
+          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case Token::SHL:
+        __ shll_cl(ToRegister(left));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    int32_t value = ToInteger32(LConstantOperand::cast(right));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
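+    // Masking with 0x1F matches both the hardware shift behavior and JS
+    // semantics, where shift counts are taken mod 32; e.g. (1 << 33) === 2.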
+    switch (instr->op()) {
+      case Token::ROR:
+        if (shift_count != 0) {
+          __ rorl(ToRegister(left), Immediate(shift_count));
+        }
+        break;
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sarl(ToRegister(left), Immediate(shift_count));
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ shrl(ToRegister(left), Immediate(shift_count));
+        } else if (instr->can_deopt()) {
+          __ testl(ToRegister(left), ToRegister(left));
+          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          if (instr->hydrogen_value()->representation().IsSmi()) {
+            if (SmiValuesAre32Bits()) {
+              __ shlp(ToRegister(left), Immediate(shift_count));
+            } else {
+              DCHECK(SmiValuesAre31Bits());
+              if (instr->can_deopt()) {
+                if (shift_count != 1) {
+                  __ shll(ToRegister(left), Immediate(shift_count - 1));
+                }
+                __ Integer32ToSmi(ToRegister(left), ToRegister(left));
+                DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+              } else {
+                __ shll(ToRegister(left), Immediate(shift_count));
+              }
+            }
+          } else {
+            __ shll(ToRegister(left), Immediate(shift_count));
+          }
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+
+  if (right->IsConstantOperand()) {
+    int32_t right_operand =
+        ToRepresentation(LConstantOperand::cast(right),
+                         instr->hydrogen()->right()->representation());
+    __ subl(ToRegister(left), Immediate(right_operand));
+  } else if (right->IsRegister()) {
+    if (instr->hydrogen_value()->representation().IsSmi()) {
+      __ subp(ToRegister(left), ToRegister(right));
+    } else {
+      __ subl(ToRegister(left), ToRegister(right));
+    }
+  } else {
+    if (instr->hydrogen_value()->representation().IsSmi()) {
+      __ subp(ToRegister(left), ToOperand(right));
+    } else {
+      __ subl(ToRegister(left), ToOperand(right));
+    }
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  Register dst = ToRegister(instr->result());
+  if (instr->value() == 0) {
+    __ xorl(dst, dst);
+  } else {
+    __ movl(dst, Immediate(instr->value()));
+  }
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+  __ Move(ToRegister(instr->result()), instr->value());
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  __ Move(ToDoubleRegister(instr->result()), instr->bits());
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+  __ LoadAddress(ToRegister(instr->result()), instr->value());
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Handle<Object> object = instr->value(isolate());
+  AllowDeferredHandleDereference smi_check;
+  __ Move(ToRegister(instr->result()), object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->value());
+  __ EnumLength(result, map);
+}
+
+
+Operand LCodeGen::BuildSeqStringOperand(Register string,
+                                        LOperand* index,
+                                        String::Encoding encoding) {
+  if (index->IsConstantOperand()) {
+    int offset = ToInteger32(LConstantOperand::cast(index));
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset *= kUC16Size;
+    }
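+    // E.g. index 5 of a two-byte string maps to byte offset 10 from the
+    // first character (kUC16Size == 2); one-byte strings use the index
+    // directly, as the assert below records.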
+    STATIC_ASSERT(kCharSize == 1);
+    return FieldOperand(string, SeqString::kHeaderSize + offset);
+  }
+  return FieldOperand(
+      string, ToRegister(index),
+      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
+      SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register result = ToRegister(instr->result());
+  Register string = ToRegister(instr->string());
+
+  if (FLAG_debug_code) {
+    __ Push(string);
+    __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
+    __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
+
+    __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+                              ? one_byte_seq_type : two_byte_seq_type));
+    __ Check(equal, kUnexpectedStringType);
+    __ Pop(string);
+  }
+
+  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ movzxbl(result, operand);
+  } else {
+    __ movzxwl(result, operand);
+  }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+
+  if (FLAG_debug_code) {
+    Register value = ToRegister(instr->value());
+    Register index = ToRegister(instr->index());
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    int encoding_mask =
+        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+        ? one_byte_seq_type : two_byte_seq_type;
+    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+  }
+
+  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (instr->value()->IsConstantOperand()) {
+    int value = ToInteger32(LConstantOperand::cast(instr->value()));
+    DCHECK_LE(0, value);
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      DCHECK_LE(value, String::kMaxOneByteCharCode);
+      __ movb(operand, Immediate(value));
+    } else {
+      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
+      __ movw(operand, Immediate(value));
+    }
+  } else {
+    Register value = ToRegister(instr->value());
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      __ movb(operand, value);
+    } else {
+      __ movw(operand, value);
+    }
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+
+  Representation target_rep = instr->hydrogen()->representation();
+  bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
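+  // lea never sets the CPU flags, so the lea-based path below is usable
+  // only when no overflow deopt is required (LAddI::UseLea is assumed to
+  // guarantee that); the add-based path keeps the flags for the check.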
+
+  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
+    if (right->IsConstantOperand()) {
+      // No support for smi-immediates for 32-bit smis.
+      DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
+      int32_t offset =
+          ToRepresentation(LConstantOperand::cast(right),
+                           instr->hydrogen()->right()->representation());
+      if (is_p) {
+        __ leap(ToRegister(instr->result()),
+                MemOperand(ToRegister(left), offset));
+      } else {
+        __ leal(ToRegister(instr->result()),
+                MemOperand(ToRegister(left), offset));
+      }
+    } else {
+      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
+      if (is_p) {
+        __ leap(ToRegister(instr->result()), address);
+      } else {
+        __ leal(ToRegister(instr->result()), address);
+      }
+    }
+  } else {
+    if (right->IsConstantOperand()) {
+      // No support for smi-immediates for 32-bit smis.
+      DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
+      int32_t right_operand =
+          ToRepresentation(LConstantOperand::cast(right),
+                           instr->hydrogen()->right()->representation());
+      if (is_p) {
+        __ addp(ToRegister(left), Immediate(right_operand));
+      } else {
+        __ addl(ToRegister(left), Immediate(right_operand));
+      }
+    } else if (right->IsRegister()) {
+      if (is_p) {
+        __ addp(ToRegister(left), ToRegister(right));
+      } else {
+        __ addl(ToRegister(left), ToRegister(right));
+      }
+    } else {
+      if (is_p) {
+        __ addp(ToRegister(left), ToOperand(right));
+      } else {
+        __ addl(ToRegister(left), ToOperand(right));
+      }
+    }
+    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+    }
+  }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+  HMathMinMax::Operation operation = instr->hydrogen()->operation();
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Label return_left;
+    Condition condition = (operation == HMathMinMax::kMathMin)
+        ? less_equal
+        : greater_equal;
+    Register left_reg = ToRegister(left);
+    if (right->IsConstantOperand()) {
+      Immediate right_imm = Immediate(
+          ToRepresentation(LConstantOperand::cast(right),
+                           instr->hydrogen()->right()->representation()));
+      DCHECK(SmiValuesAre32Bits()
+          ? !instr->hydrogen()->representation().IsSmi()
+          : SmiValuesAre31Bits());
+      __ cmpl(left_reg, right_imm);
+      __ j(condition, &return_left, Label::kNear);
+      __ movl(left_reg, right_imm);
+    } else if (right->IsRegister()) {
+      Register right_reg = ToRegister(right);
+      if (instr->hydrogen_value()->representation().IsSmi()) {
+        __ cmpp(left_reg, right_reg);
+      } else {
+        __ cmpl(left_reg, right_reg);
+      }
+      __ j(condition, &return_left, Label::kNear);
+      __ movp(left_reg, right_reg);
+    } else {
+      Operand right_op = ToOperand(right);
+      if (instr->hydrogen_value()->representation().IsSmi()) {
+        __ cmpp(left_reg, right_op);
+      } else {
+        __ cmpl(left_reg, right_op);
+      }
+      __ j(condition, &return_left, Label::kNear);
+      __ movp(left_reg, right_op);
+    }
+    __ bind(&return_left);
+  } else {
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    Label not_nan, distinct, return_left, return_right;
+    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
+    XMMRegister left_reg = ToDoubleRegister(left);
+    XMMRegister right_reg = ToDoubleRegister(right);
+    __ Ucomisd(left_reg, right_reg);
+    __ j(parity_odd, &not_nan, Label::kNear);  // Neither operand is NaN.
+
+    // One of the numbers is NaN. Find which one and return it.
+    __ Ucomisd(left_reg, left_reg);
+    __ j(parity_even, &return_left, Label::kNear);  // left is NaN.
+    __ jmp(&return_right, Label::kNear);            // right is NaN.
+
+    __ bind(&not_nan);
+    __ j(not_equal, &distinct, Label::kNear);  // left != right.
+
+    // left == right
+    XMMRegister xmm_scratch = double_scratch0();
+    __ Xorpd(xmm_scratch, xmm_scratch);
+    __ Ucomisd(left_reg, xmm_scratch);
+    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
+
+    // At this point, both left and right are either +0 or -0.
+    if (operation == HMathMinMax::kMathMin) {
+      __ Orpd(left_reg, right_reg);
+    } else {
+      __ Andpd(left_reg, right_reg);
+    }
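+    // min(+0, -0) must be -0, so the sign bits are OR-ed; max(+0, -0)
+    // must be +0, so they are AND-ed (the values differ only in the sign
+    // bit).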
+    __ jmp(&return_left, Label::kNear);
+
+    __ bind(&distinct);
+    __ j(condition, &return_left, Label::kNear);
+
+    __ bind(&return_right);
+    __ Movapd(left_reg, right_reg);
+
+    __ bind(&return_left);
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  XMMRegister left = ToDoubleRegister(instr->left());
+  XMMRegister right = ToDoubleRegister(instr->right());
+  XMMRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      if (CpuFeatures::IsSupported(AVX)) {
+        CpuFeatureScope scope(masm(), AVX);
+        __ vaddsd(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ addsd(left, right);
+      }
+      break;
+    case Token::SUB:
+      if (CpuFeatures::IsSupported(AVX)) {
+        CpuFeatureScope scope(masm(), AVX);
+        __ vsubsd(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ subsd(left, right);
+      }
+      break;
+    case Token::MUL:
+      if (CpuFeatures::IsSupported(AVX)) {
+        CpuFeatureScope scope(masm(), AVX);
+        __ vmulsd(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ mulsd(left, right);
+      }
+      break;
+    case Token::DIV:
+      if (CpuFeatures::IsSupported(AVX)) {
+        CpuFeatureScope scope(masm(), AVX);
+        __ vdivsd(result, left, right);
+      } else {
+        DCHECK(result.is(left));
+        __ divsd(left, right);
+      }
+      // Don't delete this mov. It may improve performance on some CPUs
+      // when there is a (v)mulsd depending on the result.
+      __ Movapd(result, result);
+      break;
+    case Token::MOD: {
+      XMMRegister xmm_scratch = double_scratch0();
+      __ PrepareCallCFunction(2);
+      __ Movapd(xmm_scratch, left);
+      DCHECK(right.is(xmm1));
+      __ CallCFunction(
+          ExternalReference::mod_two_doubles_operation(isolate()), 2);
+      __ Movapd(result, xmm_scratch);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->left()).is(rdx));
+  DCHECK(ToRegister(instr->right()).is(rax));
+  DCHECK(ToRegister(instr->result()).is(rax));
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+
+  if (right_block == left_block || cc == no_condition) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+  } else if (right_block == next_block) {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+  } else {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+    if (cc != always) {
+      __ jmp(chunk_->GetAssemblyLabel(right_block));
+    }
+  }
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
+  int true_block = instr->TrueDestination(chunk_);
+  __ j(cc, chunk_->GetAssemblyLabel(true_block));
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
+  int false_block = instr->FalseDestination(chunk_);
+  __ j(cc, chunk_->GetAssemblyLabel(false_block));
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+  __ int3();
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsInteger32()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    __ testl(reg, reg);
+    EmitBranch(instr, not_zero);
+  } else if (r.IsSmi()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    __ testp(reg, reg);
+    EmitBranch(instr, not_zero);
+  } else if (r.IsDouble()) {
+    DCHECK(!info()->IsStub());
+    XMMRegister reg = ToDoubleRegister(instr->value());
+    XMMRegister xmm_scratch = double_scratch0();
+    __ Xorpd(xmm_scratch, xmm_scratch);
+    __ Ucomisd(reg, xmm_scratch);
+    EmitBranch(instr, not_equal);
+  } else {
+    DCHECK(r.IsTagged());
+    Register reg = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      DCHECK(!info()->IsStub());
+      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+      EmitBranch(instr, equal);
+    } else if (type.IsSmi()) {
+      DCHECK(!info()->IsStub());
+      __ SmiCompare(reg, Smi::FromInt(0));
+      EmitBranch(instr, not_equal);
+    } else if (type.IsJSArray()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, no_condition);
+    } else if (type.IsHeapNumber()) {
+      DCHECK(!info()->IsStub());
+      XMMRegister xmm_scratch = double_scratch0();
+      __ Xorpd(xmm_scratch, xmm_scratch);
+      __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+      EmitBranch(instr, not_equal);
+    } else if (type.IsString()) {
+      DCHECK(!info()->IsStub());
+      __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+      EmitBranch(instr, not_equal);
+    } else {
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+        __ j(equal, instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // true -> true.
+        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+        __ j(equal, instr->TrueLabel(chunk_));
+        // false -> false.
+        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+        __ j(equal, instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ CompareRoot(reg, Heap::kNullValueRootIndex);
+        __ j(equal, instr->FalseLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ Cmp(reg, Smi::FromInt(0));
+        __ j(equal, instr->FalseLabel(chunk_));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ testb(reg, Immediate(kSmiTagMask));
+        DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+      }
+
+      const Register map = kScratchRegister;
+      if (expected.NeedsMap()) {
+        __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ testb(FieldOperand(map, Map::kBitFieldOffset),
+                   Immediate(1 << Map::kIsUndetectable));
+          __ j(not_zero, instr->FalseLabel(chunk_));
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
+        __ j(above_equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+        __ j(above_equal, &not_string, Label::kNear);
+        __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+        __ j(not_zero, instr->TrueLabel(chunk_));
+        __ jmp(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CmpInstanceType(map, SYMBOL_TYPE);
+        __ j(equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+        // SIMD value -> true.
+        __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
+        __ j(equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        Label not_heap_number;
+        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+        __ j(not_equal, &not_heap_number, Label::kNear);
+        XMMRegister xmm_scratch = double_scratch0();
+        __ Xorpd(xmm_scratch, xmm_scratch);
+        __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
+        __ j(zero, instr->FalseLabel(chunk_));
+        __ jmp(instr->TrueLabel(chunk_));
+        __ bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
+      }
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  if (!IsNextEmittedBlock(block)) {
+    __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = no_condition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = equal;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = not_equal;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? below : less;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? above : greater;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? below_equal : less_equal;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? above_equal : greater_equal;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  bool is_unsigned =
+      instr->is_double() ||
+      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+  Condition cc = TokenToCondition(instr->op(), is_unsigned);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Don't base result on EFLAGS when a NaN is involved. Instead
+      // jump to the false block.
+      __ Ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
+      __ j(parity_even, instr->FalseLabel(chunk_));
+    } else {
+      int32_t value;
+      if (right->IsConstantOperand()) {
+        value = ToInteger32(LConstantOperand::cast(right));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          __ Cmp(ToRegister(left), Smi::FromInt(value));
+        } else {
+          __ cmpl(ToRegister(left), Immediate(value));
+        }
+      } else if (left->IsConstantOperand()) {
+        value = ToInteger32(LConstantOperand::cast(left));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          if (right->IsRegister()) {
+            __ Cmp(ToRegister(right), Smi::FromInt(value));
+          } else {
+            __ Cmp(ToOperand(right), Smi::FromInt(value));
+          }
+        } else if (right->IsRegister()) {
+          __ cmpl(ToRegister(right), Immediate(value));
+        } else {
+          __ cmpl(ToOperand(right), Immediate(value));
+        }
+        // We commuted the operands, so commute the condition.
+        cc = CommuteCondition(cc);
+      } else if (instr->hydrogen_value()->representation().IsSmi()) {
+        if (right->IsRegister()) {
+          __ cmpp(ToRegister(left), ToRegister(right));
+        } else {
+          __ cmpp(ToRegister(left), ToOperand(right));
+        }
+      } else {
+        if (right->IsRegister()) {
+          __ cmpl(ToRegister(left), ToRegister(right));
+        } else {
+          __ cmpl(ToRegister(left), ToOperand(right));
+        }
+      }
+    }
+    EmitBranch(instr, cc);
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+
+  if (instr->right()->IsConstantOperand()) {
+    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
+    __ Cmp(left, right);
+  } else {
+    Register right = ToRegister(instr->right());
+    __ cmpp(left, right);
+  }
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+  if (instr->hydrogen()->representation().IsTagged()) {
+    Register input_reg = ToRegister(instr->object());
+    __ Cmp(input_reg, factory()->the_hole_value());
+    EmitBranch(instr, equal);
+    return;
+  }
+
+  XMMRegister input_reg = ToDoubleRegister(instr->object());
+  __ Ucomisd(input_reg, input_reg);
+  EmitFalseBranch(instr, parity_odd);
+
+  __ subp(rsp, Immediate(kDoubleSize));
+  __ Movsd(MemOperand(rsp, 0), input_reg);
+  __ addp(rsp, Immediate(kDoubleSize));
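+  // The double now sits at [rsp - 8, rsp). The hole is encoded as a NaN
+  // whose upper 32 bits equal kHoleNanUpper32, so the compare below
+  // inspects only that word, at rsp - offset.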
+
+  int offset = sizeof(kHoleNanUpper32);
+  __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+  Representation rep = instr->hydrogen()->value()->representation();
+  DCHECK(!rep.IsInteger32());
+
+  if (rep.IsDouble()) {
+    XMMRegister value = ToDoubleRegister(instr->value());
+    XMMRegister xmm_scratch = double_scratch0();
+    __ Xorpd(xmm_scratch, xmm_scratch);
+    __ Ucomisd(xmm_scratch, value);
+    EmitFalseBranch(instr, not_equal);
+    __ Movmskpd(kScratchRegister, value);
+    __ testl(kScratchRegister, Immediate(1));
+    EmitBranch(instr, not_zero);
+  } else {
+    Register value = ToRegister(instr->value());
+    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
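+    // -0 has an upper (exponent) word of 0x80000000 and a zero mantissa
+    // word. Comparing the exponent word with 1 sets OF only for 0x80000000
+    // (INT_MIN - 1 overflows), so no_overflow rejects every other value
+    // before the mantissa word is checked.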
+    __ cmpl(FieldOperand(value, HeapNumber::kExponentOffset),
+            Immediate(0x1));
+    EmitFalseBranch(instr, no_overflow);
+    __ cmpl(FieldOperand(value, HeapNumber::kMantissaOffset),
+            Immediate(0x00000000));
+    EmitBranch(instr, equal);
+  }
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+
+  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+  return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->type().IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
+  Condition true_cond = EmitIsString(
+      reg, temp, instr->FalseLabel(chunk_), check_needed);
+
+  EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Condition is_smi;
+  if (instr->value()->IsRegister()) {
+    Register input = ToRegister(instr->value());
+    is_smi = masm()->CheckSmi(input);
+  } else {
+    Operand input = ToOperand(instr->value());
+    is_smi = masm()->CheckSmi(input);
+  }
+  EmitBranch(instr, is_smi);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
+  __ testb(FieldOperand(temp, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsUndetectable));
+  EmitBranch(instr, not_zero);
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->left()).is(rdx));
+  DCHECK(ToRegister(instr->right()).is(rax));
+
+  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+  __ testp(rax, rax);
+
+  EmitBranch(instr, TokenToCondition(instr->op(), false));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  DCHECK(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return equal;
+  if (to == LAST_TYPE) return above_equal;
+  if (from == FIRST_TYPE) return below_equal;
+  UNREACHABLE();
+  return equal;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+
+  __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
+  EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ AssertString(input);
+
+  __ movl(result, FieldOperand(input, String::kHashFieldOffset));
+  DCHECK(String::kHashShift >= kSmiTagSize);
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+
+  __ testl(FieldOperand(input, String::kHashFieldOffset),
+           Immediate(String::kContainsCachedArrayIndexMask));
+  EmitBranch(instr, equal);
+}
+
+
+// Branches to a label or falls through with the answer in the z flag.
+// Trashes the temp register.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
+
+  __ JumpIfSmi(input, is_false);
+
+  __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+    __ j(equal, is_true);
+  } else {
+    __ j(equal, is_false);
+  }
+
+  // Check if the constructor in the map is a function.
+  __ GetMapConstructor(temp, temp, kScratchRegister);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
+    __ j(not_equal, is_true);
+  } else {
+    __ j(not_equal, is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ movp(temp, FieldOperand(temp,
+                             SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are internalized it is sufficient to use an
+  // identity comparison.
+  DCHECK(class_name->IsInternalizedString());
+  __ Cmp(temp, class_name);
+  // End with the answer in the z flag.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+      class_name, input, temp, temp2);
+
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+
+  __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+  DCHECK(ToRegister(instr->result()).is(rax));
+  InstanceOfStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+    LHasInPrototypeChainAndBranch* instr) {
+  Register const object = ToRegister(instr->object());
+  Register const object_map = kScratchRegister;
+  Register const object_prototype = object_map;
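+  // {object_prototype} deliberately aliases {object_map}: the loop below
+  // alternates between loading the prototype from the map and the map from
+  // the prototype, so a single scratch register suffices.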
+  Register const prototype = ToRegister(instr->prototype());
+
+  // The {object} must be a spec object.  It's sufficient to know that {object}
+  // is not a smi, since all other non-spec objects have {null} prototypes and
+  // will be ruled out below.
+  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+    Condition is_smi = __ CheckSmi(object);
+    EmitFalseBranch(instr, is_smi);
+  }
+
+  // Loop through the {object}'s prototype chain looking for the {prototype}.
+  __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
+  Label loop;
+  __ bind(&loop);
+
+  // Deoptimize if the object needs to be access checked.
+  __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded));
+  DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+  // Deoptimize for proxies.
+  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+  DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+
+  __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
+  __ cmpp(object_prototype, prototype);
+  EmitTrueBranch(instr, equal);
+  __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
+  EmitFalseBranch(instr, equal);
+  __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+  __ jmp(&loop);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  Token::Value op = instr->op();
+
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = TokenToCondition(op, false);
+  Label true_value, done;
+  __ testp(rax, rax);
+  __ j(condition, &true_value, Label::kNear);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+  __ jmp(&done, Label::kNear);
+  __ bind(&true_value);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // Preserve the return value on the stack and rely on the runtime call
+    // to return the value in the same register.  Since we're leaving the
+    // code managed by the register allocator and tearing down the frame,
+    // it's safe to write to the context register.
+    __ Push(rax);
+    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    __ CallRuntime(Runtime::kTraceExit);
+  }
+  if (info()->saves_caller_doubles()) {
+    RestoreCallerDoubles();
+  }
+  if (NeedsEagerFrame()) {
+    __ movp(rsp, rbp);
+    __ popq(rbp);
+  }
+  if (instr->has_constant_parameter_count()) {
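+    // Drop the parameters plus one extra slot for the receiver.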
+    __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
+           rcx);
+  } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
+    Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi.
+    __ SmiToInteger32(reg, reg);
+    Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
+    __ PopReturnAddressTo(return_addr_reg);
+    __ shlp(reg, Immediate(kPointerSizeLog2));
+    __ addp(rsp, reg);
+    __ jmp(return_addr_reg);
+  }
+}
+
+
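+// Materializes the constant type feedback vector and the Smi-tagged slot
+// index in the registers required by the vector load IC calling convention.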
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(rax));
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Move(vector_register, vector);
+  // No need to allocate the slot register; it is fixed by the calling
+  // convention.
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ Move(slot_register, Smi::FromInt(index));
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = ToRegister(instr->temp_slot());
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ Move(vector_register, vector);
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ Move(slot_register, Smi::FromInt(index));
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(rax));
+
+  __ Move(LoadDescriptor::NameRegister(), instr->name());
+  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+                                         SLOPPY, PREMONOMORPHIC).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ movp(result, ContextOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+    } else {
+      Label is_not_hole;
+      __ j(not_equal, &is_not_hole, Label::kNear);
+      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+      __ bind(&is_not_hole);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+
+  Operand target = ContextOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+    } else {
+      __ j(not_equal, &skip_assignment);
+    }
+  }
+  __ movp(target, value);
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+      instr->hydrogen()->value()->type().IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    int offset = Context::SlotOffset(instr->slot_index());
+    Register scratch = ToRegister(instr->temp());
+    __ RecordWriteContextSlot(context,
+                              offset,
+                              value,
+                              scratch,
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
+  }
+
+  __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    Register result = ToRegister(instr->result());
+    if (instr->object()->IsConstantOperand()) {
+      DCHECK(result.is(rax));
+      __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
+    } else {
+      Register object = ToRegister(instr->object());
+      __ Load(result, MemOperand(object, offset), access.representation());
+    }
+    return;
+  }
+
+  Register object = ToRegister(instr->object());
+  if (instr->hydrogen()->representation().IsDouble()) {
+    DCHECK(access.IsInobject());
+    XMMRegister result = ToDoubleRegister(instr->result());
+    __ Movsd(result, FieldOperand(object, offset));
+    return;
+  }
+
+  Register result = ToRegister(instr->result());
+  if (!access.IsInobject()) {
+    __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
+    object = result;
+  }
+
+  Representation representation = access.representation();
+  if (representation.IsSmi() && SmiValuesAre32Bits() &&
+      instr->hydrogen()->representation().IsInteger32()) {
+    if (FLAG_debug_code) {
+      Register scratch = kScratchRegister;
+      __ Load(scratch, FieldOperand(object, offset), representation);
+      __ AssertSmi(scratch);
+    }
+
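+    // With 32-bit smis (SmiValuesAre32Bits), a smi stores its payload in the
+    // upper 32 bits of the word: Smi::FromInt(7) is 0x0000000700000000. On
+    // little-endian x64 that payload sits at offset + kPointerSize / 2.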
+    // Read int value directly from upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
+    offset += kPointerSize / 2;
+    representation = Representation::Integer32();
+  }
+  __ Load(result, FieldOperand(object, offset), representation);
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(rax));
+
+  __ Move(LoadDescriptor::NameRegister(), instr->name());
+  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(
+          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
+          instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // Get the prototype or initial map from the function.
+  __ movp(result,
+         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+  DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
+  __ j(not_equal, &done, Label::kNear);
+
+  // Get the prototype from the initial map.
+  __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+
+  if (instr->length()->IsConstantOperand() &&
+      instr->index()->IsConstantOperand()) {
+    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    if (const_index >= 0 && const_index < const_length) {
+      StackArgumentsAccessor args(arguments, const_length,
+                                  ARGUMENTS_DONT_CONTAIN_RECEIVER);
+      __ movp(result, args.GetArgumentOperand(const_index));
+    } else if (FLAG_debug_code) {
+      __ int3();
+    }
+  } else {
+    Register length = ToRegister(instr->length());
+    // There are two words between the frame pointer and the last argument.
+    // Subtracting from length accounts for one of them; add one more.
+    if (instr->index()->IsRegister()) {
+      __ subl(length, ToRegister(instr->index()));
+    } else {
+      __ subl(length, ToOperand(instr->index()));
+    }
+    StackArgumentsAccessor args(arguments, length,
+                                ARGUMENTS_DONT_CONTAIN_RECEIVER);
+    __ movp(result, args.GetArgumentOperand(0));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = instr->key();
+  if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
+    Register key_reg = ToRegister(key);
+    Representation key_representation =
+        instr->hydrogen()->key()->representation();
+    if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
+      __ SmiToInteger64(key_reg, key_reg);
+    } else if (instr->hydrogen()->IsDehoisted()) {
+      // Sign extend the key because it could be a 32-bit negative value and
+      // the dehoisted address computation happens in 64 bits.
+      __ movsxlq(key_reg, key_reg);
+    }
+  }
+  Operand operand(BuildFastArrayOperand(
+      instr->elements(),
+      key,
+      instr->hydrogen()->key()->representation(),
+      elements_kind,
+      instr->base_offset()));
+
+  if (elements_kind == FLOAT32_ELEMENTS) {
+    XMMRegister result(ToDoubleRegister(instr->result()));
+    __ Cvtss2sd(result, operand);
+  } else if (elements_kind == FLOAT64_ELEMENTS) {
+    __ Movsd(ToDoubleRegister(instr->result()), operand);
+  } else {
+    Register result(ToRegister(instr->result()));
+    switch (elements_kind) {
+      case INT8_ELEMENTS:
+        __ movsxbl(result, operand);
+        break;
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ movzxbl(result, operand);
+        break;
+      case INT16_ELEMENTS:
+        __ movsxwl(result, operand);
+        break;
+      case UINT16_ELEMENTS:
+        __ movzxwl(result, operand);
+        break;
+      case INT32_ELEMENTS:
+        __ movl(result, operand);
+        break;
+      case UINT32_ELEMENTS:
+        __ movl(result, operand);
+        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+          __ testl(result, result);
+          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+  XMMRegister result(ToDoubleRegister(instr->result()));
+  LOperand* key = instr->key();
+  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+      instr->hydrogen()->IsDehoisted()) {
+    // Sign extend the key because it could be a 32-bit negative value and
+    // the dehoisted address computation happens in 64 bits.
+    __ movsxlq(ToRegister(key), ToRegister(key));
+  }
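+  // The hole is a distinguished NaN bit pattern; other NaNs are canonicalized
+  // on store (see NeedsCanonicalization in DoStoreKeyedFixedDoubleArray), so
+  // comparing just the upper 32 bits (the high word on little-endian x64,
+  // hence the sizeof(kHoleNanLower32) adjustment) detects the hole.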
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    Operand hole_check_operand = BuildFastArrayOperand(
+        instr->elements(),
+        key,
+        instr->hydrogen()->key()->representation(),
+        FAST_DOUBLE_ELEMENTS,
+        instr->base_offset() + sizeof(kHoleNanLower32));
+    __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
+    DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+  }
+
+  Operand double_load_operand = BuildFastArrayOperand(
+      instr->elements(),
+      key,
+      instr->hydrogen()->key()->representation(),
+      FAST_DOUBLE_ELEMENTS,
+      instr->base_offset());
+  __ Movsd(result, double_load_operand);
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+  HLoadKeyed* hinstr = instr->hydrogen();
+  Register result = ToRegister(instr->result());
+  LOperand* key = instr->key();
+  bool requires_hole_check = hinstr->RequiresHoleCheck();
+  Representation representation = hinstr->representation();
+  int offset = instr->base_offset();
+
+  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+      instr->hydrogen()->IsDehoisted()) {
+    // Sign extend the key because it could be a 32-bit negative value and
+    // the dehoisted address computation happens in 64 bits.
+    __ movsxlq(ToRegister(key), ToRegister(key));
+  }
+  if (representation.IsInteger32() && SmiValuesAre32Bits() &&
+      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+    DCHECK(!requires_hole_check);
+    if (FLAG_debug_code) {
+      Register scratch = kScratchRegister;
+      __ Load(scratch,
+              BuildFastArrayOperand(instr->elements(),
+                                    key,
+                                    instr->hydrogen()->key()->representation(),
+                                    FAST_ELEMENTS,
+                                    offset),
+              Representation::Smi());
+      __ AssertSmi(scratch);
+    }
+    // Read int value directly from upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
+    offset += kPointerSize / 2;
+  }
+
+  __ Load(result,
+          BuildFastArrayOperand(instr->elements(), key,
+                                instr->hydrogen()->key()->representation(),
+                                FAST_ELEMENTS, offset),
+          representation);
+
+  // Check for the hole value.
+  if (requires_hole_check) {
+    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
+      Condition smi = __ CheckSmi(result);
+      DeoptimizeIf(NegateCondition(smi), instr, Deoptimizer::kNotASmi);
+    } else {
+      __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+    }
+  } else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+    DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
+    Label done;
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    __ j(not_equal, &done);
+    if (info()->IsStub()) {
+      // A stub can safely convert the hole to undefined only if the array
+      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // it needs to bail out.
+      __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
+      __ Cmp(FieldOperand(result, Cell::kValueOffset),
+             Smi::FromInt(Isolate::kArrayProtectorValid));
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
+    }
+    __ Move(result, isolate()->factory()->undefined_value());
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+  if (instr->is_fixed_typed_array()) {
+    DoLoadKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->representation().IsDouble()) {
+    DoLoadKeyedFixedDoubleArray(instr);
+  } else {
+    DoLoadKeyedFixedArray(instr);
+  }
+}
+
+
+Operand LCodeGen::BuildFastArrayOperand(
+    LOperand* elements_pointer,
+    LOperand* key,
+    Representation key_representation,
+    ElementsKind elements_kind,
+    uint32_t offset) {
+  Register elements_pointer_reg = ToRegister(elements_pointer);
+  int shift_size = ElementsKindToShiftSize(elements_kind);
+  if (key->IsConstantOperand()) {
+    int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
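+    // shift_size is at most 3 (8-byte elements), so requiring the top four
+    // bits to be clear guarantees that constant_value << shift_size cannot
+    // overflow the 32-bit displacement.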
+    if (constant_value & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    return Operand(elements_pointer_reg,
+                   (constant_value << shift_size) + offset);
+  } else {
+    // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
+    DCHECK(key_representation.IsInteger32());
+
+    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+    return Operand(elements_pointer_reg,
+                   ToRegister(key),
+                   scale_factor,
+                   offset);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+                        isolate(), instr->hydrogen()->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
+    __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
+  } else {
+    // Check for arguments adapter frame.
+    Label done, adapted;
+    __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+    __ Cmp(Operand(result, StandardFrameConstants::kContextOffset),
+           Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+    __ j(equal, &adapted, Label::kNear);
+
+    // No arguments adaptor frame.
+    __ movp(result, rbp);
+    __ jmp(&done, Label::kNear);
+
+    // Arguments adaptor frame present.
+    __ bind(&adapted);
+    __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+
+    // Result is the frame pointer for the frame if not adapted and for the real
+    // frame below the adaptor frame if adapted.
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  if (instr->elements()->IsRegister()) {
+    __ cmpp(rbp, ToRegister(instr->elements()));
+  } else {
+    __ cmpp(rbp, ToOperand(instr->elements()));
+  }
+  __ movl(result, Immediate(scope()->num_parameters()));
+  __ j(equal, &done, Label::kNear);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
+  __ SmiToInteger32(result,
+                    Operand(result,
+                            ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, receiver_ok;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+
+  if (!instr->hydrogen()->known_function()) {
+    // Do not transform the receiver to object for strict mode
+    // functions.
+    __ movp(kScratchRegister,
+            FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    __ testb(FieldOperand(kScratchRegister,
+                          SharedFunctionInfo::kStrictModeByteOffset),
+             Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+    __ j(not_equal, &receiver_ok, dist);
+
+    // Do not transform the receiver to object for builtins.
+    __ testb(FieldOperand(kScratchRegister,
+                          SharedFunctionInfo::kNativeByteOffset),
+             Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
+    __ j(not_equal, &receiver_ok, dist);
+  }
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ CompareRoot(receiver, Heap::kNullValueRootIndex);
+  __ j(equal, &global_object, Label::kNear);
+  __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
+  __ j(equal, &global_object, Label::kNear);
+
+  // The receiver should be a JS object.
+  Condition is_smi = __ CheckSmi(receiver);
+  DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
+  __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
+  DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
+
+  __ jmp(&receiver_ok, Label::kNear);
+  __ bind(&global_object);
+  __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
+  __ movp(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
+  __ movp(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
+
+  __ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  DCHECK(receiver.is(rax));  // Used for parameter count.
+  DCHECK(function.is(rdi));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(rax));
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  __ cmpp(length, Immediate(kArgumentsLimit));
+  DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
+
+  __ Push(receiver);
+  __ movp(receiver, length);
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ testl(length, length);
+  __ j(zero, &invoke, Label::kNear);
+  __ bind(&loop);
+  StackArgumentsAccessor args(elements, length,
+                              ARGUMENTS_DONT_CONTAIN_RECEIVER);
+  __ Push(args.GetArgumentOperand(0));
+  __ decl(length);
+  __ j(not_zero, &loop);
+
+  // Invoke the function.
+  __ bind(&invoke);
+  DCHECK(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  ParameterCount actual(rax);
+  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+                    safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->value();
+  EmitPushTaggedOperand(argument);
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  Register result = ToRegister(instr->result());
+  if (info()->IsOptimizing()) {
+    __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in rsi.
+    DCHECK(result.is(rsi));
+  }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  __ Push(instr->hydrogen()->pairs());
+  __ Push(Smi::FromInt(instr->hydrogen()->flags()));
+  CallRuntime(Runtime::kDeclareGlobals, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count, int arity,
+                                 LInstruction* instr) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+
+  Register function_reg = rdi;
+  LPointerMap* pointers = instr->pointer_map();
+
+  if (can_invoke_directly) {
+    // Change context.
+    __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
+
+    // Always initialize new target and number of actual arguments.
+    __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+    __ Set(rax, arity);
+
+    // Invoke function.
+    if (function.is_identical_to(info()->closure())) {
+      __ CallSelf();
+    } else {
+      __ Call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+    }
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
+  } else {
+    // We need to adapt arguments.
+    SafepointGenerator generator(
+        this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(function_reg, no_reg, expected, count, CALL_FUNCTION,
+                      generator);
+  }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  DCHECK(ToRegister(instr->result()).is(rax));
+
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ leave();
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ jmp(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ jmp(target);
+    }
+  } else {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code));
+      __ call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ call(target);
+    }
+    generator.AfterCall();
+  }
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+  DCHECK(ToRegister(instr->function()).is(rdi));
+  DCHECK(ToRegister(instr->result()).is(rax));
+
+  // Change context.
+  __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
+
+  // Always initialize new target and number of actual arguments.
+  __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
+  __ Set(rax, instr->arity());
+
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+  bool is_self_call = false;
+  if (instr->hydrogen()->function()->IsConstant()) {
+    Handle<JSFunction> jsfun = Handle<JSFunction>::null();
+    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+    jsfun = Handle<JSFunction>::cast(fun_const->handle(isolate()));
+    is_self_call = jsfun.is_identical_to(info()->closure());
+  }
+
+  if (is_self_call) {
+    __ CallSelf();
+  } else {
+    Operand target = FieldOperand(rdi, JSFunction::kCodeEntryOffset);
+    generator.BeforeCall(__ CallSize(target));
+    __ Call(target);
+  }
+  generator.AfterCall();
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  Register input_reg = ToRegister(instr->value());
+  __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+                 Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+
+  Label slow, allocated, done;
+  Register tmp = input_reg.is(rax) ? rcx : rax;
+  Register tmp2 = tmp.is(rcx) ? rdx : input_reg.is(rcx) ? rdx : rcx;
+
+  // Preserve the value of all registers.
+  PushSafepointRegistersScope scope(this);
+
+  __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it. We do not need to patch the stack since |input| and
+  // |result| are the same register and |input| will be restored
+  // unchanged by popping safepoint registers.
+  __ testl(tmp, Immediate(HeapNumber::kSignMask));
+  __ j(zero, &done);
+
+  __ AllocateHeapNumber(tmp, tmp2, &slow);
+  __ jmp(&allocated, Label::kNear);
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateHeapNumber, 0, instr, instr->context());
+  // Set the pointer to the new heap number in tmp.
+  if (!tmp.is(rax)) __ movp(tmp, rax);
+  // Restore input_reg after call to runtime.
+  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
+
+  __ bind(&allocated);
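+  // Clear the sign bit of the raw 64-bit double payload: shifting left and
+  // then logically right by one zeroes the top bit, producing |input|.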
+  __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ shlq(tmp2, Immediate(1));
+  __ shrq(tmp2, Immediate(1));
+  __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
+  __ StoreToSafepointRegisterSlot(input_reg, tmp);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+  Register input_reg = ToRegister(instr->value());
+  __ testl(input_reg, input_reg);
+  Label is_positive;
+  __ j(not_sign, &is_positive, Label::kNear);
+  __ negl(input_reg);  // Sets flags.
+  DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+  __ bind(&is_positive);
+}
+
+
+void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
+  Register input_reg = ToRegister(instr->value());
+  __ testp(input_reg, input_reg);
+  Label is_positive;
+  __ j(not_sign, &is_positive, Label::kNear);
+  __ negp(input_reg);  // Sets flags.
+  DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+  __ bind(&is_positive);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMathAbs* instr_;
+  };
+
+  DCHECK(instr->value()->Equals(instr->result()));
+  Representation r = instr->hydrogen()->value()->representation();
+
+  if (r.IsDouble()) {
+    XMMRegister scratch = double_scratch0();
+    XMMRegister input_reg = ToDoubleRegister(instr->value());
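+    // 0.0 - x shares x's magnitude bits with the sign flipped, so ANDing the
+    // two clears the sign bit and leaves |x|.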
+    __ Xorpd(scratch, scratch);
+    __ Subsd(scratch, input_reg);
+    __ Andpd(input_reg, scratch);
+  } else if (r.IsInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else if (r.IsSmi()) {
+    EmitSmiMathAbs(instr);
+  } else {  // Tagged case.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input_reg = ToRegister(instr->value());
+    // Smi check.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+    EmitSmiMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+  XMMRegister xmm_scratch = double_scratch0();
+  Register output_reg = ToRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+
+  if (CpuFeatures::IsSupported(SSE4_1)) {
+    CpuFeatureScope scope(masm(), SSE4_1);
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Deoptimize if minus zero.
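+      // -0.0 is the only double whose bit pattern equals INT64_MIN, so
+      // subtracting 1 from the raw bits overflows exactly for minus zero.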
+      __ Movq(output_reg, input_reg);
+      __ subq(output_reg, Immediate(1));
+      DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero);
+    }
+    __ Roundsd(xmm_scratch, input_reg, kRoundDown);
+    __ Cvttsd2si(output_reg, xmm_scratch);
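+    // Cvttsd2si returns INT32_MIN on overflow or NaN; 'output - 1' sets the
+    // overflow flag only for that sentinel value.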
+    __ cmpl(output_reg, Immediate(0x1));
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  } else {
+    Label negative_sign, done;
+    // Deoptimize on unordered.
+    __ Xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
+    __ Ucomisd(input_reg, xmm_scratch);
+    DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+    __ j(below, &negative_sign, Label::kNear);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Check for negative zero.
+      Label positive_sign;
+      __ j(above, &positive_sign, Label::kNear);
+      __ Movmskpd(output_reg, input_reg);
+      __ testl(output_reg, Immediate(1));
+      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+      __ Set(output_reg, 0);
+      __ jmp(&done);
+      __ bind(&positive_sign);
+    }
+
+    // Use truncating instruction (OK because input is positive).
+    __ Cvttsd2si(output_reg, input_reg);
+    // Overflow is signalled with minint.
+    __ cmpl(output_reg, Immediate(0x1));
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+    __ jmp(&done, Label::kNear);
+
+    // Non-zero negative values reach here.
+    __ bind(&negative_sign);
+    // Truncate, then compare and compensate.
+    __ Cvttsd2si(output_reg, input_reg);
+    __ Cvtlsi2sd(xmm_scratch, output_reg);
+    __ Ucomisd(input_reg, xmm_scratch);
+    __ j(equal, &done, Label::kNear);
+    __ subl(output_reg, Immediate(1));
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+  const XMMRegister xmm_scratch = double_scratch0();
+  Register output_reg = ToRegister(instr->result());
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister input_temp = ToDoubleRegister(instr->temp());
+  static int64_t one_half = V8_INT64_C(0x3FE0000000000000);  // 0.5
+  static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5
+
+  Label done, round_to_zero, below_one_half;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+  __ movq(kScratchRegister, one_half);
+  __ Movq(xmm_scratch, kScratchRegister);
+  __ Ucomisd(xmm_scratch, input_reg);
+  __ j(above, &below_one_half, Label::kNear);
+
+  // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
+  __ Addsd(xmm_scratch, input_reg);
+  __ Cvttsd2si(output_reg, xmm_scratch);
+  // Overflow is signalled with minint.
+  __ cmpl(output_reg, Immediate(0x1));
+  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  __ jmp(&done, dist);
+
+  __ bind(&below_one_half);
+  __ movq(kScratchRegister, minus_one_half);
+  __ Movq(xmm_scratch, kScratchRegister);
+  __ Ucomisd(xmm_scratch, input_reg);
+  __ j(below_equal, &round_to_zero, Label::kNear);
+
+  // CVTTSD2SI rounds towards zero, so we use ceil(x - (-0.5)) and then
+  // compare and compensate.
+  __ Movapd(input_temp, input_reg);  // Do not alter input_reg.
+  __ Subsd(input_temp, xmm_scratch);
+  __ Cvttsd2si(output_reg, input_temp);
+  // Catch minint due to overflow, and to prevent overflow when compensating.
+  __ cmpl(output_reg, Immediate(0x1));
+  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+
+  __ Cvtlsi2sd(xmm_scratch, output_reg);
+  __ Ucomisd(xmm_scratch, input_temp);
+  __ j(equal, &done, dist);
+  __ subl(output_reg, Immediate(1));
+  // No overflow because we already ruled out minint.
+  __ jmp(&done, dist);
+
+  __ bind(&round_to_zero);
+  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+  // we can ignore the difference between a result of -0 and +0.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ Movq(output_reg, input_reg);
+    __ testq(output_reg, output_reg);
+    DeoptimizeIf(negative, instr, Deoptimizer::kMinusZero);
+  }
+  __ Set(output_reg, 0);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister output_reg = ToDoubleRegister(instr->result());
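+  // Round to float32 precision and widen back to double, which is exactly
+  // the semantics of Math.fround.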
+  __ Cvtsd2ss(output_reg, input_reg);
+  __ Cvtss2sd(output_reg, output_reg);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  XMMRegister output = ToDoubleRegister(instr->result());
+  if (instr->value()->IsDoubleRegister()) {
+    XMMRegister input = ToDoubleRegister(instr->value());
+    __ Sqrtsd(output, input);
+  } else {
+    Operand input = ToOperand(instr->value());
+    __ Sqrtsd(output, input);
+  }
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  XMMRegister xmm_scratch = double_scratch0();
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done, sqrt;
+  // Check base for -Infinity.  According to IEEE-754, double-precision
+  // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
+  __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
+  __ Movq(xmm_scratch, kScratchRegister);
+  __ Ucomisd(xmm_scratch, input_reg);
+  // Comparing -Infinity with NaN results in "unordered", which sets the
+  // zero flag as if both were equal.  However, it also sets the carry flag.
+  __ j(not_equal, &sqrt, Label::kNear);
+  __ j(carry, &sqrt, Label::kNear);
+  // If input is -Infinity, return Infinity.
+  __ Xorpd(input_reg, input_reg);
+  __ Subsd(input_reg, xmm_scratch);
+  __ jmp(&done, Label::kNear);
+
+  // Square root.
+  __ bind(&sqrt);
+  __ Xorpd(xmm_scratch, xmm_scratch);
+  __ Addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
+  __ Sqrtsd(input_reg, input_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(!instr->right()->IsDoubleRegister() ||
+         ToDoubleRegister(instr->right()).is(xmm1));
+  DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
+  DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
+
+  if (exponent_type.IsSmi()) {
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
+    __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+    __ bind(&no_deopt);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(isolate(), MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+    __ CallStub(&stub);
+  }
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  XMMRegister input = ToDoubleRegister(instr->value());
+  XMMRegister result = ToDoubleRegister(instr->result());
+  XMMRegister temp0 = double_scratch0();
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+  DCHECK(instr->value()->Equals(instr->result()));
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister xmm_scratch = double_scratch0();
+  Label positive, done, zero;
+  __ Xorpd(xmm_scratch, xmm_scratch);
+  __ Ucomisd(input_reg, xmm_scratch);
+  __ j(above, &positive, Label::kNear);
+  __ j(not_carry, &zero, Label::kNear);
+  __ Pcmpeqd(input_reg, input_reg);
+  __ jmp(&done, Label::kNear);
+  __ bind(&zero);
+  ExternalReference ninf =
+      ExternalReference::address_of_negative_infinity();
+  Operand ninf_operand = masm()->ExternalOperand(ninf);
+  __ Movsd(input_reg, ninf_operand);
+  __ jmp(&done, Label::kNear);
+  __ bind(&positive);
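+  // Compute ln(x) on the x87 stack: fldln2 pushes ln(2), and fyl2x computes
+  // st(1) * log2(st(0)), i.e. ln(2) * log2(x) == ln(x).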
+  __ fldln2();
+  __ subp(rsp, Immediate(kDoubleSize));
+  __ Movsd(Operand(rsp, 0), input_reg);
+  __ fld_d(Operand(rsp, 0));
+  __ fyl2x();
+  __ fstp_d(Operand(rsp, 0));
+  __ Movsd(input_reg, Operand(rsp, 0));
+  __ addp(rsp, Immediate(kDoubleSize));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ Lzcntl(result, input);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->function()).is(rdi));
+  DCHECK(instr->HasPointerMap());
+
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(rdi, no_reg, count, CALL_FUNCTION, generator);
+  } else {
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
+                      instr->arity(), instr);
+  }
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->function()).is(rdi));
+  DCHECK(ToRegister(instr->result()).is(rax));
+
+  int arity = instr->arity();
+  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    Register slot_register = ToRegister(instr->temp_slot());
+    Register vector_register = ToRegister(instr->temp_vector());
+    DCHECK(slot_register.is(rdx));
+    DCHECK(vector_register.is(rbx));
+
+    AllowDeferredHandleDereference vector_structure_check;
+    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+    int index = vector->GetIndex(instr->hydrogen()->slot());
+
+    __ Move(vector_register, vector);
+    __ Move(slot_register, Smi::FromInt(index));
+
+    Handle<Code> ic =
+        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ Set(rax, arity);
+    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->constructor()).is(rdi));
+  DCHECK(ToRegister(instr->result()).is(rax));
+
+  __ Set(rax, instr->arity());
+  if (instr->arity() == 1) {
+    // We only need the allocation site for the case where we have a length
+    // argument. That case may bail out to the runtime, which will determine
+    // the correct elements kind with the site.
+    __ Move(rbx, instr->hydrogen()->site());
+  } else {
+    __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+  }
+
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+      // We might need a change here: look at the first argument.
+      __ movp(rcx, Operand(rsp, 0));
+      __ testp(rcx, rcx);
+      __ j(zero, &packed_case, Label::kNear);
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(),
+                                              holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      __ jmp(&done, Label::kNear);
+      __ bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    __ bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
+  __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+    __ leap(result, Operand(base, ToInteger32(offset)));
+  } else {
+    Register offset = ToRegister(instr->offset());
+    __ leap(result, Operand(base, offset, times_1, 0));
+  }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  HStoreNamedField* hinstr = instr->hydrogen();
+  Representation representation = instr->representation();
+
+  HObjectAccess access = hinstr->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    DCHECK(!hinstr->NeedsWriteBarrier());
+    Register value = ToRegister(instr->value());
+    if (instr->object()->IsConstantOperand()) {
+      DCHECK(value.is(rax));
+      LConstantOperand* object = LConstantOperand::cast(instr->object());
+      __ store_rax(ToExternalReference(object));
+    } else {
+      Register object = ToRegister(instr->object());
+      __ Store(MemOperand(object, offset), value, representation);
+    }
+    return;
+  }
+
+  Register object = ToRegister(instr->object());
+  __ AssertNotSmi(object);
+
+  DCHECK(!representation.IsSmi() ||
+         !instr->value()->IsConstantOperand() ||
+         IsInteger32Constant(LConstantOperand::cast(instr->value())));
+  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DCHECK(!hinstr->has_transition());
+    DCHECK(!hinstr->NeedsWriteBarrier());
+    XMMRegister value = ToDoubleRegister(instr->value());
+    __ Movsd(FieldOperand(object, offset), value);
+    return;
+  }
+
+  if (hinstr->has_transition()) {
+    Handle<Map> transition = hinstr->transition_map();
+    AddDeprecationDependency(transition);
+    if (!hinstr->NeedsWriteBarrierForMap()) {
+      __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
+    } else {
+      Register temp = ToRegister(instr->temp());
+      __ Move(kScratchRegister, transition);
+      __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
+      // Update the write barrier for the map field.
+      __ RecordWriteForMap(object,
+                           kScratchRegister,
+                           temp,
+                           kSaveFPRegs);
+    }
+  }
+
+  // Do the store.
+  Register write_register = object;
+  if (!access.IsInobject()) {
+    write_register = ToRegister(instr->temp());
+    __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
+  }
+
+  if (representation.IsSmi() && SmiValuesAre32Bits() &&
+      hinstr->value()->representation().IsInteger32()) {
+    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    if (FLAG_debug_code) {
+      Register scratch = kScratchRegister;
+      __ Load(scratch, FieldOperand(write_register, offset), representation);
+      __ AssertSmi(scratch);
+    }
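+    // Mirror of the load case: with 32-bit smis the payload occupies the
+    // upper half of the word, so writing the int32 there leaves a valid smi.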
+    // Store int value directly to upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
+    offset += kPointerSize / 2;
+    representation = Representation::Integer32();
+  }
+
+  Operand operand = FieldOperand(write_register, offset);
+
+  if (FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    XMMRegister value = ToDoubleRegister(instr->value());
+    __ Movsd(operand, value);
+
+  } else if (instr->value()->IsRegister()) {
+    Register value = ToRegister(instr->value());
+    __ Store(operand, value, representation);
+  } else {
+    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+    if (IsInteger32Constant(operand_value)) {
+      DCHECK(!hinstr->NeedsWriteBarrier());
+      int32_t value = ToInteger32(operand_value);
+      if (representation.IsSmi()) {
+        __ Move(operand, Smi::FromInt(value));
+
+      } else {
+        __ movl(operand, Immediate(value));
+      }
+
+    } else if (IsExternalConstant(operand_value)) {
+      DCHECK(!hinstr->NeedsWriteBarrier());
+      ExternalReference ptr = ToExternalReference(operand_value);
+      __ Move(kScratchRegister, ptr);
+      __ movp(operand, kScratchRegister);
+    } else {
+      Handle<Object> handle_value = ToHandle(operand_value);
+      DCHECK(!hinstr->NeedsWriteBarrier());
+      __ Move(operand, handle_value);
+    }
+  }
+
+  if (hinstr->NeedsWriteBarrier()) {
+    Register value = ToRegister(instr->value());
+    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
+    // Update the write barrier for the object for in-object properties.
+    __ RecordWriteField(write_register,
+                        offset,
+                        value,
+                        temp,
+                        kSaveFPRegs,
+                        EMIT_REMEMBERED_SET,
+                        hinstr->SmiCheckForWriteBarrier(),
+                        hinstr->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+  }
+
+  __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
+  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Representation representation = instr->hydrogen()->length()->representation();
+  DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
+  DCHECK(representation.IsSmiOrInteger32());
+
+  Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
+  if (instr->length()->IsConstantOperand()) {
+    int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
+    Register index = ToRegister(instr->index());
+    if (representation.IsSmi()) {
+      __ Cmp(index, Smi::FromInt(length));
+    } else {
+      __ cmpl(index, Immediate(length));
+    }
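+    // The comparison above is index <op> length, with the operands swapped
+    // relative to the other branches, so the condition must be commuted.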
+    cc = CommuteCondition(cc);
+  } else if (instr->index()->IsConstantOperand()) {
+    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
+    if (instr->length()->IsRegister()) {
+      Register length = ToRegister(instr->length());
+      if (representation.IsSmi()) {
+        __ Cmp(length, Smi::FromInt(index));
+      } else {
+        __ cmpl(length, Immediate(index));
+      }
+    } else {
+      Operand length = ToOperand(instr->length());
+      if (representation.IsSmi()) {
+        __ Cmp(length, Smi::FromInt(index));
+      } else {
+        __ cmpl(length, Immediate(index));
+      }
+    }
+  } else {
+    Register index = ToRegister(instr->index());
+    if (instr->length()->IsRegister()) {
+      Register length = ToRegister(instr->length());
+      if (representation.IsSmi()) {
+        __ cmpp(length, index);
+      } else {
+        __ cmpl(length, index);
+      }
+    } else {
+      Operand length = ToOperand(instr->length());
+      if (representation.IsSmi()) {
+        __ cmpp(length, index);
+      } else {
+        __ cmpl(length, index);
+      }
+    }
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+    Label done;
+    __ j(NegateCondition(cc), &done, Label::kNear);
+    __ int3();
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = instr->key();
+  if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
+    Register key_reg = ToRegister(key);
+    Representation key_representation =
+        instr->hydrogen()->key()->representation();
+    if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
+      __ SmiToInteger64(key_reg, key_reg);
+    } else if (instr->hydrogen()->IsDehoisted()) {
+      // Sign-extend the key because it could be a 32-bit negative value and
+      // the dehoisted address computation happens in 64 bits.
+      __ movsxlq(key_reg, key_reg);
+    }
+  }
+  Operand operand(BuildFastArrayOperand(
+      instr->elements(),
+      key,
+      instr->hydrogen()->key()->representation(),
+      elements_kind,
+      instr->base_offset()));
+
+  if (elements_kind == FLOAT32_ELEMENTS) {
+    XMMRegister value(ToDoubleRegister(instr->value()));
+    __ Cvtsd2ss(value, value);
+    __ Movss(operand, value);
+  } else if (elements_kind == FLOAT64_ELEMENTS) {
+    __ Movsd(operand, ToDoubleRegister(instr->value()));
+  } else {
+    Register value(ToRegister(instr->value()));
+    switch (elements_kind) {
+      case INT8_ELEMENTS:
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ movb(operand, value);
+        break;
+      case INT16_ELEMENTS:
+      case UINT16_ELEMENTS:
+        __ movw(operand, value);
+        break;
+      case INT32_ELEMENTS:
+      case UINT32_ELEMENTS:
+        __ movl(operand, value);
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  XMMRegister value = ToDoubleRegister(instr->value());
+  LOperand* key = instr->key();
+  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+      instr->hydrogen()->IsDehoisted()) {
+    // Sign-extend the key because it could be a 32-bit negative value and
+    // the dehoisted address computation happens in 64 bits.
+    __ movsxlq(ToRegister(key), ToRegister(key));
+  }
+  if (instr->NeedsCanonicalization()) {
+    XMMRegister xmm_scratch = double_scratch0();
+    // Turn potential sNaN value into qNaN.
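+    // Subtracting +0 preserves every input value (including -0) but quiets
+    // a signaling NaN.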
+    __ Xorpd(xmm_scratch, xmm_scratch);
+    __ Subsd(value, xmm_scratch);
+  }
+
+  Operand double_store_operand = BuildFastArrayOperand(
+      instr->elements(),
+      key,
+      instr->hydrogen()->key()->representation(),
+      FAST_DOUBLE_ELEMENTS,
+      instr->base_offset());
+
+  __ Movsd(double_store_operand, value);
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+  HStoreKeyed* hinstr = instr->hydrogen();
+  LOperand* key = instr->key();
+  int offset = instr->base_offset();
+  Representation representation = hinstr->value()->representation();
+
+  if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
+      instr->hydrogen()->IsDehoisted()) {
+    // Sign-extend the key because it could be a 32-bit negative value and
+    // the dehoisted address computation happens in 64 bits.
+    __ movsxlq(ToRegister(key), ToRegister(key));
+  }
+  if (representation.IsInteger32() && SmiValuesAre32Bits()) {
+    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+    if (FLAG_debug_code) {
+      Register scratch = kScratchRegister;
+      __ Load(scratch,
+              BuildFastArrayOperand(instr->elements(),
+                                    key,
+                                    instr->hydrogen()->key()->representation(),
+                                    FAST_ELEMENTS,
+                                    offset),
+              Representation::Smi());
+      __ AssertSmi(scratch);
+    }
+    // Store the int value directly into the upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
+    offset += kPointerSize / 2;
+  }
+
+  Operand operand =
+      BuildFastArrayOperand(instr->elements(),
+                            key,
+                            instr->hydrogen()->key()->representation(),
+                            FAST_ELEMENTS,
+                            offset);
+  if (instr->value()->IsRegister()) {
+    __ Store(operand, ToRegister(instr->value()), representation);
+  } else {
+    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+    if (IsInteger32Constant(operand_value)) {
+      int32_t value = ToInteger32(operand_value);
+      if (representation.IsSmi()) {
+        __ Move(operand, Smi::FromInt(value));
+      } else {
+        __ movl(operand, Immediate(value));
+      }
+    } else {
+      Handle<Object> handle_value = ToHandle(operand_value);
+      __ Move(operand, handle_value);
+    }
+  }
+
+  if (hinstr->NeedsWriteBarrier()) {
+    Register elements = ToRegister(instr->elements());
+    DCHECK(instr->value()->IsRegister());
+    Register value = ToRegister(instr->value());
+    DCHECK(!key->IsConstantOperand());
+    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute the address of the modified element and store it into the
+    // key register.
+    Register key_reg(ToRegister(key));
+    __ leap(key_reg, operand);
+    __ RecordWrite(elements,
+                   key_reg,
+                   value,
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed,
+                   hinstr->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  if (instr->is_fixed_typed_array()) {
+    DoStoreKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+    DoStoreKeyedFixedDoubleArray(instr);
+  } else {
+    DoStoreKeyedFixedArray(instr);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+  class DeferredMaybeGrowElements final : public LDeferredCode {
+   public:
+    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMaybeGrowElements* instr_;
+  };
+
+  Register result = rax;
+  DeferredMaybeGrowElements* deferred =
+      new (zone()) DeferredMaybeGrowElements(this, instr);
+  LOperand* key = instr->key();
+  LOperand* current_capacity = instr->current_capacity();
+
+  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+  DCHECK(key->IsConstantOperand() || key->IsRegister());
+  DCHECK(current_capacity->IsConstantOperand() ||
+         current_capacity->IsRegister());
+
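+  // Jump to the deferred (grow) path when key >= current_capacity, using the
+  // cheapest comparison form for the operand kinds at hand.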
+  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    if (constant_key >= constant_capacity) {
+      // Deferred case.
+      __ jmp(deferred->entry());
+    }
+  } else if (key->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    __ cmpl(ToRegister(current_capacity), Immediate(constant_key));
+    __ j(less_equal, deferred->entry());
+  } else if (current_capacity->IsConstantOperand()) {
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    __ cmpl(ToRegister(key), Immediate(constant_capacity));
+    __ j(greater_equal, deferred->entry());
+  } else {
+    __ cmpl(ToRegister(key), ToRegister(current_capacity));
+    __ j(greater_equal, deferred->entry());
+  }
+
+  if (instr->elements()->IsRegister()) {
+    __ movp(result, ToRegister(instr->elements()));
+  } else {
+    __ movp(result, ToOperand(instr->elements()));
+  }
+
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = rax;
+  __ Move(result, Smi::FromInt(0));
+
+  // We have to call a stub.
+  {
+    PushSafepointRegistersScope scope(this);
+    if (instr->object()->IsConstantOperand()) {
+      LConstantOperand* constant_object =
+          LConstantOperand::cast(instr->object());
+      if (IsSmiConstant(constant_object)) {
+        Smi* immediate = ToSmi(constant_object);
+        __ Move(result, immediate);
+      } else {
+        Handle<Object> handle_value = ToHandle(constant_object);
+        __ Move(result, handle_value);
+      }
+    } else if (instr->object()->IsRegister()) {
+      __ Move(result, ToRegister(instr->object()));
+    } else {
+      __ movp(result, ToOperand(instr->object()));
+    }
+
+    LOperand* key = instr->key();
+    if (key->IsConstantOperand()) {
+      __ Move(rbx, ToSmi(LConstantOperand::cast(key)));
+    } else {
+      __ Move(rbx, ToRegister(key));
+      __ Integer32ToSmi(rbx, rbx);
+    }
+
+    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+                               instr->hydrogen()->kind());
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
+    __ StoreToSafepointRegisterSlot(result, result);
+  }
+
+  // Deopt on smi, which means the elements array changed to dictionary mode.
+  Condition is_smi = __ CheckSmi(result);
+  DeoptimizeIf(is_smi, instr, Deoptimizer::kSmi);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = instr->from_kind();
+  ElementsKind to_kind = instr->to_kind();
+
+  Label not_applicable;
+  __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+  __ j(not_equal, &not_applicable);
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
+    __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
+    // Write barrier.
+    __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
+                         kDontSaveFPRegs);
+  } else {
+    DCHECK(object_reg.is(rax));
+    DCHECK(ToRegister(instr->context()).is(rsi));
+    PushSafepointRegistersScope scope(this);
+    __ Move(rbx, to_map);
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+  DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
+  __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->left()).is(rdx));
+  DCHECK(ToRegister(instr->right()).is(rax));
+  StringAddStub stub(isolate(),
+                     instr->hydrogen()->flags(),
+                     instr->hydrogen()->pretenure_flag());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt final : public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new(zone()) DeferredStringCharCodeAt(this, instr);
+
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Set(result, 0);
+
+  PushSafepointRegistersScope scope(this);
+  __ Push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+  if (instr->index()->IsConstantOperand()) {
+    int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    __ Push(Smi::FromInt(const_index));
+  } else {
+    Register index = ToRegister(instr->index());
+    __ Integer32ToSmi(index, index);
+    __ Push(index);
+  }
+  CallRuntimeFromDeferred(
+      Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
+  __ AssertSmi(rax);
+  __ SmiToInteger32(rax, rax);
+  __ StoreToSafepointRegisterSlot(result, rax);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode final : public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredStringCharFromCode(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new(zone()) DeferredStringCharFromCode(this, instr);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  DCHECK(!char_code.is(result));
+
+  __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
+  __ j(above, deferred->entry());
+  __ movsxlq(char_code, char_code);
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+  __ movp(result, FieldOperand(result,
+                               char_code, times_pointer_size,
+                               FixedArray::kHeaderSize));
+  __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
+  __ j(equal, deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Set(result, 0);
+
+  PushSafepointRegistersScope scope(this);
+  __ Integer32ToSmi(char_code, char_code);
+  __ Push(char_code);
+  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(result, rax);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  DCHECK(output->IsDoubleRegister());
+  if (input->IsRegister()) {
+    __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
+  } else {
+    __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
+  }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+
+  __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI final : public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+                                       instr_->temp2(), SIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+
+  if (SmiValuesAre32Bits()) {
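+    // With 32-bit smi payloads every int32 fits in a smi, so the tag cannot
+    // overflow.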
+    __ Integer32ToSmi(reg, reg);
+  } else {
+    DCHECK(SmiValuesAre31Bits());
+    DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
+    __ Integer32ToSmi(reg, reg);
+    __ j(overflow, deferred->entry());
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+  class DeferredNumberTagU final : public LDeferredCode {
+   public:
+    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+                                       instr_->temp2(), UNSIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagU* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+
+  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+  __ cmpl(reg, Immediate(Smi::kMaxValue));
+  __ j(above, deferred->entry());
+  __ Integer32ToSmi(reg, reg);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+                                     LOperand* value,
+                                     LOperand* temp1,
+                                     LOperand* temp2,
+                                     IntegerSignedness signedness) {
+  Label done, slow;
+  Register reg = ToRegister(value);
+  Register tmp = ToRegister(temp1);
+  XMMRegister temp_xmm = ToDoubleRegister(temp2);
+
+  // Load the value into temp_xmm, which will be preserved across a potential
+  // call to the runtime (MacroAssembler::EnterExitFrameEpilogue preserves
+  // only allocatable XMM registers on x64).
+  if (signedness == SIGNED_INT32) {
+    DCHECK(SmiValuesAre31Bits());
+    // There was overflow, so bits 30 and 31 of the original integer
+    // disagree. Try to allocate a heap number in new space and store the
+    // value there. If that fails, call the runtime system.
+    __ SmiToInteger32(reg, reg);
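+    // The failed tag left the sign bit as the complement of the original
+    // bit 31; flipping it back recovers the input value.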
+    __ xorl(reg, Immediate(0x80000000));
+    __ Cvtlsi2sd(temp_xmm, reg);
+  } else {
+    DCHECK(signedness == UNSIGNED_INT32);
+    __ LoadUint32(temp_xmm, reg);
+  }
+
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(reg, tmp, &slow);
+    __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // Put a valid pointer value in the stack slot where the result
+    // register is stored, as this register is in the pointer map, but contains
+    // an integer value.
+    __ Set(reg, 0);
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    // NumberTagIU uses the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // It only calls Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(reg, rax);
+  }
+
+  // Done. Put the value in temp_xmm into the value of the allocated heap
+  // number.
+  __ bind(&done);
+  __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD final : public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagD* instr_;
+  };
+
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  Register reg = ToRegister(instr->result());
+  Register tmp = ToRegister(instr->temp());
+
+  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(reg, tmp, deferred->entry());
+  } else {
+    __ jmp(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ Move(reg, Smi::FromInt(0));
+
+  {
+    PushSafepointRegistersScope scope(this);
+    // NumberTagD uses the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // It only calls Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    __ movp(kScratchRegister, rax);
+  }
+  __ movp(reg, kScratchRegister);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  HChange* hchange = instr->hydrogen();
+  Register input = ToRegister(instr->value());
+  Register output = ToRegister(instr->result());
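+  // For uint32 inputs the valid-smi-value check must happen before tagging;
+  // for signed inputs the tag itself sets the overflow flag.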
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      hchange->value()->CheckFlag(HValue::kUint32)) {
+    Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
+    DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kOverflow);
+  }
+  __ Integer32ToSmi(output, input);
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      !hchange->value()->CheckFlag(HValue::kUint32)) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  DCHECK(instr->value()->Equals(instr->result()));
+  Register input = ToRegister(instr->value());
+  if (instr->needs_check()) {
+    Condition is_smi = __ CheckSmi(input);
+    DeoptimizeIf(NegateCondition(is_smi), instr, Deoptimizer::kNotASmi);
+  } else {
+    __ AssertSmi(input);
+  }
+  __ SmiToInteger32(input, input);
+}
+
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+                                XMMRegister result_reg, NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+  Label convert, load_smi, done;
+
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+
+    // Heap number map check.
+    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+
+    // On x64 it is safe to load at the heap number value offset before
+    // evaluating the map check, since all heap objects are at least two
+    // words long.
+    __ Movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
+
+    if (can_convert_undefined_to_nan) {
+      __ j(not_equal, &convert, Label::kNear);
+    } else {
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+    }
+
+    if (deoptimize_on_minus_zero) {
+      XMMRegister xmm_scratch = double_scratch0();
+      __ Xorpd(xmm_scratch, xmm_scratch);
+      __ Ucomisd(xmm_scratch, result_reg);
+      __ j(not_equal, &done, Label::kNear);
+      __ Movmskpd(kScratchRegister, result_reg);
+      __ testl(kScratchRegister, Immediate(1));
+      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+    }
+    __ jmp(&done, Label::kNear);
+
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+
+      // Convert undefined to NaN. Pcmpeqd of a register with itself sets
+      // all bits, which is a quiet-NaN bit pattern.
+      __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+
+      __ Pcmpeqd(result_reg, result_reg);
+      __ jmp(&done, Label::kNear);
+    }
+  } else {
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+  }
+
+  // Smi to XMM conversion.
+  __ bind(&load_smi);
+  __ SmiToInteger32(kScratchRegister, input_reg);
+  __ Cvtlsi2sd(result_reg, kScratchRegister);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
+  Register input_reg = ToRegister(instr->value());
+
+  if (instr->truncating()) {
+    Label no_heap_number, check_bools, check_false;
+
+    // Heap number map check.
+    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    __ j(not_equal, &no_heap_number, Label::kNear);
+    __ TruncateHeapNumberToI(input_reg, input_reg);
+    __ jmp(done);
+
+    __ bind(&no_heap_number);
+    // Check for Oddballs. Undefined and False are converted to zero, and
+    // True to one, for truncating conversions.
+    __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
+    __ j(not_equal, &check_bools, Label::kNear);
+    __ Set(input_reg, 0);
+    __ jmp(done);
+
+    __ bind(&check_bools);
+    __ CompareRoot(input_reg, Heap::kTrueValueRootIndex);
+    __ j(not_equal, &check_false, Label::kNear);
+    __ Set(input_reg, 1);
+    __ jmp(done);
+
+    __ bind(&check_false);
+    __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
+    DeoptimizeIf(not_equal, instr,
+                 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+    __ Set(input_reg, 0);
+  } else {
+    XMMRegister scratch = ToDoubleRegister(instr->temp());
+    DCHECK(!scratch.is(xmm0));
+    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+    __ Movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
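+    // Truncate to int32 and convert back; a mismatch means the double was
+    // not an exact int32 (lost precision), and unordered means NaN.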
+    __ Cvttsd2si(input_reg, xmm0);
+    __ Cvtlsi2sd(scratch, input_reg);
+    __ Ucomisd(xmm0, scratch);
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+    DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
+      __ testl(input_reg, input_reg);
+      __ j(not_zero, done);
+      __ Movmskpd(input_reg, xmm0);
+      __ andl(input_reg, Immediate(1));
+      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+    }
+  }
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI final : public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  DCHECK(input->Equals(instr->result()));
+  Register input_reg = ToRegister(input);
+
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiToInteger32(input_reg, input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+    __ SmiToInteger32(input_reg, input_reg);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  XMMRegister result_reg = ToDoubleRegister(result);
+
+  HValue* value = instr->hydrogen()->value();
+  NumberUntagDMode mode = value->representation().IsSmi()
+      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+  EmitNumberUntagD(instr, input_reg, result_reg, mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsDoubleRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsRegister());
+
+  XMMRegister input_reg = ToDoubleRegister(input);
+  Register result_reg = ToRegister(result);
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, input_reg);
+  } else {
+    Label lost_precision, is_nan, minus_zero, done;
+    XMMRegister xmm_scratch = double_scratch0();
+    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+    __ DoubleToI(result_reg, input_reg, xmm_scratch,
+                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
+                 &is_nan, &minus_zero, dist);
+    __ jmp(&done, dist);
+    __ bind(&lost_precision);
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+    __ bind(&is_nan);
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+    __ bind(&minus_zero);
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsDoubleRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsRegister());
+
+  XMMRegister input_reg = ToDoubleRegister(input);
+  Register result_reg = ToRegister(result);
+
+  Label lost_precision, is_nan, minus_zero, done;
+  XMMRegister xmm_scratch = double_scratch0();
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+  __ DoubleToI(result_reg, input_reg, xmm_scratch,
+               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
+               &minus_zero, dist);
+  __ jmp(&done, dist);
+  __ bind(&lost_precision);
+  DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+  __ bind(&is_nan);
+  DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+  __ bind(&minus_zero);
+  DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+  __ bind(&done);
+  __ Integer32ToSmi(result_reg, result_reg);
+  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->value();
+  Condition cc = masm()->CheckSmi(ToRegister(input));
+  DeoptimizeIf(NegateCondition(cc), instr, Deoptimizer::kNotASmi);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    LOperand* input = instr->value();
+    Condition cc = masm()->CheckSmi(ToRegister(input));
+    DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
+  }
+}
+
+
+void LCodeGen::DoCheckArrayBufferNotNeutered(
+    LCheckArrayBufferNotNeutered* instr) {
+  Register view = ToRegister(instr->view());
+
+  __ movp(kScratchRegister,
+          FieldOperand(view, JSArrayBufferView::kBufferOffset));
+  __ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
+           Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
+  DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+
+  __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+            Immediate(static_cast<int8_t>(first)));
+
+    // If there is only one type in the interval, check for equality.
+    if (first == last) {
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+    } else {
+      DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+                Immediate(static_cast<int8_t>(last)));
+        DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (base::bits::IsPowerOfTwo32(mask)) {
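+      // A single-bit mask reduces the check to one testb: the bit must be
+      // set when tag != 0 and clear when tag == 0.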
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
+      __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
+               Immediate(mask));
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
+                   Deoptimizer::kWrongInstanceType);
+    } else {
+      __ movzxbl(kScratchRegister,
+                 FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
+      __ andb(kScratchRegister, Immediate(mask));
+      __ cmpb(kScratchRegister, Immediate(tag));
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+  Register reg = ToRegister(instr->value());
+  __ Cmp(reg, instr->hydrogen()->object().handle());
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  {
+    PushSafepointRegistersScope scope(this);
+    __ Push(object);
+    __ Set(rsi, 0);
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+
+    __ testp(rax, Immediate(kSmiTagMask));
+  }
+  DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  class DeferredCheckMaps final : public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+        : LDeferredCode(codegen), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    void Generate() override {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->IsStabilityCheck()) {
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  Register reg = ToRegister(input);
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+    __ bind(deferred->check_maps());
+  }
+
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+  Label success;
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
+    __ CompareMap(reg, map);
+    __ j(equal, &success, Label::kNear);
+  }
+
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
+  __ CompareMap(reg, map);
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    __ j(not_equal, deferred->entry());
+  } else {
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+  }
+
+  __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
+  XMMRegister xmm_scratch = double_scratch0();
+  Register result_reg = ToRegister(instr->result());
+  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  DCHECK(instr->unclamped()->Equals(instr->result()));
+  Register value_reg = ToRegister(instr->result());
+  __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  DCHECK(instr->unclamped()->Equals(instr->result()));
+  Register input_reg = ToRegister(instr->unclamped());
+  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
+  XMMRegister xmm_scratch = double_scratch0();
+  Label is_smi, done, heap_number;
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+  __ JumpIfSmi(input_reg, &is_smi, dist);
+
+  // Check for heap number
+  __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         factory()->heap_number_map());
+  __ j(equal, &heap_number, Label::kNear);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ Cmp(input_reg, factory()->undefined_value());
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+  __ xorl(input_reg, input_reg);
+  __ jmp(&done, Label::kNear);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ Movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
+  __ jmp(&done, Label::kNear);
+
+  // Smi
+  __ bind(&is_smi);
+  __ SmiToInteger32(input_reg, input_reg);
+  __ ClampUint8(input_reg);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+  XMMRegister value_reg = ToDoubleRegister(instr->value());
+  Register result_reg = ToRegister(instr->result());
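+  // Extract either the upper or the lower 32 bits of the double's bit
+  // pattern.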
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ Movq(result_reg, value_reg);
+    __ shrq(result_reg, Immediate(32));
+  } else {
+    __ Movd(result_reg, value_reg);
+  }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  XMMRegister result_reg = ToDoubleRegister(instr->result());
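+  // Assemble the 64-bit bit pattern from the two 32-bit halves: hi in the
+  // upper word, lo in the lower word.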
+  __ movl(kScratchRegister, hi_reg);
+  __ shlq(kScratchRegister, Immediate(32));
+  __ orq(kScratchRegister, lo_reg);
+  __ Movq(result_reg, kScratchRegister);
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate final : public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+
+  // Allocate memory for the object.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
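+    // Walk the object from its last word down, storing the one-pointer
+    // filler map into each field so the heap stays iterable.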
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ movl(temp, Immediate((size / kPointerSize) - 1));
+    } else {
+      temp = ToRegister(instr->size());
+      __ sarp(temp, Immediate(kPointerSizeLog2));
+      __ decl(temp);
+    }
+    Label loop;
+    __ bind(&loop);
+    __ Move(FieldOperand(result, temp, times_pointer_size, 0),
+        isolate()->factory()->one_pointer_filler_map());
+    __ decl(temp);
+    __ j(not_zero, &loop);
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Move(result, Smi::FromInt(0));
+
+  PushSafepointRegistersScope scope(this);
+  if (instr->size()->IsRegister()) {
+    Register size = ToRegister(instr->size());
+    DCHECK(!size.is(result));
+    __ Integer32ToSmi(size, size);
+    __ Push(size);
+  } else {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    __ Push(Smi::FromInt(size));
+  }
+
+  int flags = 0;
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ Push(Smi::FromInt(flags));
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(result, rax);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  DCHECK(ToRegister(instr->value()).is(rax));
+  __ Push(rax);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->value()).is(rbx));
+  Label end, do_call;
+  Register value_register = ToRegister(instr->value());
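+  // Smis are always numbers, so answer "number" inline and skip the stub
+  // call.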
+  __ JumpIfNotSmi(value_register, &do_call);
+  __ Move(rax, isolate()->factory()->number_string());
+  __ jmp(&end);
+  __ bind(&do_call);
+  TypeofStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&end);
+}
+
+
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+  DCHECK(!operand->IsDoubleRegister());
+  if (operand->IsConstantOperand()) {
+    __ Push(ToHandle(LConstantOperand::cast(operand)));
+  } else if (operand->IsRegister()) {
+    __ Push(ToRegister(operand));
+  } else {
+    __ Push(ToOperand(operand));
+  }
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Condition final_branch_condition = EmitTypeofIs(instr, input);
+  if (final_branch_condition != no_condition) {
+    EmitBranch(instr, final_branch_condition);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+  Label* true_label = instr->TrueLabel(chunk_);
+  Label* false_label = instr->FalseLabel(chunk_);
+  Handle<String> type_name = instr->type_literal();
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+  int next_block = GetNextEmittedBlock();
+
+  Label::Distance true_distance = left_block == next_block ? Label::kNear
+                                                           : Label::kFar;
+  Label::Distance false_distance = right_block == next_block ? Label::kNear
+                                                             : Label::kFar;
+  Condition final_branch_condition = no_condition;
+  Factory* factory = isolate()->factory();
+  if (String::Equals(type_name, factory->number_string())) {
+    __ JumpIfSmi(input, true_label, true_distance);
+    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory->string_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
+    final_branch_condition = below;
+
+  } else if (String::Equals(type_name, factory->symbol_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    __ CmpObjectType(input, SYMBOL_TYPE, input);
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory->boolean_string())) {
+    __ CompareRoot(input, Heap::kTrueValueRootIndex);
+    __ j(equal, true_label, true_distance);
+    __ CompareRoot(input, Heap::kFalseValueRootIndex);
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory->undefined_string())) {
+    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
+    __ j(equal, true_label, true_distance);
+    __ JumpIfSmi(input, false_label, false_distance);
+    // Check for undetectable objects => true.
+    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ testb(FieldOperand(input, Map::kBitFieldOffset),
+             Immediate(1 << Map::kIsUndetectable));
+    final_branch_condition = not_zero;
+
+  } else if (String::Equals(type_name, factory->function_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    // Check for callable and not undetectable objects => true.
+    __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ movzxbl(input, FieldOperand(input, Map::kBitFieldOffset));
+    __ andb(input,
+            Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    __ cmpb(input, Immediate(1 << Map::kIsCallable));
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory->object_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    __ j(equal, true_label, true_distance);
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
+    __ j(below, false_label, false_distance);
+    // Check for callable or undetectable objects => false.
+    __ testb(FieldOperand(input, Map::kBitFieldOffset),
+             Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+    final_branch_condition = zero;
+
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)       \
+  } else if (String::Equals(type_name, factory->type##_string())) { \
+    __ JumpIfSmi(input, false_label, false_distance);               \
+    __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),     \
+                   Heap::k##Type##MapRootIndex);                    \
+    final_branch_condition = equal;
+  SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+    // clang-format on
+
+  } else {
+    __ jmp(false_label, false_distance);
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      __ Nop(padding_size);
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
+  }
+  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck final : public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStackCheck* instr_;
+  };
+
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+    __ j(above_equal, &done, Label::kNear);
+
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(rsi));
+    CallCode(isolate()->builtins()->StackCheck(),
+             RelocInfo::CODE_TARGET,
+             instr);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new(zone()) DeferredStackCheck(this, instr);
+    __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+    __ j(below, deferred_stack_check->entry());
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting the call and the safepoint
+    // in the deferred code.
+  }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  DCHECK(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+
+  Condition cc = masm()->CheckSmi(rax);
+  DeoptimizeIf(cc, instr, Deoptimizer::kSmi);
+
+  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
+  __ CmpObjectType(rax, JS_PROXY_TYPE, rcx);
+  DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
+
+  Label use_cache, call_runtime;
+  Register null_value = rdi;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ CheckEnumCache(null_value, &call_runtime);
+
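+  // The enum cache is usable; return the receiver's map to signal fast mode.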
+  __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
+  __ jmp(&use_cache, Label::kNear);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ Push(rax);
+  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
+
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kMetaMapRootIndex);
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  Label load_cache, done;
+  __ EnumLength(result, map);
+  __ Cmp(result, Smi::FromInt(0));
+  __ j(not_equal, &load_cache, Label::kNear);
+  __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
+  __ jmp(&done, Label::kNear);
+  __ bind(&load_cache);
+  __ LoadInstanceDescriptors(map, result);
+  __ movp(result,
+          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
+  __ movp(result,
+          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+  __ bind(&done);
+  Condition cc = masm()->CheckSmi(result);
+  DeoptimizeIf(cc, instr, Deoptimizer::kNoCache);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  __ cmpp(ToRegister(instr->map()),
+          FieldOperand(object, HeapObject::kMapOffset));
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ Push(object);
+  __ Push(index);
+  __ xorp(rsi, rsi);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(object, rax);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble final : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          object_(object),
+          index_(index) {
+    }
+    void Generate() override {
+      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LLoadFieldByIndex* instr_;
+    Register object_;
+    Register index_;
+  };
+
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
+
+  Label out_of_object, done;
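+  // Bit 0 of the index payload flags a mutable heap-number field, which is
+  // loaded through the deferred runtime path; the remaining bits hold the
+  // field index (negative for out-of-object properties).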
+  __ Move(kScratchRegister, Smi::FromInt(1));
+  __ testp(index, kScratchRegister);
+  __ j(not_zero, deferred->entry());
+
+  __ sarp(index, Immediate(1));
+
+  __ SmiToInteger32(index, index);
+  __ cmpl(index, Immediate(0));
+  __ j(less, &out_of_object, Label::kNear);
+  __ movp(object, FieldOperand(object,
+                               index,
+                               times_pointer_size,
+                               JSObject::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&out_of_object);
+  __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
+  __ negl(index);
+  // Index is now equal to the out-of-object property index plus 1.
+  __ movp(object, FieldOperand(object,
+                               index,
+                               times_pointer_size,
+                               FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
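+
+// The field index consumed above is a smi with an extra encoding (as
+// implied by the code, not a definitive statement of the format): bit 0
+// set flags a mutable heap-number field, which takes the deferred
+// runtime path; after an arithmetic shift right by one, a non-negative
+// value selects an in-object field at JSObject::kHeaderSize +
+// value * kPointerSize, while a negative value selects slot
+// -value - 1 of the out-of-object properties array.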
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+  Register context = ToRegister(instr->context());
+  __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), context);
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+  Handle<ScopeInfo> scope_info = instr->scope_info();
+  __ Push(scope_info);
+  __ Push(ToRegister(instr->function()));
+  CallRuntime(Runtime::kPushBlockContext, instr);
+  RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/crankshaft/x64/lithium-codegen-x64.h b/src/crankshaft/x64/lithium-codegen-x64.h
new file mode 100644
index 0000000..6fb918b
--- /dev/null
+++ b/src/crankshaft/x64/lithium-codegen-x64.h
@@ -0,0 +1,383 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
+#define V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
+
+#include "src/ast/scopes.h"
+#include "src/base/logging.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/x64/lithium-gap-resolver-x64.h"
+#include "src/crankshaft/x64/lithium-x64.h"
+#include "src/deoptimizer.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : LCodeGenBase(chunk, assembler, info),
+        jump_table_(4, info->zone()),
+        scope_(info->scope()),
+        deferred_(8, info->zone()),
+        frame_is_built_(false),
+        safepoints_(info->zone()),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub() ||
+        info()->requires_frame();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
+  // Support for converting LOperands to assembler types.
+  Register ToRegister(LOperand* op) const;
+  XMMRegister ToDoubleRegister(LOperand* op) const;
+  bool IsInteger32Constant(LConstantOperand* op) const;
+  bool IsExternalConstant(LConstantOperand* op) const;
+  bool IsDehoistedKeyConstant(LConstantOperand* op) const;
+  bool IsSmiConstant(LConstantOperand* op) const;
+  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+  int32_t ToInteger32(LConstantOperand* op) const;
+  Smi* ToSmi(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  ExternalReference ToExternalReference(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op) const;
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  void DoDeferredNumberTagIU(LInstruction* instr,
+                             LOperand* value,
+                             LOperand* temp1,
+                             LOperand* temp2,
+                             IntegerSignedness signedness);
+
+  void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
+  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocate(LAllocate* instr);
+  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register object,
+                                   Register index);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  LanguageMode language_mode() const { return info()->language_mode(); }
+
+  LPlatformChunk* chunk() const { return chunk_; }
+  Scope* scope() const { return scope_; }
+  HGraph* graph() const { return chunk()->graph(); }
+
+  XMMRegister double_scratch0() const { return xmm0; }
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register scratch);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  void GenerateBodyInstructionPre(LInstruction* instr) override;
+  void GenerateBodyInstructionPost(LInstruction* instr) override;
+  bool GeneratePrologue();
+  bool GenerateDeferredCode();
+  bool GenerateJumpTable();
+  bool GenerateSafepointTable();
+
+  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+  void GenerateOsrPrologue();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS
+  };
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode,
+                       int argc);
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+
+  void CallRuntime(const Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, function->nargs, instr);
+  }
+
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr,
+                               LOperand* context);
+
+  void LoadContextFromDeferred(LOperand* context);
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in rdi.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count, int arity,
+                         LInstruction* instr);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode,
+                                    int argc);
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+  void DeoptimizeIf(Condition cc, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason,
+                    Deoptimizer::BailoutType bailout_type);
+  void DeoptimizeIf(Condition cc, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason);
+
+  bool DeoptEveryNTimes() {
+    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+  }
+
+  void AddToTranslation(LEnvironment* environment,
+                        Translation* translation,
+                        LOperand* op,
+                        bool is_tagged,
+                        bool is_uint32,
+                        int* object_index_pointer,
+                        int* dematerialized_index_pointer);
+
+  Register ToRegister(int index) const;
+  XMMRegister ToDoubleRegister(int index) const;
+  Operand BuildFastArrayOperand(
+      LOperand* elements_pointer,
+      LOperand* key,
+      Representation key_representation,
+      ElementsKind elements_kind,
+      uint32_t base_offset);
+
+  Operand BuildSeqStringOperand(Register string,
+                                LOperand* index,
+                                String::Encoding encoding);
+
+  void EmitIntegerMathAbs(LMathAbs* instr);
+  void EmitSmiMathAbs(LMathAbs* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    Safepoint::DeoptMode mode);
+  void RecordAndWritePosition(int position) override;
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block);
+
+  // EmitBranch expects to be the last instruction of a block.
+  template<class InstrType>
+  void EmitBranch(InstrType instr, Condition cc);
+  template <class InstrType>
+  void EmitTrueBranch(InstrType instr, Condition cc);
+  template <class InstrType>
+  void EmitFalseBranch(InstrType instr, Condition cc);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+                        XMMRegister result, NumberUntagDMode mode);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string,
+                         SmiCheck check_needed);
+
+  // Emits code for pushing either a tagged constant, a (non-double)
+  // register, or a stack slot operand.
+  void EmitPushTaggedOperand(LOperand* operand);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset,
+                    AllocationSiteMode mode);
+
+  void EnsureSpaceForLazyDeopt(int space_needed) override;
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+  template <class T>
+  void EmitVectorStoreICRegisters(T* instr);
+
+#ifdef _MSC_VER
+  // On Windows, you may not access the stack more than one page below
+  // the most recently mapped page. To make the allocated area randomly
+  // accessible, we write an arbitrary value to each page in range
+  // rsp + offset - page_size .. rsp in turn.
+  void MakeSureStackPagesMapped(int offset);
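+  //
+  // A minimal sketch of such a probe loop (illustration only; the
+  // actual body lives earlier in lithium-codegen-x64.cc and may
+  // differ; kPageSize stands in for the 4 KB page size):
+  //   for (int i = offset - kPageSize; i > 0; i -= kPageSize) {
+  //     __ movp(Operand(rsp, i), rax);
+  //   }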
+#endif
+
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+  Scope* const scope_;
+  ZoneList<LDeferredCode*> deferred_;
+  bool frame_is_built_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope final BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->masm_->PushSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+    }
+
+    ~PushSafepointRegistersScope() {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      codegen_->masm_->PopSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode: public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() {}
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label done_;
+  Label* external_exit_;
+  int instruction_index_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_X64_LITHIUM_CODEGEN_X64_H_
diff --git a/src/crankshaft/x64/lithium-gap-resolver-x64.cc b/src/crankshaft/x64/lithium-gap-resolver-x64.cc
new file mode 100644
index 0000000..3808c37
--- /dev/null
+++ b/src/crankshaft/x64/lithium-gap-resolver-x64.cc
@@ -0,0 +1,321 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/crankshaft/x64/lithium-gap-resolver-x64.h"
+
+#include "src/crankshaft/x64/lithium-codegen-x64.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner), moves_(32, owner->zone()) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  DCHECK(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      PerformMove(i);
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      DCHECK(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
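+
+// Example of the two phases above (hypothetical operands): for the
+// parallel move {42 -> r1, r1 -> r2}, the first loop emits r1 -> r2
+// (the constant cannot block it, since a constant has no source
+// location), and the second loop then materializes 42 into r1.
+// Emitting the constant first would clobber r1 while it is still
+// needed as a source.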
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.  We use operand swaps to resolve cycles,
+  // which means that a call to PerformMove could change any source operand
+  // in the move graph.
+
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack-allocated local.  Recursion may allow
+  // multiple moves to be pending.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one, so we recursively perform
+  // all such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      // Though PerformMove can change any source operand in the move graph,
+      // this call cannot create a blocking move via a swap (this loop does
+      // not miss any).  Assume there is a non-blocking move with source A
+      // and this move is blocked on source B and there is a swap of A and
+      // B.  Then A and B must be involved in the same cycle (or they would
+      // not be swapped).  Since this move's destination is B and there is
+      // only a single incoming edge to an operand, this move must also be
+      // involved in the same cycle.  In that case, the blocking move will
+      // be created but will be "pending" when we return from PerformMove.
+      PerformMove(i);
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // This move's source may have changed due to swaps to resolve cycles and
+  // so it may now be the last move in the cycle.  If so, remove it.
+  if (moves_[index].source()->Equals(destination)) {
+    moves_[index].Eliminate();
+    return;
+  }
+
+  // The move may be blocked on (at most one) pending move, in which case
+  // we have a cycle.  Search for such a blocking move and perform a swap to
+  // resolve it.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination)) {
+      DCHECK(other_move.IsPending());
+      EmitSwap(index);
+      return;
+    }
+  }
+
+  // This move is not blocked.
+  EmitMove(index);
+}
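+
+// Worked example of cycle resolution (hypothetical operands): for the
+// parallel move {r1 -> r2, r2 -> r1}, PerformMove(0) marks r1 -> r2 as
+// pending and recurses into r2 -> r1, whose destination r1 is blocked
+// only by the pending move, so EmitSwap exchanges r1 and r2. Back in
+// the outer call, the swap fix-up has rewritten the move's source to
+// r2, so the move now equals its destination and is eliminated.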
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    Register src = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      __ movp(dst, src);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      __ movp(dst, src);
+    }
+
+  } else if (source->IsStackSlot()) {
+    Operand src = cgen_->ToOperand(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      __ movp(dst, src);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      __ movp(kScratchRegister, src);
+      __ movp(dst, kScratchRegister);
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      if (cgen_->IsSmiConstant(constant_source)) {
+        __ Move(dst, cgen_->ToSmi(constant_source));
+      } else if (cgen_->IsInteger32Constant(constant_source)) {
+        int32_t constant = cgen_->ToInteger32(constant_source);
+        // Sign-extend only constants used as de-hoisted array keys.
+        // Everything else needs only zero extension, which saves 2 bytes.
+        if (cgen_->IsDehoistedKeyConstant(constant_source)) {
+          __ Set(dst, constant);
+        } else {
+          __ Set(dst, static_cast<uint32_t>(constant));
+        }
+      } else {
+        __ Move(dst, cgen_->ToHandle(constant_source));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      double v = cgen_->ToDouble(constant_source);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
+      XMMRegister dst = cgen_->ToDoubleRegister(destination);
+      if (int_val == 0) {
+        __ Xorpd(dst, dst);
+      } else {
+        __ Set(kScratchRegister, int_val);
+        __ Movq(dst, kScratchRegister);
+      }
+    } else {
+      DCHECK(destination->IsStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      if (cgen_->IsSmiConstant(constant_source)) {
+        __ Move(dst, cgen_->ToSmi(constant_source));
+      } else if (cgen_->IsInteger32Constant(constant_source)) {
+        // Sign-extend to 64 bits when storing into a stack slot.
+        __ movp(dst, Immediate(cgen_->ToInteger32(constant_source)));
+      } else {
+        __ Move(kScratchRegister, cgen_->ToHandle(constant_source));
+        __ movp(dst, kScratchRegister);
+      }
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    XMMRegister src = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ Movapd(cgen_->ToDoubleRegister(destination), src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ Movsd(cgen_->ToOperand(destination), src);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    Operand src = cgen_->ToOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ Movsd(cgen_->ToDoubleRegister(destination), src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ Movsd(xmm0, src);
+      __ Movsd(cgen_->ToOperand(destination), xmm0);
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::EmitSwap(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister() && destination->IsRegister()) {
+    // Swap two general-purpose registers.
+    Register src = cgen_->ToRegister(source);
+    Register dst = cgen_->ToRegister(destination);
+    __ movp(kScratchRegister, src);
+    __ movp(src, dst);
+    __ movp(dst, kScratchRegister);
+
+  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
+             (source->IsStackSlot() && destination->IsRegister())) {
+    // Swap a general-purpose register and a stack slot.
+    Register reg =
+        cgen_->ToRegister(source->IsRegister() ? source : destination);
+    Operand mem =
+        cgen_->ToOperand(source->IsRegister() ? destination : source);
+    __ movp(kScratchRegister, mem);
+    __ movp(mem, reg);
+    __ movp(reg, kScratchRegister);
+
+  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
+      (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot())) {
+    // Swap two stack slots or two double stack slots.
+    Operand src = cgen_->ToOperand(source);
+    Operand dst = cgen_->ToOperand(destination);
+    __ Movsd(xmm0, src);
+    __ movp(kScratchRegister, dst);
+    __ Movsd(dst, xmm0);
+    __ movp(src, kScratchRegister);
+
+  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    // Swap two double registers.
+    XMMRegister source_reg = cgen_->ToDoubleRegister(source);
+    XMMRegister destination_reg = cgen_->ToDoubleRegister(destination);
+    __ Movapd(xmm0, source_reg);
+    __ Movapd(source_reg, destination_reg);
+    __ Movapd(destination_reg, xmm0);
+
+  } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
+    // Swap a double register and a double stack slot.
+    DCHECK((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
+           (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
+    XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
+                                                  ? source
+                                                  : destination);
+    LOperand* other = source->IsDoubleRegister() ? destination : source;
+    DCHECK(other->IsDoubleStackSlot());
+    Operand other_operand = cgen_->ToOperand(other);
+    __ Movapd(xmm0, reg);
+    __ Movsd(reg, other_operand);
+    __ Movsd(other_operand, xmm0);
+
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+
+  // The swap of source and destination has executed a move from source to
+  // destination.
+  moves_[index].Eliminate();
+
+  // Any unperformed (including pending) move with a source of either
+  // this move's source or destination needs to have its source
+  // changed to reflect the state of affairs after the swap.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(source)) {
+      moves_[i].set_source(destination);
+    } else if (other_move.Blocks(destination)) {
+      moves_[i].set_source(source);
+    }
+  }
+}
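+
+// Fix-up example (hypothetical operands): after a swap of r1 and r2
+// resolving the move r2 -> r1, a still-unperformed move r1 -> r3 would
+// read the wrong value, so the loop above rewrites it to r2 -> r3;
+// symmetrically, a remaining r2 -> r4 becomes r1 -> r4.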
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/crankshaft/x64/lithium-gap-resolver-x64.h b/src/crankshaft/x64/lithium-gap-resolver-x64.h
new file mode 100644
index 0000000..641f0ee
--- /dev/null
+++ b/src/crankshaft/x64/lithium-gap-resolver-x64.h
@@ -0,0 +1,50 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
+#define V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
+
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver final BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Execute a move by emitting a swap of two operands.  The move from
+  // source to destination is removed from the move graph.
+  void EmitSwap(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_X64_LITHIUM_GAP_RESOLVER_X64_H_
diff --git a/src/crankshaft/x64/lithium-x64.cc b/src/crankshaft/x64/lithium-x64.cc
new file mode 100644
index 0000000..3c932a2
--- /dev/null
+++ b/src/crankshaft/x64/lithium-x64.cc
@@ -0,0 +1,2685 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/x64/lithium-x64.h"
+
+#include <sstream>
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/x64/lithium-codegen-x64.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register, a use-at-start policy, or
+  // a non-register policy.
+  DCHECK(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::ROR: return "ror-t";
+    case Token::SHL: return "sal-t";
+    case Token::SAR: return "sar-t";
+    case Token::SHR: return "shr-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+template<int R>
+bool LTemplateResultInstruction<R>::MustSignExtendResult(
+    LPlatformChunk* chunk) const {
+  HValue* hvalue = this->hydrogen_value();
+  return hvalue != NULL &&
+      hvalue->representation().IsInteger32() &&
+      chunk->GetDehoistedKeyIds()->Contains(hvalue->id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(),
+              true_block_id(), false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+
+void LCallFunction::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  function()->PrintTo(stream);
+  if (hydrogen()->HasVectorAndSlot()) {
+    stream->Add(" (type-feedback-vector ");
+    temp_vector()->PrintTo(stream);
+    stream->Add(" ");
+    temp_slot()->PrintTo(stream);
+    stream->Add(")");
+  }
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  if (kind == DOUBLE_REGISTERS && kDoubleSize == 2 * kPointerSize) {
+    // Skip a slot if needed for a double-width slot on the x32 port.
+    spill_slot_count_++;
+    // The spill slot's address is at rbp - (index + 1) * kPointerSize -
+    // StandardFrameConstants::kFixedFrameSizeFromFp. kFixedFrameSizeFromFp
+    // is 2 * kPointerSize; if rbp is aligned at an 8-byte boundary, the
+    // "|= 1" below makes sure the spilled doubles are 8-byte aligned.
+    // TODO(haitao): make sure rbp is aligned at 8-byte boundary for x32 port.
+    spill_slot_count_ |= 1;
+  }
+  return spill_slot_count_++;
+}
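+
+// Worked example of the alignment logic above (illustrative counts,
+// assuming the x32 layout where kPointerSize == 4 and
+// kFixedFrameSizeFromFp == 8): with spill_slot_count_ == 1 the skip
+// gives 2, "|= 1" gives 3, and index 3 is returned, so the slot sits at
+// rbp - (3 + 1) * 4 - 8 = rbp - 24, a multiple of 8 below rbp. Any odd
+// index makes (index + 1) * kPointerSize a multiple of 8.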
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  // All stack slots are double stack slots on x64. Alternatively, at
+  // some point we could start using half-size stack slots for int32
+  // values.
+  int index = GetNextSpillIndex(kind);
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    DCHECK(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  std::ostringstream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.str().c_str());
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d]", base_offset());
+  } else {
+    stream->Add("]");
+  }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d] <-", base_offset());
+  } else {
+    stream->Add("] <- ");
+  }
+
+  if (value() == NULL) {
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
+           hydrogen()->value()->representation().IsDouble());
+    stream->Add("<the hole(nan)>");
+  } else {
+    value()->PrintTo(stream);
+  }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+  DCHECK(is_unused());
+  chunk_ = new(zone()) LPlatformChunk(info(), graph());
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(XMMRegister reg) {
+  return new (zone())
+      LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                              LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseTempRegister(value);
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr,
+    int index) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+                                         Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateResultInstruction<1>* instr,
+    XMMRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  instr->set_environment(CreateEnvironment(
+      hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  // If the instruction does not have observable side effects, lazy
+  // deoptimization after the call will try to deoptimize to the point
+  // before the call. Thus we still need to attach an environment to the
+  // call even if the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  DCHECK(!instr->HasPointerMap());
+  instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+
+    HValue* right_value = instr->right();
+    LOperand* right = NULL;
+    int constant_value = 0;
+    bool does_deopt = false;
+    if (right_value->IsConstant()) {
+      HConstant* constant = HConstant::cast(right_value);
+      right = chunk_->DefineConstantOperand(constant);
+      constant_value = constant->Integer32Value() & 0x1f;
+      if (SmiValuesAre31Bits() && instr->representation().IsSmi() &&
+          constant_value > 0) {
+        // Left shift can deoptimize if we shift by > 0 and the result
+        // cannot be truncated to smi.
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
+    } else {
+      right = UseFixed(right_value, rcx);
+    }
+
+    // Shift operations can only deoptimize if we do a logical shift by 0 and
+    // the result cannot be truncated to int32.
+    if (op == Token::SHR && constant_value == 0) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    }
+
+    LInstruction* result =
+        DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+    return does_deopt ? AssignEnvironment(result) : result;
+  } else {
+    return DoArithmeticT(op, instr);
+  }
+}
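+
+// Deopt examples for the constant cases above (illustrative values):
+// with 31-bit smis, shifting a near-maximal smi left by one overflows
+// the smi range, so a smi left shift by a positive constant deopts
+// unless every use truncates to smi; and a logical shift right by zero
+// of a value with the sign bit set yields a uint32 >= 2^31 that is not
+// representable as int32, hence the kUint32 check.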
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
+  if (op == Token::MOD) {
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1);
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return MarkAsCall(DefineSameAsFirst(result), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return CpuFeatures::IsSupported(AVX) ? DefineAsRegister(result)
+                                         : DefineSameAsFirst(result);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* left_operand = UseFixed(left, rdx);
+  LOperand* right_operand = UseFixed(right, rax);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  DCHECK(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    DCHECK(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    DCHECK(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      DCHECK(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    DCHECK(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment; it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new(zone()) LDummy());
+    } else {
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new(zone())
+          LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    HBasicBlock* successor;
+    if (current->IsControlInstruction() &&
+        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+        successor != NULL) {
+      instr = new(zone()) LGoto(successor);
+    } else {
+      instr = current->CompileToLithium(this);
+    }
+  }
+
+  argument_count_ += current->argument_delta();
+  DCHECK(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    AddInstruction(instr, current);
+  }
+
+  current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+                                   HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+  instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+  // Make sure that the lithium instruction has either no fixed register
+  // constraints in temps or the result OR no uses that are only used at
+  // start. If this invariant doesn't hold, the register allocator can decide
+  // to insert a split of a range immediately before the instruction due to an
+  // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+  // the same register that is used for the use-at-start.
+  // See https://code.google.com/p/chromium/issues/detail?id=201590
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
+    int fixed = 0;
+    int used_at_start = 0;
+    for (UseIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->IsUsedAtStart()) ++used_at_start;
+    }
+    if (instr->Output() != NULL) {
+      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+    }
+    for (TempIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->HasFixedPolicy()) ++fixed;
+    }
+    DCHECK(fixed == 0 || used_at_start == 0);
+  }
+#endif
+
+  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+    instr = AssignPointerMap(instr);
+  }
+  if (FLAG_stress_environments && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+  chunk_->AddInstruction(instr, current_block_);
+
+  if (instr->IsCall() || instr->IsPrologue()) {
+    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+    if (hydrogen_val->HasObservableSideEffects()) {
+      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+      sim->ReplayEnvironment(current_block_->last_environment());
+      hydrogen_value_for_lazy_bailout = sim;
+    }
+    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+    chunk_->AddInstruction(bailout, current_block_);
+  }
+}
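+
+// For a side-effecting call the emitted stream therefore looks roughly like
+//
+//   LCallRuntime   ; IsCall(), may trigger a lazy deoptimization
+//   LLazyBailout   ; environment replayed from the HSimulate that directly
+//                  ; follows the call in the hydrogen graph
+//
+// so that a lazy deopt arriving after the call returns has a complete frame
+// description from which to rebuild the unoptimized frame.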
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+  return new (zone()) LPrologue();
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new(zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+      type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+  LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
+  }
+  return branch;
+}
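+
+// Only the hard case may need an environment: a tagged value whose expected
+// type set is incomplete can still meet a type outside that set at runtime
+// and must be able to deoptimize. Untagged values and values statically known
+// to be boolean, smi, string, etc. branch without any bailout.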
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpMapAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new(zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* left =
+      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+  LOperand* right =
+      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+    HHasInPrototypeChainAndBranch* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* prototype = UseRegister(instr->prototype());
+  LHasInPrototypeChainAndBranch* result =
+      new (zone()) LHasInPrototypeChainAndBranch(object, prototype);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegister(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), rdi);
+  LOperand* receiver = UseFixed(instr->receiver(), rax);
+  LOperand* length = UseFixed(instr->length(), rbx);
+  LOperand* elements = UseFixed(instr->elements(), rcx);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
+  return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+  int argc = instr->OperandCount();
+  for (int i = 0; i < argc; ++i) {
+    LOperand* argument = UseOrConstant(instr->argument(i));
+    AddInstruction(new(zone()) LPushArgument(argument), instr);
+  }
+  return NULL;
+}
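+
+// The LPushArgument instructions were added to the chunk directly above, one
+// per operand, so there is no single result instruction to hand back;
+// returning NULL tells VisitInstruction that everything has been emitted.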
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+    HStoreCodeEntry* store_code_entry) {
+  LOperand* function = UseRegister(store_code_entry->function());
+  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+  return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(
+      new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new(zone()) LContext, rsi);
+  }
+
+  return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+    HCallJSFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), rdi);
+
+  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+    HCallWithDescriptor* instr) {
+  CallInterfaceDescriptor descriptor = instr->descriptor();
+
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  // Target
+  ops.Add(target, zone());
+  // Context
+  LOperand* op = UseFixed(instr->OperandAt(1), rsi);
+  ops.Add(op, zone());
+  // Other register parameters
+  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+       i < instr->OperandCount(); i++) {
+    op =
+        UseFixed(instr->OperandAt(i),
+                 descriptor.GetRegisterParameter(
+                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+      descriptor, ops, zone());
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* function = UseFixed(instr->function(), rdi);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, rax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathFloor* result = new(zone()) LMathFloor(input);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = FixedTemp(xmm4);
+  LMathRound* result = new(zone()) LMathRound(input, temp);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+  LOperand* context = UseAny(instr->context());
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+  Representation r = instr->value()->representation();
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathClz32* result = new(zone()) LMathClz32(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* value = UseTempRegister(instr->value());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+  LOperand* input = UseAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LMathSqrt(input));
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathPowHalf* result = new(zone()) LMathPowHalf(input);
+  return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* constructor = UseFixed(instr->constructor(), rdi);
+  LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* function = UseFixed(instr->function(), rdi);
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(rdx);
+    vector = FixedTemp(rbx);
+  }
+  LCallFunction* call =
+      new (zone()) LCallFunction(context, function, slot, vector);
+  return MarkAsCall(DefineFixed(call, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LCallRuntime* result = new(zone()) LCallRuntime(context);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right;
+    if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
+      // We don't support tagged immediates, so we request the right operand
+      // in a register.
+      right = UseRegisterAtStart(instr->BetterRightOperand());
+    } else {
+      right = UseOrConstantAtStart(instr->BetterRightOperand());
+    }
+    return DefineSameAsFirst(new(zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
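+
+// With 32-bit smis the payload lives in the upper half of the 64-bit word,
+// so a smi constant (e.g. Smi(1) == 1 << 32) does not fit the 32-bit
+// immediate field of the x64 logical instructions and has to be materialized
+// in a register first; that is the "no tagged immediates" case above.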
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+          dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+       divisor != 1 && divisor != -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
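+
+// Reducing the division to shifts only works when the quotient is exact:
+// 5 / 4 in JavaScript is 1.25, which cannot live in an int32 register. Unless
+// all uses truncate to int32 anyway, the environment above allows a deopt on
+// an inexact result, on -0 (negative divisor), and on kMinInt / -1 overflow.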
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(rax);
+  LOperand* temp2 = FixedTemp(rdx);
+  LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+          dividend, divisor, temp1, temp2), rdx);
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseFixed(instr->left(), rax);
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = FixedTemp(rdx);
+  LInstruction* result = DefineFixed(new(zone()) LDivI(
+          dividend, divisor, temp), rax);
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      instr->CheckFlag(HValue::kCanOverflow) ||
+      !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+          dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
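+
+// Flooring division by a power of two is exactly an arithmetic right shift,
+// e.g. (-5) >> 2 == -2 == floor(-5 / 4), so no inexactness bailout is needed
+// here; only the -0 and kMinInt / -1 cases still require an environment.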
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(rax);
+  LOperand* temp2 = FixedTemp(rdx);
+  LOperand* temp3 =
+      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+      NULL : TempRegister();
+  LInstruction* result =
+      DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+                                                   divisor,
+                                                   temp1,
+                                                   temp2,
+                                                   temp3),
+                  rdx);
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseFixed(instr->left(), rax);
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = FixedTemp(rdx);
+  LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
+          dividend, divisor, temp), rax);
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      instr->CheckFlag(HValue::kCanOverflow)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
+    return DoFlooringDivByConstI(instr);
+  } else {
+    return DoFlooringDivI(instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+          dividend, divisor));
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(rax);
+  LOperand* temp2 = FixedTemp(rdx);
+  LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+          dividend, divisor, temp1, temp2), rax);
+  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseFixed(instr->left(), rax);
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = FixedTemp(rdx);
+  LInstruction* result = DefineFixed(new(zone()) LModI(
+          dividend, divisor, temp), rdx);
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
+  } else {
+    return DoArithmeticT(Token::MOD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstant(instr->BetterRightOperand());
+    LMulI* mul = new(zone()) LMulI(left, right);
+    if (instr->CheckFlag(HValue::kCanOverflow) ||
+        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      AssignEnvironment(mul);
+    }
+    return DefineSameAsFirst(mul);
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right;
+    if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
+      // We don't support tagged immediates, so we request the right operand
+      // in a register.
+      right = UseRegisterAtStart(instr->right());
+    } else {
+      right = UseOrConstantAtStart(instr->right());
+    }
+    LSubI* sub = new(zone()) LSubI(left, right);
+    LInstruction* result = DefineSameAsFirst(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    // Check whether it would be advantageous to use an lea instruction
+    // rather than an add. That is the case when no overflow check is needed
+    // and there are multiple uses of the add's inputs: a three-operand lea
+    // preserves both input values for those later uses.
+    bool use_lea = LAddI::UseLea(instr);
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    HValue* right_candidate = instr->BetterRightOperand();
+    LOperand* right;
+    if (SmiValuesAre32Bits() && instr->representation().IsSmi()) {
+      // We cannot add a tagged immediate to a tagged value,
+      // so we request it in a register.
+      right = UseRegisterAtStart(right_candidate);
+    } else {
+      right = use_lea ? UseRegisterOrConstantAtStart(right_candidate)
+                      : UseOrConstantAtStart(right_candidate);
+    }
+    LAddI* add = new(zone()) LAddI(left, right);
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    LInstruction* result = use_lea ? DefineAsRegister(add)
+                                   : DefineSameAsFirst(add);
+    if (can_overflow) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsExternal()) {
+    DCHECK(instr->IsConsistentExternalRepresentation());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+    bool use_lea = LAddI::UseLea(instr);
+    LOperand* left = UseRegisterAtStart(instr->left());
+    HValue* right_candidate = instr->right();
+    LOperand* right = use_lea
+        ? UseRegisterOrConstantAtStart(right_candidate)
+        : UseOrConstantAtStart(right_candidate);
+    LAddI* add = new(zone()) LAddI(left, right);
+    LInstruction* result = use_lea
+        ? DefineAsRegister(add)
+        : DefineSameAsFirst(add);
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    return DoArithmeticT(Token::ADD, instr);
+  }
+  return NULL;
+}
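+
+// The lea variant is the three-operand form, roughly
+//
+//   lea ecx, [rax + rbx]   ; both inputs survive
+//
+// whereas addl overwrites its left operand; that is why the lea result gets a
+// fresh register while the plain add is defined same-as-first. lea also does
+// not set the overflow flag, which is why it is only used when no overflow
+// check is needed.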
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  if (instr->representation().IsSmi()) {
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseAtStart(instr->BetterRightOperand());
+  } else if (instr->representation().IsInteger32()) {
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    left = UseRegisterAtStart(instr->left());
+    right = UseRegisterAtStart(instr->right());
+  }
+  LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
+  return DefineSameAsFirst(minmax);
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  DCHECK(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), xmm2);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), xmm1)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+  LPower* result = new(zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* left = UseFixed(instr->left(), rdx);
+  LOperand* right = UseFixed(instr->right(), rax);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    LOperand* left;
+    LOperand* right;
+    if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+      left = UseRegisterOrConstantAtStart(instr->left());
+      right = UseRegisterOrConstantAtStart(instr->right());
+    } else {
+      left = UseRegisterAtStart(instr->left());
+      right = UseRegisterAtStart(instr->right());
+    }
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+    HCompareMinusZeroAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  return new(zone()) LCompareMinusZeroAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsUndetectableAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* left = UseFixed(instr->left(), rdx);
+  LOperand* right = UseFixed(instr->right(), rax);
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(context, left, right);
+
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasInstanceTypeAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasCachedArrayIndexAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  return new(zone()) LClassOfTestAndBranch(value,
+                                           TempRegister(),
+                                           TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+  LOperand* map = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = FLAG_debug_code
+      ? UseRegisterAtStart(instr->index())
+      : UseRegisterOrConstantAtStart(instr->index());
+  LOperand* value = FLAG_debug_code
+      ? UseRegisterAtStart(instr->value())
+      : UseRegisterOrConstantAtStart(instr->value());
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), rsi) : NULL;
+  LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
+                                                       index, value);
+  if (FLAG_debug_code) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+      ? UseOrConstantAtStart(instr->length())
+      : UseAtStart(instr->length());
+  LInstruction* result = new(zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
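+
+// At most one operand ends up as an immediate: a constant index forces the
+// length into a non-constant use, since the compare needs at least one
+// non-immediate operand, while a register index may face a constant length.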
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+    HBoundsCheckBaseIndexInformation* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception).  There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  HValue* val = instr->value();
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(val);
+      return DefineSameAsFirst(new(zone()) LDummyUse(value));
+    }
+    from = Representation::Tagged();
+  }
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
+        return DefineSameAsFirst(new(zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegister(val);
+        return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(val);
+        bool truncating = instr->CanTruncateToInt32();
+        LOperand* xmm_temp = truncating ? NULL : FixedTemp(xmm1);
+        LInstruction* result =
+            DefineSameAsFirst(new(zone()) LTaggedToI(value, xmm_temp));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegister(val);
+      LOperand* temp = TempRegister();
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
+      return AssignPointerMap(Define(result, result_temp));
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      return AssignEnvironment(
+          DefineAsRegister(new(zone()) LDoubleToSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+      if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+      return result;
+    }
+  } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      if (!instr->CheckFlag(HValue::kCanOverflow)) {
+        LOperand* value = UseRegister(val);
+        return DefineAsRegister(new(zone()) LSmiTag(value));
+      } else if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegister(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = FixedTemp(xmm1);
+        LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+        return AssignPointerMap(DefineSameAsFirst(result));
+      } else {
+        LOperand* value = UseRegister(val);
+        LOperand* temp1 = SmiValuesAre32Bits() ? NULL : TempRegister();
+        LOperand* temp2 = SmiValuesAre32Bits() ? NULL : FixedTemp(xmm1);
+        LNumberTagI* result = new(zone()) LNumberTagI(value, temp1, temp2);
+        return AssignPointerMap(DefineSameAsFirst(result));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+      if (instr->CheckFlag(HValue::kCanOverflow)) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(to.IsDouble());
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
+      } else {
+        LOperand* value = Use(val);
+        return DefineAsRegister(new(zone()) LInteger32ToDouble(value));
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
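+
+// Taken together this covers every edge between the representations used by
+// Crankshaft (smi, tagged, double, int32): conversions that can fail, such as
+// untagging a maybe-non-smi or a non-truncating double-to-int32, carry an
+// environment, while conversions that may allocate a heap number carry a
+// pointer map because the allocation can call into the runtime.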
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckNonSmi(value);
+  if (!instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
+    HCheckArrayBufferNotNeutered* instr) {
+  LOperand* view = UseRegisterAtStart(instr->value());
+  LCheckArrayBufferNotNeutered* result =
+      new (zone()) LCheckArrayBufferNotNeutered(view);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LCheckInstanceType* result = new(zone()) LCheckInstanceType(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg));
+  } else if (input_rep.IsInteger32()) {
+    return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
+  } else {
+    DCHECK(input_rep.IsSmiOrTagged());
+    // Register allocator doesn't (yet) support allocation of double
+    // temps. Reserve xmm1 explicitly.
+    LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
+                                                        FixedTemp(xmm1));
+    return AssignEnvironment(DefineSameAsFirst(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+  HValue* value = instr->value();
+  DCHECK(value->representation().IsDouble());
+  return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+  LOperand* lo = UseRegister(instr->lo());
+  LOperand* hi = UseRegister(instr->hi());
+  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub() ? UseFixed(instr->context(), rsi) : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new(zone()) LReturn(
+      UseFixed(instr->value(), rax), context, parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new(zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new (zone()) LConstantD);
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new(zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new(zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  LOperand* temp;
+  context = UseRegister(instr->context());
+  if (instr->NeedsWriteBarrier()) {
+    value = UseTempRegister(instr->value());
+    temp = TempRegister();
+  } else {
+    value = UseRegister(instr->value());
+    temp = NULL;
+  }
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  // Use the special mov rax, moffs64 encoding for external
+  // memory accesses with 64-bit word-sized values.
+  if (instr->access().IsExternalMemory() &&
+      instr->access().offset() == 0 &&
+      (instr->access().representation().IsSmi() ||
+       instr->access().representation().IsTagged() ||
+       instr->access().representation().IsHeapObject() ||
+       instr->access().representation().IsExternal())) {
+    LOperand* obj = UseRegisterOrConstantAtStart(instr->object());
+    return DefineFixed(new(zone()) LLoadNamedField(obj), rax);
+  }
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
+      context, object, vector);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+void LChunkBuilder::FindDehoistedKeyDefinitions(HValue* candidate) {
+  // We sign extend the dehoisted key at the definition point when the pointer
+  // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
+  // points and should not invoke this function. We can't use STATIC_ASSERT
+  // here as the pointer size is 32-bit for x32.
+  DCHECK(kPointerSize == kInt64Size);
+  BitVector* dehoisted_key_ids = chunk_->GetDehoistedKeyIds();
+  if (dehoisted_key_ids->Contains(candidate->id())) return;
+  dehoisted_key_ids->Add(candidate->id());
+  if (!candidate->IsPhi()) return;
+  for (int i = 0; i < candidate->OperandCount(); ++i) {
+    FindDehoistedKeyDefinitions(candidate->OperandAt(i));
+  }
+}
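+
+// A dehoisted key is an array index whose constant offset (e.g. the + 1 in
+// a[i + 1]) was folded into the memory operand of the access, so the 32-bit
+// key feeds directly into a 64-bit address computation. It is therefore sign
+// extended once at its definition, and phis are walked recursively so every
+// definition reaching such a use gets the same treatment.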
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  DCHECK((kPointerSize == kInt64Size &&
+          instr->key()->representation().IsInteger32()) ||
+         (kPointerSize == kInt32Size &&
+          instr->key()->representation().IsSmiOrInteger32()));
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = NULL;
+  LInstruction* result = NULL;
+
+  if (kPointerSize == kInt64Size) {
+    key = UseRegisterOrConstantAtStart(instr->key());
+  } else {
+    bool clobbers_key = ExternalArrayOpRequiresTemp(
+        instr->key()->representation(), elements_kind);
+    key = clobbers_key
+        ? UseTempRegister(instr->key())
+        : UseRegisterOrConstantAtStart(instr->key());
+  }
+
+  if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) {
+    FindDehoistedKeyDefinitions(instr->key());
+  }
+
+  if (!instr->is_fixed_typed_array()) {
+    LOperand* obj = UseRegisterAtStart(instr->elements());
+    result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
+  } else {
+    DCHECK(
+        (instr->representation().IsInteger32() &&
+         !(IsDoubleOrFloatElementsKind(elements_kind))) ||
+        (instr->representation().IsDouble() &&
+         (IsDoubleOrFloatElementsKind(elements_kind))));
+    LOperand* backing_store = UseRegister(instr->elements());
+    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+    result = DefineAsRegister(
+        new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
+  }
+
+  bool needs_environment;
+  if (instr->is_fixed_typed_array()) {
+    // see LCodeGen::DoLoadKeyedExternalArray
+    needs_environment = elements_kind == UINT32_ELEMENTS &&
+                        !instr->CheckFlag(HInstruction::kUint32);
+  } else {
+    // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+    // LCodeGen::DoLoadKeyedFixedArray
+    needs_environment =
+        instr->RequiresHoleCheck() ||
+        (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
+  }
+
+  if (needs_environment) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
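+
+// A UINT32_ELEMENTS load can produce a value above kMaxInt that does not fit
+// the signed int32 representation chosen for the result; unless all uses
+// treat the value as uint32, the environment permits a deopt in that case.
+// For fixed arrays the environment instead backs the hole check (or the
+// hole-to-undefined conversion inside stubs).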
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LLoadKeyedGeneric* result =
+      new(zone()) LLoadKeyedGeneric(context, object, key, vector);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+
+  if ((kPointerSize == kInt64Size) && instr->IsDehoisted()) {
+    FindDehoistedKeyDefinitions(instr->key());
+  }
+
+  if (!instr->is_fixed_typed_array()) {
+    DCHECK(instr->elements()->representation().IsTagged());
+    bool needs_write_barrier = instr->NeedsWriteBarrier();
+    LOperand* object = NULL;
+    LOperand* key = NULL;
+    LOperand* val = NULL;
+
+    Representation value_representation = instr->value()->representation();
+    if (value_representation.IsDouble()) {
+      object = UseRegisterAtStart(instr->elements());
+      val = UseRegisterAtStart(instr->value());
+      key = UseRegisterOrConstantAtStart(instr->key());
+    } else {
+      DCHECK(value_representation.IsSmiOrTagged() ||
+             value_representation.IsInteger32());
+      if (needs_write_barrier) {
+        object = UseTempRegister(instr->elements());
+        val = UseTempRegister(instr->value());
+        key = UseTempRegister(instr->key());
+      } else {
+        object = UseRegisterAtStart(instr->elements());
+        val = UseRegisterOrConstantAtStart(instr->value());
+        key = UseRegisterOrConstantAtStart(instr->key());
+      }
+    }
+
+    return new (zone()) LStoreKeyed(object, key, val, nullptr);
+  }
+
+  DCHECK(
+      (instr->value()->representation().IsInteger32() &&
+       !IsDoubleOrFloatElementsKind(elements_kind)) ||
+      (instr->value()->representation().IsDouble() &&
+       IsDoubleOrFloatElementsKind(elements_kind)));
+  DCHECK(instr->elements()->representation().IsExternal());
+  bool val_is_temp_register = elements_kind == UINT8_CLAMPED_ELEMENTS ||
+                              elements_kind == FLOAT32_ELEMENTS;
+  LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
+      : UseRegister(instr->value());
+  LOperand* key = NULL;
+  if (kPointerSize == kInt64Size) {
+    key = UseRegisterOrConstantAtStart(instr->key());
+  } else {
+    bool clobbers_key = ExternalArrayOpRequiresTemp(
+        instr->key()->representation(), elements_kind);
+    key = clobbers_key
+        ? UseTempRegister(instr->key())
+        : UseRegisterOrConstantAtStart(instr->key());
+  }
+  LOperand* backing_store = UseRegister(instr->elements());
+  LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+  return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
+}
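+
+// UINT8_CLAMPED and FLOAT32 stores convert the value in place (clamping to
+// [0, 255], or narrowing double to single precision) before writing it out,
+// so the value register is clobbered and is requested as a temp rather than
+// as a plain use.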
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
+
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreKeyedGeneric* result = new (zone())
+      LStoreKeyedGeneric(context, object, key, value, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LOperand* temp_reg = TempRegister();
+    LTransitionElementsKind* result = new(zone()) LTransitionElementsKind(
+        object, NULL, new_map_reg, temp_reg);
+    return result;
+  } else {
+    LOperand* object = UseFixed(instr->object(), rax);
+    LOperand* context = UseFixed(instr->context(), rsi);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
+    return MarkAsCall(result, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* object = Use(instr->object());
+  LOperand* elements = Use(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+  LMaybeGrowElements* result = new (zone())
+      LMaybeGrowElements(context, object, elements, key, current_capacity);
+  DefineFixed(result, rax);
+  return AssignPointerMap(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool is_in_object = instr->access().IsInobject();
+  bool is_external_location = instr->access().IsExternalMemory() &&
+      instr->access().offset() == 0;
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  bool needs_write_barrier_for_map = instr->has_transition() &&
+      instr->NeedsWriteBarrierForMap();
+
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = is_in_object
+        ? UseRegister(instr->object())
+        : UseTempRegister(instr->object());
+  } else if (is_external_location) {
+    DCHECK(!is_in_object);
+    DCHECK(!needs_write_barrier);
+    DCHECK(!needs_write_barrier_for_map);
+    obj = UseRegisterOrConstant(instr->object());
+  } else {
+    obj = needs_write_barrier_for_map
+        ? UseRegister(instr->object())
+        : UseRegisterAtStart(instr->object());
+  }
+
+  bool can_be_constant = instr->value()->IsConstant() &&
+      HConstant::cast(instr->value())->NotInNewSpace() &&
+      !instr->field_representation().IsDouble();
+
+  LOperand* val;
+  if (needs_write_barrier) {
+    val = UseTempRegister(instr->value());
+  } else if (is_external_location) {
+    val = UseFixed(instr->value(), rax);
+  } else if (can_be_constant) {
+    val = UseRegisterOrConstant(instr->value());
+  } else if (instr->field_representation().IsDouble()) {
+    val = UseRegisterAtStart(instr->value());
+  } else {
+    val = UseRegister(instr->value());
+  }
+
+  // We only need a scratch register if we have a write barrier or we
+  // have a store into the properties array (not an in-object property).
+  LOperand* temp = (!is_in_object || needs_write_barrier ||
+      needs_write_barrier_for_map) ? TempRegister() : NULL;
+
+  return new(zone()) LStoreNamedField(obj, val, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreNamedGeneric* result =
+      new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* left = UseFixed(instr->left(), rdx);
+  LOperand* right = UseFixed(instr->right(), rax);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LStringAdd(context, left, right), rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = instr->size()->IsConstant()
+      ? UseConstant(instr->size())
+      : UseTempRegister(instr->size());
+  LOperand* temp = TempRegister();
+  LAllocate* result = new(zone()) LAllocate(context, size, temp);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  DCHECK(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new(zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor.GetRegisterParameter(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk()->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Retry(kTooManySpillSlotsNeededForOSR);
+      spill_index = 0;
+    }
+  }
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LCallStub* result = new(zone()) LCallStub(context);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real use of the arguments object causes
+  // a bailout.  So this value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* args = UseRegister(instr->arguments());
+  LOperand* length;
+  LOperand* index;
+  if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+    length = UseRegisterOrConstant(instr->length());
+    index = UseOrConstant(instr->index());
+  } else {
+    length = UseTempRegister(instr->length());
+    index = Use(instr->index());
+  }
+  return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), rax);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* value = UseFixed(instr->value(), rbx);
+  LTypeof* result = new(zone()) LTypeof(context, value);
+  return MarkAsCall(DefineFixed(result, rax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  info()->MarkAsDeferredCalling();
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), rsi);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
+  } else {
+    DCHECK(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->inlining_kind());
+  // Only replay the binding of the arguments object if it wasn't removed
+  // from the graph.
+  if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->BindContext(instr->closure_context());
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedFunction(instr->shared());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer = current_block_->last_environment()->
+      DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+
+  return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* object = UseFixed(instr->enumerable(), rax);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, rax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->context());
+  return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+    HAllocateBlockContext* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LAllocateBlockContext* result =
+      new(zone()) LAllocateBlockContext(context, function);
+  return MarkAsCall(DefineFixed(result, rsi), instr);
+}
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/crankshaft/x64/lithium-x64.h b/src/crankshaft/x64/lithium-x64.h
new file mode 100644
index 0000000..ebe1ef9
--- /dev/null
+++ b/src/crankshaft/x64/lithium-x64.h
@@ -0,0 +1,2768 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_X64_LITHIUM_X64_H_
+#define V8_CRANKSHAFT_X64_LITHIUM_X64_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckArrayBufferNotNeutered)             \
+  V(CheckInstanceType)                       \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToI)                               \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(DummyUse)                                \
+  V(Dummy)                                   \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInPrototypeChainAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadRoot)                                \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(MaybeGrowElements)                       \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(Prologue)                                \
+  V(PushArgument)                            \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(TaggedToI)                               \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
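+// The list above is an "X macro": instantiating it with a macro V expands
+// one V(Name) entry per concrete instruction.  The opcode enum below, for
+// example, is generated as
+//
+//   #define DECLARE_OPCODE(type) k##type,
+//   LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+//   #undef DECLARE_OPCODE
+//
+// which yields kAccessArgumentsAt, kAddI, and so on.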
+
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const final { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) final;              \
+  const char* Mnemonic() const final { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
+  }
+
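+// As a sketch, DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") inside LGoto
+// expands to
+//
+//   Opcode opcode() const final { return LInstruction::kGoto; }
+//   void CompileToNative(LCodeGen* generator) final;
+//   const char* Mnemonic() const final { return "goto"; }
+//   static LGoto* cast(LInstruction* instr) {
+//     DCHECK(instr->IsGoto());
+//     return reinterpret_cast<LGoto*>(instr);
+//   }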
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
+
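+// E.g. DECLARE_HYDROGEN_ACCESSOR(Add) gives an instruction a typed view of
+// the hydrogen instruction it was built from:
+//
+//   HAdd* hydrogen() const { return HAdd::cast(hydrogen_value()); }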
+
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) {
+  }
+
+  virtual ~LInstruction() {}
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instruction classes, such as LGap,
+  // that don't correspond to a single concrete opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
+
+  // Interface to the register allocator and iterators.
+  bool IsMarkedAsCall() const { return IsCall(); }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+  virtual bool MustSignExtendResult(LPlatformChunk* chunk) const {
+    return false;
+  }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
+ private:
+  // Iterator support.
+  friend class InputIterator;
+
+  friend class TempIterator;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
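+  // A one-bit flag packed at bit 0 of bit_field_, written via
+  // IsCallBits::update() in MarkAsCall() and read via IsCallBits::decode()
+  // in IsCall().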
+  class IsCallBits: public BitField<bool, 0, 1> {};
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  bool HasResult() const final { return R != 0 && result() != NULL; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const override { return results_[0]; }
+
+  bool MustSignExtendResult(LPlatformChunk* chunk) const final;
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  int InputCount() final { return I; }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return T; }
+  LOperand* TempAt(int i) final { return temps_[i]; }
+};
+
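+// For example, LMathRound below is an LTemplateInstruction<1, 1, 1>: one
+// result, one input (value()) and one temp register (temp()).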
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use the DECLARE_CONCRETE_INSTRUCTION macro here because of the
+  // subclasses (LInstructionGap and LLabel).
+  bool IsGap() const final { return true; }
+  void PrintDataTo(StringStream* stream) override;
+  static LGap* cast(LInstruction* instr) {
+    DCHECK(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new(zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LInstructionGap final : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  void PrintDataTo(StringStream* stream) override;
+  bool IsControl() const override { return true; }
+
+  int block_id() const { return block_->block_id(); }
+
+ private:
+  HBasicBlock* block_;
+};
+
+
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
+class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDummy final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() {}
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) {
+    inputs_[0] = value;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool IsControl() const override { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel final : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallStub(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+  bool IsControl() const final { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+
+  Label* false_label_;
+  Label* true_label_;
+};
+
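+// A control instruction produces no result and transfers control to one of
+// two successor blocks; LBranch below, for instance, is an
+// LControlInstruction<1, 0> that tests a single input, and the code
+// generator branches to TrueLabel(chunk) or FalseLabel(chunk).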
+
+class LWrapReceiver final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+};
+
+
+class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+};
+
+
+class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModByConstI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LModByConstI(LOperand* dividend,
+               int32_t divisor,
+               LOperand* temp1,
+               LOperand* temp2) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LModI(LOperand* left, LOperand* right, LOperand* temp) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivByConstI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LDivByConstI(LOperand* dividend,
+               int32_t divisor,
+               LOperand* temp1,
+               LOperand* temp2) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 3> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend,
+                       int32_t divisor,
+                       LOperand* temp1,
+                       LOperand* temp2,
+                       LOperand* temp3) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* temp3() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->representation().IsDouble();
+  }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFloor(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRound(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathFround final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  explicit LMathAbs(LOperand* context, LOperand* value) {
+    inputs_[1] = context;
+    inputs_[0] = value;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathLog(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathClz32(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSqrt(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathPowHalf(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+};
+
+
+class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranch(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCompareMinusZeroAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+                               "cmp-minus-zero-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsStringAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
+ public:
+  explicit LStringCompareAndBranch(LOperand* context,
+                                   LOperand* left,
+                                   LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasInstanceTypeAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype) {
+    inputs_[0] = object;
+    inputs_[1] = prototype;
+  }
+
+  LOperand* object() const { return inputs_[0]; }
+  LOperand* prototype() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+                               "has-in-prototype-chain-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
+};
+
+
+class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+  bool IsInteger32() const {
+    return hydrogen()->representation().IsInteger32();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
+};
+
+
+class LConstantE final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+
+class LConstantT final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+
+class LBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCmpMapAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpMapAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMapEnumLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSeqStringGetChar(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  LOperand* string() const { return inputs_[0]; }
+  LOperand* index() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LSeqStringSetChar(LOperand* context,
+                    LOperand* string,
+                    LOperand* index,
+                    LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+  }
+
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
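+  // lea computes the sum into a fresh register without setting the CPU
+  // flags, so it cannot be used when the add must detect overflow, and it
+  // only pays off when the left operand has further uses that an in-place
+  // add would clobber.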
+  static bool UseLea(HAdd* add) {
+    return !add->CheckFlag(HValue::kCanOverflow) &&
+        add->BetterLeftOperand()->UseCount() > 1;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op,
+               LOperand* context,
+               LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn final : public LTemplateInstruction<0, 3, 0> {
+ public:
+  explicit LReturn(LOperand* value,
+                   LOperand* context,
+                   LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* context() { return inputs_[1]; }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    DCHECK(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+  DECLARE_HYDROGEN_ACCESSOR(Return)
+};
+
+
+class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  explicit LLoadNamedGeneric(LOperand* context, LOperand* object,
+                             LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    temps_[0] = vector;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadFunctionPrototype(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+
+  LOperand* function() { return inputs_[0]; }
+};
+
+
+class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+inline static bool ExternalArrayOpRequiresTemp(
+    Representation key_representation,
+    ElementsKind elements_kind) {
+  // Operations that must divide the key by two to convert it into an index
+  // cannot fold that scaling into the load's addressing mode and need an
+  // extra temp register to do the work.
+  return SmiValuesAre31Bits() && key_representation.IsSmi() &&
+         (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS ||
+          elements_kind == UINT8_CLAMPED_ELEMENTS);
+}
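+
+// Concretely: with 31-bit smis a smi key holds (index << 1), and for the
+// byte-sized element kinds above the load would need a scale factor of 1/2,
+// which x64 addressing modes cannot express; the key is therefore untagged
+// into a temp register first.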
+
+
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = backing_store_owner;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* backing_store_owner() { return inputs_[2]; }
+  void PrintDataTo(StringStream* stream) override;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+};
+
+
+class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
+                    LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = obj;
+    inputs_[2] = key;
+    temps_[0] = vector;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
+};
+
+
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  explicit LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                              LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    temps_[0] = vector;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
+class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallJSFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
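+  // argument_count() includes the receiver, so the call arity is one less.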
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : inputs_(descriptor.GetRegisterParameterCount() +
+                    kImplicitRegisterParameterCount,
+                zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() +
+               kImplicitRegisterParameterCount ==
+           operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  // The target and context are passed as implicit parameters that are not
+  // explicitly listed in the descriptor.
+  static const int kImplicitRegisterParameterCount = 2;
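+  // Presumably inputs_[0] holds the implicit target (see target() above) and
+  // inputs_[1] the implicit context; the descriptor's register parameters
+  // follow them.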
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  int InputCount() final { return inputs_.length(); }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return 0; }
+  LOperand* TempAt(int i) final { return NULL; }
+};
+
+
+class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+                LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagU final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LTaggedToI(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberUntagD(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  bool needs_check() const { return needs_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+    inputs_[0] = object;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Representation representation() const {
+    return hydrogen()->field_representation();
+  }
+};
+
+
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+                     LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
+ public:
+  LStoreKeyed(LOperand* elements, LOperand* key, LOperand* value,
+              LOperand* backing_store_owner) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = value;
+    inputs_[3] = backing_store_owner;
+  }
+
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* backing_store_owner() { return inputs_[3]; }
+  ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
+ public:
+  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                     LOperand* value, LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    inputs_[3] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* context,
+                          LOperand* new_map_temp,
+                          LOperand* temp) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* context() { return inputs_[1]; }
+  LOperand* new_map_temp() { return temps_[0]; }
+  LOperand* temp() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object,
+                         LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+                               "trap-allocation-memento")
+};
+
+
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+                     LOperand* key, LOperand* current_capacity) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = elements;
+    inputs_[3] = key;
+    inputs_[4] = current_capacity;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* elements() { return inputs_[2]; }
+  LOperand* key() { return inputs_[3]; }
+  LOperand* current_capacity() { return inputs_[4]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
+class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckValue(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckArrayBufferNotNeutered final
+    : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckArrayBufferNotNeutered(LOperand* view) { inputs_[0] = view; }
+
+  LOperand* view() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
+                               "check-array-buffer-not-neutered")
+  DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
+};
+
+
+class LCheckInstanceType final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckInstanceType(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckMaps(LOperand* value = NULL) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampDToUint8(LOperand* unclamped) {
+    inputs_[0] = unclamped;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* unclamped) {
+    inputs_[0] = unclamped;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* unclamped,
+                 LOperand* temp_xmm) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp_xmm;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp_xmm() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleBits(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LConstructDouble(LOperand* hi, LOperand* lo) {
+    inputs_[0] = hi;
+    inputs_[1] = lo;
+  }
+
+  LOperand* hi() { return inputs_[0]; }
+  LOperand* lo() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry() {}
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStoreFrameContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAllocateBlockContext(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk final : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph)
+      : LChunk(info, graph),
+        dehoisted_key_ids_(graph->GetMaximumValueID(), graph->zone()) { }
+
+  int GetNextSpillIndex(RegisterKind kind);
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+  BitVector* GetDehoistedKeyIds() { return &dehoisted_key_ids_; }
+  bool IsDehoistedKey(HValue* value) {
+    return dehoisted_key_ids_.Contains(value->id());
+  }
+
+ private:
+  BitVector dehoisted_key_ids_;
+};
+
+
+class LChunkBuilder final : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(info, graph),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        allocator_(allocator) {}
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+  LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
+  LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+  LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathExp(HUnaryMathOperation* instr);
+  LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+  LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+  LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+  LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
+  LInstruction* DoDivI(HDiv* instr);
+  LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
+  LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(XMMRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           XMMRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that the register allocator will not
+  // reuse its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the instruction start. The register allocator is free to assign the
+  // same register to some other operand used inside the instruction (i.e. a
+  // temporary or the output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register that may be trashed or a constant operand.
+  MUST_USE_RESULT LOperand* UseTempRegisterOrConstant(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand in a constant operand.
+  MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+  // An input operand in register, stack slot or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(XMMRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            Register reg);
+  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+                                  XMMRegister reg);
+  // Assigns an environment to an instruction.  An instruction which can
+  // deoptimize must have an environment.
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  // Assigns a pointer map to an instruction.  An instruction which can
+  // trigger a GC or a lazy deoptimization must have a pointer map.
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // Marks a call for the register allocator.  Assigns a pointer map to
+  // support GC and lazy deoptimization.  Assigns an environment to support
+  // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+  void VisitInstruction(HInstruction* current);
+  void AddInstruction(LInstruction* instr, HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HBinaryOperation* instr);
+  void FindDehoistedKeyDefinitions(HValue* candidate);
+
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  LAllocator* allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_X64_LITHIUM_X64_H_
diff --git a/src/crankshaft/x87/OWNERS b/src/crankshaft/x87/OWNERS
new file mode 100644
index 0000000..dd9998b
--- /dev/null
+++ b/src/crankshaft/x87/OWNERS
@@ -0,0 +1 @@
+weiliang.lin@intel.com
diff --git a/src/crankshaft/x87/lithium-codegen-x87.cc b/src/crankshaft/x87/lithium-codegen-x87.cc
new file mode 100644
index 0000000..fe2baa5
--- /dev/null
+++ b/src/crankshaft/x87/lithium-codegen-x87.cc
@@ -0,0 +1,6038 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/crankshaft/x87/lithium-codegen-x87.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/deoptimizer.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/profiler/cpu-profiler.h"
+#include "src/x87/frames-x87.h"
+
+namespace v8 {
+namespace internal {
+
+
+// When invoking builtins, we need to record the safepoint in the middle of
+// the invoke instruction sequence generated by the macro assembler.
+class SafepointGenerator final : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deopt_mode_(mode) {}
+  virtual ~SafepointGenerator() {}
+
+  void BeforeCall(int call_size) const override {}
+
+  void AfterCall() const override {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  DCHECK(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+
+  support_aligned_spilled_doubles_ = info()->IsOptimizing();
+
+  dynamic_frame_alignment_ = info()->IsOptimizing() &&
+      ((chunk()->num_double_slots() > 2 &&
+        !chunk()->graph()->is_recursive()) ||
+       !info()->osr_ast_id().IsNone());
+
+  return GeneratePrologue() &&
+      GenerateBody() &&
+      GenerateDeferredCode() &&
+      GenerateJumpTable() &&
+      GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  DCHECK(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  PopulateDeoptimizationData(code);
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
+  }
+}
+
+
+#ifdef _MSC_VER
+void LCodeGen::MakeSureStackPagesMapped(int offset) {
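+  // Windows commits stack pages lazily behind a guard page, so touch one
+  // word in every page of the new frame to keep a large allocation from
+  // skipping past the guard page.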
+  const int kPageSize = 4 * KB;
+  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
+    __ mov(Operand(esp, offset), eax);
+  }
+}
+#endif
+
+
+bool LCodeGen::GeneratePrologue() {
+  DCHECK(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+      __ int3();
+    }
+#endif
+
+    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+      // Move state of dynamic frame alignment into edx.
+      __ Move(edx, Immediate(kNoAlignmentPadding));
+
+      Label do_not_pad, align_loop;
+      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
+      // Align esp + 4 to a multiple of 2 * kPointerSize.
+      __ test(esp, Immediate(kPointerSize));
+      __ j(not_zero, &do_not_pad, Label::kNear);
+      __ push(Immediate(0));
+      __ mov(ebx, esp);
+      __ mov(edx, Immediate(kAlignmentPaddingPushed));
+      // Copy arguments, receiver, and return address.
+      __ mov(ecx, Immediate(scope()->num_parameters() + 2));
+
+      __ bind(&align_loop);
+      __ mov(eax, Operand(ebx, 1 * kPointerSize));
+      __ mov(Operand(ebx, 0), eax);
+      __ add(Operand(ebx), Immediate(kPointerSize));
+      __ dec(ecx);
+      __ j(not_zero, &align_loop, Label::kNear);
+      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+      __ bind(&do_not_pad);
+    }
+  }
+
+  info()->set_prologue_offset(masm_->pc_offset());
+  if (NeedsEagerFrame()) {
+    DCHECK(!frame_is_built_);
+    frame_is_built_ = true;
+    if (info()->IsStub()) {
+      __ StubPrologue();
+    } else {
+      __ Prologue(info()->GeneratePreagedPrologue());
+    }
+  }
+
+  if (info()->IsOptimizing() &&
+      dynamic_frame_alignment_ &&
+      FLAG_debug_code) {
+    __ test(esp, Immediate(kPointerSize));
+    __ Assert(zero, kFrameIsExpectedToBeAligned);
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  DCHECK(slots != 0 || !info()->IsOptimizing());
+  if (slots > 0) {
+    if (slots == 1) {
+      if (dynamic_frame_alignment_) {
+        __ push(edx);
+      } else {
+        __ push(Immediate(kNoAlignmentPadding));
+      }
+    } else {
+      if (FLAG_debug_code) {
+        __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+        MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+        __ push(eax);
+        __ mov(Operand(eax), Immediate(slots));
+        Label loop;
+        __ bind(&loop);
+        __ mov(MemOperand(esp, eax, times_4, 0),
+               Immediate(kSlotsZapValue));
+        __ dec(eax);
+        __ j(not_zero, &loop);
+        __ pop(eax);
+      } else {
+        __ sub(Operand(esp), Immediate(slots * kPointerSize));
+#ifdef _MSC_VER
+        MakeSureStackPagesMapped(slots * kPointerSize);
+#endif
+      }
+
+      if (support_aligned_spilled_doubles_) {
+        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
+        // Store dynamic frame alignment state in the first local.
+        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
+        if (dynamic_frame_alignment_) {
+          __ mov(Operand(ebp, offset), edx);
+        } else {
+          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
+        }
+      }
+    }
+  }
+
+  // Initialize FPU state.
+  __ fninit();
+
+  return !is_aborted();
+}
+
+
+void LCodeGen::DoPrologue(LPrologue* instr) {
+  Comment(";;; Prologue begin");
+
+  // Possibly allocate a local context.
+  if (info_->num_heap_slots() > 0) {
+    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
+    // Argument to NewContext is the function, which is still in edi.
+    int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
+    if (info()->scope()->is_script_scope()) {
+      __ push(edi);
+      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
+      __ CallRuntime(Runtime::kNewScriptContext);
+      deopt_mode = Safepoint::kLazyDeopt;
+    } else if (slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ push(edi);
+      __ CallRuntime(Runtime::kNewFunctionContext);
+    }
+    RecordSafepoint(deopt_mode);
+
+    // Context is returned in eax.  It replaces the context passed to us.
+    // It's saved in the stack and kept live in esi.
+    __ mov(esi, eax);
+    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);
+
+    // Copy parameters into context if necessary.
+    int num_parameters = scope()->num_parameters();
+    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
+    for (int i = first_parameter; i < num_parameters; i++) {
+      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ mov(eax, Operand(ebp, parameter_offset));
+        // Store it in the context.
+        int context_offset = Context::SlotOffset(var->index());
+        __ mov(Operand(esi, context_offset), eax);
+        // Update the write barrier. This clobbers eax and ebx.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
+                                    kDontSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  Comment(";;; Prologue end");
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or, if
+  // there are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Move state of dynamic frame alignment into edx.
+  __ Move(edx, Immediate(kNoAlignmentPadding));
+
+  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
+    Label do_not_pad, align_loop;
+    // Align ebp + 4 to a multiple of 2 * kPointerSize.
+    __ test(ebp, Immediate(kPointerSize));
+    __ j(zero, &do_not_pad, Label::kNear);
+    __ push(Immediate(0));
+    __ mov(ebx, esp);
+    __ mov(edx, Immediate(kAlignmentPaddingPushed));
+
+    // Move all parts of the frame over one word. The frame consists of:
+    // unoptimized frame slots, alignment state, context, frame pointer, return
+    // address, receiver, and the arguments.
+    __ mov(ecx, Immediate(scope()->num_parameters() +
+           5 + graph()->osr()->UnoptimizedFrameSlots()));
+
+    __ bind(&align_loop);
+    __ mov(eax, Operand(ebx, 1 * kPointerSize));
+    __ mov(Operand(ebx, 0), eax);
+    __ add(Operand(ebx), Immediate(kPointerSize));
+    __ dec(ecx);
+    __ j(not_zero, &align_loop, Label::kNear);
+    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
+    __ sub(Operand(ebp), Immediate(kPointerSize));
+    __ bind(&do_not_pad);
+  }
+
+  // Save the first local, which is overwritten by the alignment state.
+  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
+  __ push(alignment_loc);
+
+  // Set the dynamic frame alignment state.
+  __ mov(alignment_loc, edx);
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  DCHECK(slots >= 1);
+  __ sub(esp, Immediate((slots - 1) * kPointerSize));
+
+  // Initialize FPU state.
+  __ fninit();
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (instr->IsCall()) {
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  }
+  if (!instr->IsLazyBailout() && !instr->IsGap()) {
+    safepoints_.BumpLastLazySafepointIndex();
+  }
+  FlushX87StackIfNecessary(instr);
+}
+
+
+void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+  // When returning from a function call, the FPU must be initialized again.
+  if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) {
+    bool double_result = instr->HasDoubleRegisterResult();
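+    // fninit clears the entire FPU stack, so spill a double result to memory
+    // first and reload it afterwards.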
+    if (double_result) {
+      __ lea(esp, Operand(esp, -kDoubleSize));
+      __ fstp_d(Operand(esp, 0));
+    }
+    __ fninit();
+    if (double_result) {
+      __ fld_d(Operand(esp, 0));
+      __ lea(esp, Operand(esp, kDoubleSize));
+    }
+  }
+  if (instr->IsGoto()) {
+    x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this);
+  } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
+             !instr->IsGap() && !instr->IsReturn()) {
+    if (instr->ClobbersDoubleRegisters(isolate())) {
+      if (instr->HasDoubleRegisterResult()) {
+        DCHECK_EQ(1, x87_stack_.depth());
+      } else {
+        DCHECK_EQ(0, x87_stack_.depth());
+      }
+    }
+    __ VerifyX87StackDepth(x87_stack_.depth());
+  }
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+  if (!jump_table_.length()) return !is_aborted();
+
+  Label needs_frame;
+  Comment(";;; -------------------- Jump table --------------------");
+
+  for (int i = 0; i < jump_table_.length(); i++) {
+    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+    __ bind(&table_entry->label);
+    Address entry = table_entry->address;
+    DeoptComment(table_entry->deopt_info);
+    if (table_entry->needs_frame) {
+      DCHECK(!info()->saves_caller_doubles());
+      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
+      __ call(&needs_frame);
+    } else {
+      __ call(entry, RelocInfo::RUNTIME_ENTRY);
+    }
+    info()->LogDeoptCallPosition(masm()->pc_offset(),
+                                 table_entry->deopt_info.inlining_id);
+  }
+  if (needs_frame.is_linked()) {
+    __ bind(&needs_frame);
+
+    /* stack layout
+       4: entry address
+       3: return address  <-- esp
+       2: garbage
+       1: garbage
+       0: garbage
+    */
+    __ sub(esp, Immediate(kPointerSize));    // Reserve space for stub marker.
+    __ push(MemOperand(esp, kPointerSize));  // Copy return address.
+    __ push(MemOperand(esp, 3 * kPointerSize));  // Copy entry address.
+
+    /* stack layout
+       4: entry address
+       3: return address
+       2: garbage
+       1: return address
+       0: entry address  <-- esp
+    */
+    __ mov(MemOperand(esp, 4 * kPointerSize), ebp);  // Save ebp.
+
+    // Copy context.
+    __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
+    __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
+    // Fill ebp with the right stack frame address.
+    __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
+
+    // This variant of deopt can only be used with stubs. Since we don't
+    // have a function pointer to install in the stack frame that we're
+    // building, install a special marker there instead.
+    DCHECK(info()->IsStub());
+    __ mov(MemOperand(esp, 2 * kPointerSize),
+           Immediate(Smi::FromInt(StackFrame::STUB)));
+
+    /* stack layout
+       4: old ebp
+       3: context pointer
+       2: stub marker
+       1: return address
+       0: entry address  <-- esp
+    */
+    __ ret(0);  // Call the continuation without clobbering registers.
+  }
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  DCHECK(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+      X87Stack copy(code->x87_stack());
+      x87_stack_ = copy;
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(
+          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
+        frame_is_built_ = true;
+        // Build the frame in such a way that esi isn't trashed.
+        __ push(ebp);  // Caller's frame pointer.
+        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
+        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
+        __ lea(ebp, Operand(esp, 2 * kPointerSize));
+        Comment(";;; Deferred code");
+      }
+      code->Generate();
+      if (NeedsDeferredFrame()) {
+        __ bind(code->done());
+        Comment(";;; Destroy frame");
+        DCHECK(frame_is_built_);
+        frame_is_built_ = false;
+        __ mov(esp, ebp);
+        __ pop(ebp);
+      }
+      __ jmp(code->exit());
+    }
+  }
+
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  DCHECK(is_done());
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    // For lazy deoptimization we need space to patch a call after every call.
+    // Ensure there is always space for such patching, even if the code ends
+    // in a call.
+    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+    while (masm()->pc_offset() < target_offset) {
+      masm()->nop();
+    }
+  }
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int code) const {
+  return Register::from_code(code);
+}
+
+
+X87Register LCodeGen::ToX87Register(int code) const {
+  return X87Register::from_code(code);
+}
+
+
+void LCodeGen::X87LoadForUsage(X87Register reg) {
+  DCHECK(x87_stack_.Contains(reg));
+  x87_stack_.Fxch(reg);
+  x87_stack_.pop();
+}
+
+
+void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
+  DCHECK(x87_stack_.Contains(reg1));
+  DCHECK(x87_stack_.Contains(reg2));
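+  // If both uses name the same register, it is only on the virtual stack
+  // once; duplicate it with fld before popping both uses.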
+  if (reg1.is(reg2) && x87_stack_.depth() == 1) {
+    __ fld(x87_stack_.st(reg1));
+    x87_stack_.push(reg1);
+    x87_stack_.pop();
+    x87_stack_.pop();
+  } else {
+    x87_stack_.Fxch(reg1, 1);
+    x87_stack_.Fxch(reg2);
+    x87_stack_.pop();
+    x87_stack_.pop();
+  }
+}
+
+
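+// Packs the virtual x87 stack into an int: bits 0..2 hold the depth, and
+// each following 3-bit field holds a register code, top of stack first.
+// Worked example (illustrative): depth 2 with top-of-stack code 3 above
+// code 1 encodes as 2 | (3 << 3) | (1 << 6).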
+int LCodeGen::X87Stack::GetLayout() {
+  int layout = stack_depth_;
+  for (int i = 0; i < stack_depth_; i++) {
+    layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3));
+  }
+
+  return layout;
+}
+
+
+void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
+  DCHECK(is_mutable_);
+  DCHECK(Contains(reg) && stack_depth_ > other_slot);
+  int i  = ArrayIndex(reg);
+  int st = st2idx(i);
+  if (st != other_slot) {
+    int other_i = st2idx(other_slot);
+    X87Register other = stack_[other_i];
+    stack_[other_i]   = reg;
+    stack_[i]         = other;
+    if (st == 0) {
+      __ fxch(other_slot);
+    } else if (other_slot == 0) {
+      __ fxch(st);
+    } else {
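+      // Neither slot is st(0), and fxch can only swap with st(0): bring
+      // st(st) to the top, exchange it with other_slot, then restore the
+      // original top.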
+      __ fxch(st);
+      __ fxch(other_slot);
+      __ fxch(st);
+    }
+  }
+}
+
+
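+// Maps an x87 stack position st(pos) to an index into stack_: the array
+// grows upward, so with stack_depth_ == 3, st(0) is stack_[2] and st(2) is
+// stack_[0].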
+int LCodeGen::X87Stack::st2idx(int pos) {
+  return stack_depth_ - pos - 1;
+}
+
+
+int LCodeGen::X87Stack::ArrayIndex(X87Register reg) {
+  for (int i = 0; i < stack_depth_; i++) {
+    if (stack_[i].is(reg)) return i;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+bool LCodeGen::X87Stack::Contains(X87Register reg) {
+  for (int i = 0; i < stack_depth_; i++) {
+    if (stack_[i].is(reg)) return true;
+  }
+  return false;
+}
+
+
+void LCodeGen::X87Stack::Free(X87Register reg) {
+  DCHECK(is_mutable_);
+  DCHECK(Contains(reg));
+  int i  = ArrayIndex(reg);
+  int st = st2idx(i);
+  if (st > 0) {
+    // Keep track of how fstp(st) changes the order of the elements.
+    int tos_i = st2idx(0);
+    stack_[i] = stack_[tos_i];
+  }
+  pop();
+  __ fstp(st);
+}
+
+
+void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
+  if (x87_stack_.Contains(dst)) {
+    x87_stack_.Fxch(dst);
+    __ fstp(0);
+  } else {
+    x87_stack_.push(dst);
+  }
+  X87Fld(src, opts);
+}
+
+
+void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) {
+  if (x87_stack_.Contains(dst)) {
+    x87_stack_.Fxch(dst);
+    __ fstp(0);
+    x87_stack_.pop();
+    // Push ST(i) onto the FPU register stack
+    __ fld(x87_stack_.st(src));
+    x87_stack_.push(dst);
+  } else {
+    // Push ST(i) onto the FPU register stack
+    __ fld(x87_stack_.st(src));
+    x87_stack_.push(dst);
+  }
+}
+
+
+void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
+  DCHECK(!src.is_reg_only());
+  switch (opts) {
+    case kX87DoubleOperand:
+      __ fld_d(src);
+      break;
+    case kX87FloatOperand:
+      __ fld_s(src);
+      break;
+    case kX87IntOperand:
+      __ fild_s(src);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
+  DCHECK(!dst.is_reg_only());
+  x87_stack_.Fxch(src);
+  switch (opts) {
+    case kX87DoubleOperand:
+      __ fst_d(dst);
+      break;
+    case kX87FloatOperand:
+      __ fst_s(dst);
+      break;
+    case kX87IntOperand:
+      __ fist_s(dst);
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
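+// Writing a result register is a two-step protocol: PrepareToWrite() frees
+// any old value and reserves the next slot, and CommitWrite() publishes the
+// slot once the value has actually been pushed.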
+void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
+  DCHECK(is_mutable_);
+  if (Contains(reg)) {
+    Free(reg);
+  }
+  // Mark this register as the next register to write to
+  stack_[stack_depth_] = reg;
+}
+
+
+void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
+  DCHECK(is_mutable_);
+  // Assert that the register is prepared for a write but not yet on the
+  // virtual stack.
+  DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) &&
+         stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
+  stack_depth_++;
+}
+
+
+void LCodeGen::X87PrepareBinaryOp(
+    X87Register left, X87Register right, X87Register result) {
+  // x87 binary operations must use DefineSameAsFirst: the result reuses the
+  // left operand's register.
+  DCHECK(result.is(left));
+  x87_stack_.Fxch(right, 1);
+  x87_stack_.Fxch(left);
+}
+
+
+void LCodeGen::X87Stack::FlushIfNecessary(LInstruction* instr, LCodeGen* cgen) {
+  if (stack_depth_ > 0 && instr->ClobbersDoubleRegisters(isolate())) {
+    bool double_inputs = instr->HasDoubleRegisterInput();
+
+    // Flush the stack from the top down, since Free() moves the top element.
+    for (int i = stack_depth_-1; i >= 0; i--) {
+      X87Register reg = stack_[i];
+      // Skip registers which contain the inputs for the next instruction
+      // when flushing the stack
+      if (double_inputs && instr->IsDoubleInput(reg, cgen)) {
+        continue;
+      }
+      Free(reg);
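+      // If Free() moved the old top of the stack down into slot i, step back
+      // so that slot is examined again on the next pass.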
+      if (i < stack_depth_-1) i++;
+    }
+  }
+  if (instr->IsReturn()) {
+    while (stack_depth_ > 0) {
+      __ fstp(0);
+      stack_depth_--;
+    }
+    if (FLAG_debug_code && FLAG_enable_slow_asserts) __ VerifyX87StackDepth(0);
+  }
+}
+
+
+void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr,
+                                      LCodeGen* cgen) {
+  // When going to a join block, an explicit LClobberDoubles is inserted
+  // before the LGoto, because all used x87 registers are spilled to stack
+  // slots. The ResolvePhis phase of the register allocator guarantees that
+  // the two inputs' x87 stacks have the same layout, so don't check
+  // stack_depth_ <= 1 here.
+  int goto_block_id = goto_instr->block_id();
+  if (current_block_id + 1 != goto_block_id) {
+    // If we have a value on the x87 stack on leaving a block, it must be a
+    // phi input. If the next block we compile is not the join block, we have
+    // to discard the stack state.
+    // Before discarding the stack state, we need to save it if the "goto
+    // block" has an unreachable last predecessor and
+    // FLAG_unreachable_code_elimination is enabled.
+    if (FLAG_unreachable_code_elimination) {
+      int length = goto_instr->block()->predecessors()->length();
+      bool has_unreachable_last_predecessor = false;
+      for (int i = 0; i < length; i++) {
+        HBasicBlock* block = goto_instr->block()->predecessors()->at(i);
+        if (block->IsUnreachable() &&
+            (block->block_id() + 1) == goto_block_id) {
+          has_unreachable_last_predecessor = true;
+        }
+      }
+      if (has_unreachable_last_predecessor) {
+        if (cgen->x87_stack_map_.find(goto_block_id) ==
+            cgen->x87_stack_map_.end()) {
+          X87Stack* stack = new (cgen->zone()) X87Stack(*this);
+          cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack));
+        }
+      }
+    }
+
+    // Discard the stack state.
+    stack_depth_ = 0;
+  }
+}
+
+
+void LCodeGen::EmitFlushX87ForDeopt() {
+  // The deoptimizer does not support x87 registers. But as long as we
+  // deopt from a stub it's not a problem, since we will re-materialize the
+  // original stub inputs, which can't be double registers.
+  // DCHECK(info()->IsStub());
+  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+    __ pushfd();
+    __ VerifyX87StackDepth(x87_stack_.depth());
+    __ popfd();
+  }
+
+  // The x87 stack itself is flushed in the deoptimizer entry.
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  DCHECK(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+X87Register LCodeGen::ToX87Register(LOperand* op) const {
+  DCHECK(op->IsDoubleRegister());
+  return ToX87Register(op->index());
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+                                   const Representation& r) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  if (r.IsExternal()) {
+    return reinterpret_cast<int32_t>(
+        constant->ExternalReferenceValue().address());
+  }
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  DCHECK(r.IsSmiOrTagged());
+  return reinterpret_cast<int32_t>(Smi::FromInt(value));
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+
+ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasExternalReferenceValue());
+  return constant->ExternalReferenceValue();
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
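+  // E.g. index == -1 maps to esp + kPCOnStackSize, the slot immediately
+  // above the pushed return address.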
+  return -(index + 1) * kPointerSize + kPCOnStackSize;
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) const {
+  if (op->IsRegister()) return Operand(ToRegister(op));
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return Operand(ebp, StackSlotOffset(op->index()));
+  } else {
+    // Retrieve the parameter relative to the stack pointer, since no eager
+    // frame has been built.
+    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+
+Operand LCodeGen::HighOperand(LOperand* op) {
+  DCHECK(op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
+  } else {
+    // Retrieve the parameter relative to the stack pointer, since no eager
+    // frame has been built.
+    return Operand(
+        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+
+  WriteTranslation(environment->outer(), translation);
+  WriteTranslationFrame(environment, translation);
+
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    AddToTranslation(environment,
+                     translation,
+                     value,
+                     environment->HasTaggedValueAt(i),
+                     environment->HasUint32ValueAt(i),
+                     &object_index,
+                     &dematerialized_index);
+  }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation,
+                                LOperand* op,
+                                bool is_tagged,
+                                bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment,
+                       translation,
+                       value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer,
+                       dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    if (is_tagged) {
+      translation->StoreStackSlot(index);
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(index);
+    } else {
+      translation->StoreInt32StackSlot(index);
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    int index = op->index();
+    if (index >= 0) {
+      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
+    }
+    translation->StoreDoubleStackSlot(index);
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    X87Register reg = ToX87Register(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  DCHECK(instr != NULL);
+  __ call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+  // Signal that we don't inline smi code before these stubs in the
+  // optimizing code generator.
+  if (code->kind() == Code::BINARY_OP_IC ||
+      code->kind() == Code::COMPARE_IC) {
+    __ nop();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc,
+                           LInstruction* instr, SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+  DCHECK(instr->HasPointerMap());
+
+  __ CallRuntime(fun, argc, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+
+  DCHECK(info()->is_calling());
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    if (!ToRegister(context).is(esi)) {
+      __ mov(esi, ToRegister(context));
+    }
+  } else if (context->IsStackSlot()) {
+    __ mov(esi, ToOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);
+
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+
+  DCHECK(info()->is_calling());
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(
+    LEnvironment* environment, Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, zone());
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason,
+                            Deoptimizer::BailoutType bailout_type) {
+  LEnvironment* environment = instr->environment();
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  DCHECK(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+    return;
+  }
+
+  if (DeoptEveryNTimes()) {
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+    Label no_deopt;
+    __ pushfd();
+    __ push(eax);
+    __ mov(eax, Operand::StaticVariable(count));
+    __ sub(eax, Immediate(1));
+    __ j(not_zero, &no_deopt, Label::kNear);
+    if (FLAG_trap_on_deopt) __ int3();
+    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
+    __ mov(Operand::StaticVariable(count), eax);
+    __ pop(eax);
+    __ popfd();
+    DCHECK(frame_is_built_);
+    // Put the x87 stack layout in TOS.
+    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
+    __ push(Immediate(x87_stack_.GetLayout()));
+    __ fild_s(MemOperand(esp, 0));
+    // Don't touch eflags.
+    __ lea(esp, Operand(esp, kPointerSize));
+    __ call(entry, RelocInfo::RUNTIME_ENTRY);
+    __ bind(&no_deopt);
+    __ mov(Operand::StaticVariable(count), eax);
+    __ pop(eax);
+    __ popfd();
+  }
+
+  // Put the x87 stack layout in TOS, so that we can save x87 fp registers in
+  // the correct location.
+  {
+    Label done;
+    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
+
+    int x87_stack_layout = x87_stack_.GetLayout();
+    __ push(Immediate(x87_stack_layout));
+    __ fild_s(MemOperand(esp, 0));
+    // Don't touch eflags.
+    __ lea(esp, Operand(esp, kPointerSize));
+    __ bind(&done);
+  }
+
+  if (info()->ShouldTrapOnDeopt()) {
+    Label done;
+    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
+    __ int3();
+    __ bind(&done);
+  }
+
+  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);
+
+  DCHECK(info()->IsStub() || frame_is_built_);
+  if (cc == no_condition && frame_is_built_) {
+    DeoptComment(deopt_info);
+    __ call(entry, RelocInfo::RUNTIME_ENTRY);
+    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
+  } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
+                                            !frame_is_built_);
+    // We often have several deopts to the same entry, reuse the last
+    // jump entry if this is the case.
+    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
+        jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
+    }
+    if (cc == no_condition) {
+      __ jmp(&jump_table_.last().label);
+    } else {
+      __ j(cc, &jump_table_.last().label);
+    }
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            Deoptimizer::DeoptReason deopt_reason) {
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
+  DCHECK(kind == expected_safepoint_kind_);
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode mode) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
+          label->block_id(),
+          LabelType(label));
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  if (label->block()->predecessors()->length() > 1) {
+    // A join block's x87 stack is that of its last visited predecessor.
+    // If the last visited predecessor block is unreachable, the stack state
+    // will be wrong. In that case, use the x87 stack of a reachable
+    // predecessor.
+    X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
+    // Restore x87 stack.
+    if (it != x87_stack_map_.end()) {
+      x87_stack_ = *(it->second);
+    }
+  }
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->result()).is(eax));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
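+  // E.g. with divisor == 8 (mask == 7) and dividend == -13, the negative
+  // path computes -(13 & 7) == -5, matching JS -13 % 8; dividend == -16
+  // yields -(16 & 7) == 0, which triggers the minus-zero bailout if needed.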
+  HMod* hmod = instr->hydrogen();
+  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+  Label dividend_is_not_negative, done;
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ test(dividend, dividend);
+    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
+    // Note that this is correct even for kMinInt operands.
+    __ neg(dividend);
+    __ and_(dividend, mask);
+    __ neg(dividend);
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+    }
+    __ jmp(&done, Label::kNear);
+  }
+
+  __ bind(&dividend_is_not_negative);
+  __ and_(dividend, mask);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  __ TruncatingDiv(dividend, Abs(divisor));
+  __ imul(edx, edx, Abs(divisor));
+  __ mov(eax, dividend);
+  __ sub(eax, edx);
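+  // eax now holds dividend - trunc(dividend / |divisor|) * |divisor|. E.g.
+  // for dividend == -13, divisor == 5: edx == -2 * 5 == -10 and
+  // eax == -13 - (-10) == -3, matching JS -13 % 5.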
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ j(not_zero, &remainder_not_zero, Label::kNear);
+    __ cmp(dividend, Immediate(0));
+    DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  HMod* hmod = instr->hydrogen();
+
+  Register left_reg = ToRegister(instr->left());
+  DCHECK(left_reg.is(eax));
+  Register right_reg = ToRegister(instr->right());
+  DCHECK(!right_reg.is(eax));
+  DCHECK(!right_reg.is(edx));
+  Register result_reg = ToRegister(instr->result());
+  DCHECK(result_reg.is(edx));
+
+  Label done;
+  // Check for x % 0; idiv would signal a divide error. We have to
+  // deopt in this case because we can't return a NaN.
+  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(right_reg, Operand(right_reg));
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for kMinInt % -1; idiv would signal a divide error. We
+  // have to deopt if we care about -0, because we can't return that.
+  if (hmod->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    __ cmp(left_reg, kMinInt);
+    __ j(not_equal, &no_overflow_possible, Label::kNear);
+    __ cmp(right_reg, -1);
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
+    } else {
+      __ j(not_equal, &no_overflow_possible, Label::kNear);
+      __ Move(result_reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  // Sign extend dividend in eax into edx:eax.
+  __ cdq();
+
+  // If we care about -0, test if the dividend is <0 and the result is 0.
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label positive_left;
+    __ test(left_reg, Operand(left_reg));
+    __ j(not_sign, &positive_left, Label::kNear);
+    __ idiv(right_reg);
+    __ test(result_reg, Operand(result_reg));
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+    __ jmp(&done, Label::kNear);
+    __ bind(&positive_left);
+  }
+  __ idiv(right_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ test(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    __ cmp(dividend, kMinInt);
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+  }
+  // Deoptimize if remainder will not be 0.
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+      divisor != 1 && divisor != -1) {
+    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+    __ test(dividend, Immediate(mask));
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+  }
+  __ Move(result, dividend);
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (shift > 0) {
+    // The arithmetic shift is always OK, the 'if' is an optimization only.
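+    // The sar/shr pair builds the rounding bias: 2^shift - 1 for a negative
+    // dividend, 0 otherwise. E.g. divisor == 4 (shift == 2), dividend == -7:
+    // bias == 3 and (-7 + 3) >> 2 == -1 == trunc(-7 / 4).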
+    if (shift > 1) __ sar(result, 31);
+    __ shr(result, 32 - shift);
+    __ add(result, dividend);
+    __ sar(result, shift);
+  }
+  if (divisor < 0) __ neg(result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(edx));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ test(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+
+  __ TruncatingDiv(dividend, Abs(divisor));
+  if (divisor < 0) __ neg(edx);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    __ mov(eax, edx);
+    __ imul(eax, eax, divisor);
+    __ sub(eax, dividend);
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  Register remainder = ToRegister(instr->temp());
+  DCHECK(dividend.is(eax));
+  DCHECK(remainder.is(edx));
+  DCHECK(ToRegister(instr->result()).is(eax));
+  DCHECK(!divisor.is(eax));
+  DCHECK(!divisor.is(edx));
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(divisor, divisor);
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ test(dividend, dividend);
+    __ j(not_zero, &dividend_not_zero, Label::kNear);
+    __ test(divisor, divisor);
+    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label dividend_not_min_int;
+    __ cmp(dividend, kMinInt);
+    __ j(not_zero, &dividend_not_min_int, Label::kNear);
+    __ cmp(divisor, -1);
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+    __ bind(&dividend_not_min_int);
+  }
+
+  // Sign extend to edx (= remainder).
+  __ cdq();
+  __ idiv(divisor);
+
+  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    // Deoptimize if remainder is not 0.
+    __ test(remainder, remainder);
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
+  }
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
+  if (divisor == 1) return;
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 1) {
+    __ sar(dividend, shift);
+    return;
+  }
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+  __ neg(dividend);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+    }
+    return;
+  }
+
+  // If the negation could not overflow, simply shifting is OK.
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ sar(dividend, shift);
+    return;
+  }
+
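+  // If the negation overflowed, the dividend was kMinInt, and the quotient
+  // kMinInt / divisor is exact (divisor is a power of two), so flooring and
+  // truncating agree and the constant can be materialized directly.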
+  Label not_kmin_int, done;
+  __ j(no_overflow, &not_kmin_int, Label::kNear);
+  __ mov(dividend, Immediate(kMinInt / divisor));
+  __ jmp(&done, Label::kNear);
+  __ bind(&not_kmin_int);
+  __ sar(dividend, shift);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(ToRegister(instr->result()).is(edx));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ test(dividend, dividend);
+    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(dividend, Abs(divisor));
+    if (divisor < 0) __ neg(edx);
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
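+  // E.g. for divisor == 3 and dividend == -7 the adjustment path computes
+  // trunc((-7 + 1) / 3) - 1 == -2 - 1 == -3 == floor(-7 / 3).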
+  Register temp = ToRegister(instr->temp3());
+  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
+  Label needs_adjustment, done;
+  __ cmp(dividend, Immediate(0));
+  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
+  __ TruncatingDiv(dividend, Abs(divisor));
+  if (divisor < 0) __ neg(edx);
+  __ jmp(&done, Label::kNear);
+  __ bind(&needs_adjustment);
+  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(temp, Abs(divisor));
+  if (divisor < 0) __ neg(edx);
+  __ dec(edx);
+  __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  Register remainder = ToRegister(instr->temp());
+  Register result = ToRegister(instr->result());
+  DCHECK(dividend.is(eax));
+  DCHECK(remainder.is(edx));
+  DCHECK(result.is(eax));
+  DCHECK(!divisor.is(eax));
+  DCHECK(!divisor.is(edx));
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ test(divisor, divisor);
+    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ test(dividend, dividend);
+    __ j(not_zero, &dividend_not_zero, Label::kNear);
+    __ test(divisor, divisor);
+    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label dividend_not_min_int;
+    __ cmp(dividend, kMinInt);
+    __ j(not_zero, &dividend_not_min_int, Label::kNear);
+    __ cmp(divisor, -1);
+    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
+    __ bind(&dividend_not_min_int);
+  }
+
+  // Sign extend to edx (= remainder).
+  __ cdq();
+  __ idiv(divisor);
+
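+  // If the remainder is non-zero and its sign differs from the divisor's,
+  // the truncated quotient is one above the floored one: the xor puts the
+  // sign disagreement into the sign bit of remainder, sar 31 turns it into
+  // 0 or -1, and the add applies the correction.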
+  Label done;
+  __ test(remainder, remainder);
+  __ j(zero, &done, Label::kNear);
+  __ xor_(remainder, divisor);
+  __ sar(remainder, 31);
+  __ add(result, remainder);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register left = ToRegister(instr->left());
+  LOperand* right = instr->right();
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ mov(ToRegister(instr->temp()), left);
+  }
+
+  if (right->IsConstantOperand()) {
+    // Try strength reductions on the multiplication.
+    // All replacement instructions are at most as long as the imul
+    // and have better latency.
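+    // E.g. x * 3 becomes lea x, [x + x*2] and x * 9 becomes
+    // lea x, [x + x*8]; lea neither reads nor writes the flags.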
+    int constant = ToInteger32(LConstantOperand::cast(right));
+    if (constant == -1) {
+      __ neg(left);
+    } else if (constant == 0) {
+      __ xor_(left, Operand(left));
+    } else if (constant == 2) {
+      __ add(left, Operand(left));
+    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+      // If we know that the multiplication can't overflow, it's safe to
+      // use instructions that don't set the overflow flag for the
+      // multiplication.
+      switch (constant) {
+        case 1:
+          // Do nothing.
+          break;
+        case 3:
+          __ lea(left, Operand(left, left, times_2, 0));
+          break;
+        case 4:
+          __ shl(left, 2);
+          break;
+        case 5:
+          __ lea(left, Operand(left, left, times_4, 0));
+          break;
+        case 8:
+          __ shl(left, 3);
+          break;
+        case 9:
+          __ lea(left, Operand(left, left, times_8, 0));
+          break;
+        case 16:
+          __ shl(left, 4);
+          break;
+        default:
+          __ imul(left, left, constant);
+          break;
+      }
+    } else {
+      __ imul(left, left, constant);
+    }
+  } else {
+    if (instr->hydrogen()->representation().IsSmi()) {
+      __ SmiUntag(left);
+    }
+    __ imul(left, ToOperand(right));
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Bail out if the result is supposed to be negative zero.
+    Label done;
+    __ test(left, Operand(left));
+    __ j(not_zero, &done);
+    if (right->IsConstantOperand()) {
+      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
+        DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
+        __ cmp(ToRegister(instr->temp()), Immediate(0));
+        DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
+      }
+    } else {
+      // Test the non-zero operand for negative sign.
+      __ or_(ToRegister(instr->temp()), ToOperand(right));
+      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
+
+  if (right->IsConstantOperand()) {
+    int32_t right_operand =
+        ToRepresentation(LConstantOperand::cast(right),
+                         instr->hydrogen()->representation());
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ and_(ToRegister(left), right_operand);
+        break;
+      case Token::BIT_OR:
+        __ or_(ToRegister(left), right_operand);
+        break;
+      case Token::BIT_XOR:
+        if (right_operand == int32_t(~0)) {
+          __ not_(ToRegister(left));
+        } else {
+          __ xor_(ToRegister(left), right_operand);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    switch (instr->op()) {
+      case Token::BIT_AND:
+        __ and_(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_OR:
+        __ or_(ToRegister(left), ToOperand(right));
+        break;
+      case Token::BIT_XOR:
+        __ xor_(ToRegister(left), ToOperand(right));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
+  if (right->IsRegister()) {
+    DCHECK(ToRegister(right).is(ecx));
+
+    switch (instr->op()) {
+      case Token::ROR:
+        __ ror_cl(ToRegister(left));
+        break;
+      case Token::SAR:
+        __ sar_cl(ToRegister(left));
+        break;
+      case Token::SHR:
+        __ shr_cl(ToRegister(left));
+        if (instr->can_deopt()) {
+          __ test(ToRegister(left), ToRegister(left));
+          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case Token::SHL:
+        __ shl_cl(ToRegister(left));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    int value = ToInteger32(LConstantOperand::cast(right));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::ROR:
+        if (shift_count == 0 && instr->can_deopt()) {
+          __ test(ToRegister(left), ToRegister(left));
+          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+        } else {
+          __ ror(ToRegister(left), shift_count);
+        }
+        break;
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sar(ToRegister(left), shift_count);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ shr(ToRegister(left), shift_count);
+        } else if (instr->can_deopt()) {
+          __ test(ToRegister(left), ToRegister(left));
+          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          if (instr->hydrogen_value()->representation().IsSmi() &&
+              instr->can_deopt()) {
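+            // Shift by shift_count - 1, then let SmiTag (implemented as
+            // add reg, reg on ia32) perform the final doubling: unlike shl,
+            // add sets the overflow flag for the deopt check below.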
+            if (shift_count != 1) {
+              __ shl(ToRegister(left), shift_count - 1);
+            }
+            __ SmiTag(ToRegister(left));
+            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+          } else {
+            __ shl(ToRegister(left), shift_count);
+          }
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+
+  if (right->IsConstantOperand()) {
+    __ sub(ToOperand(left),
+           ToImmediate(right, instr->hydrogen()->representation()));
+  } else {
+    __ sub(ToRegister(left), ToOperand(right));
+  }
+  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  uint64_t const bits = instr->bits();
+  uint32_t const lower = static_cast<uint32_t>(bits);
+  uint32_t const upper = static_cast<uint32_t>(bits >> 32);
+  DCHECK(instr->result()->IsDoubleRegister());
+
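+  // x86 is little-endian: pushing upper first and lower second leaves the
+  // low word at [esp], forming a correctly laid out 8-byte double.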
+  __ push(Immediate(upper));
+  __ push(Immediate(lower));
+  X87Register reg = ToX87Register(instr->result());
+  X87Mov(reg, Operand(esp, 0));
+  __ add(Operand(esp), Immediate(kDoubleSize));
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Register reg = ToRegister(instr->result());
+  Handle<Object> object = instr->value(isolate());
+  AllowDeferredHandleDereference smi_check;
+  __ LoadObject(reg, object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->value());
+  __ EnumLength(result, map);
+}
+
+
+Operand LCodeGen::BuildSeqStringOperand(Register string,
+                                        LOperand* index,
+                                        String::Encoding encoding) {
+  if (index->IsConstantOperand()) {
+    int offset = ToRepresentation(LConstantOperand::cast(index),
+                                  Representation::Integer32());
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset *= kUC16Size;
+    }
+    STATIC_ASSERT(kCharSize == 1);
+    return FieldOperand(string, SeqString::kHeaderSize + offset);
+  }
+  return FieldOperand(
+      string, ToRegister(index),
+      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
+      SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register result = ToRegister(instr->result());
+  Register string = ToRegister(instr->string());
+
+  if (FLAG_debug_code) {
+    __ push(string);
+    __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
+    __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));
+
+    __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
+                             ? one_byte_seq_type : two_byte_seq_type));
+    __ Check(equal, kUnexpectedStringType);
+    __ pop(string);
+  }
+
+  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ movzx_b(result, operand);
+  } else {
+    __ movzx_w(result, operand);
+  }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+
+  if (FLAG_debug_code) {
+    Register value = ToRegister(instr->value());
+    Register index = ToRegister(instr->index());
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    int encoding_mask =
+        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+        ? one_byte_seq_type : two_byte_seq_type;
+    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+  }
+
+  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (instr->value()->IsConstantOperand()) {
+    int value = ToRepresentation(LConstantOperand::cast(instr->value()),
+                                 Representation::Integer32());
+    DCHECK_LE(0, value);
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      DCHECK_LE(value, String::kMaxOneByteCharCode);
+      __ mov_b(operand, static_cast<int8_t>(value));
+    } else {
+      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
+      __ mov_w(operand, static_cast<int16_t>(value));
+    }
+  } else {
+    Register value = ToRegister(instr->value());
+    if (encoding == String::ONE_BYTE_ENCODING) {
+      __ mov_b(operand, value);
+    } else {
+      __ mov_w(operand, value);
+    }
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+
+  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
+    if (right->IsConstantOperand()) {
+      int32_t offset = ToRepresentation(LConstantOperand::cast(right),
+                                        instr->hydrogen()->representation());
+      __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
+    } else {
+      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
+      __ lea(ToRegister(instr->result()), address);
+    }
+  } else {
+    if (right->IsConstantOperand()) {
+      __ add(ToOperand(left),
+             ToImmediate(right, instr->hydrogen()->representation()));
+    } else {
+      __ add(ToRegister(left), ToOperand(right));
+    }
+    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
+      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+    }
+  }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  DCHECK(left->Equals(instr->result()));
+  HMathMinMax::Operation operation = instr->hydrogen()->operation();
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Label return_left;
+    Condition condition = (operation == HMathMinMax::kMathMin)
+        ? less_equal
+        : greater_equal;
+    if (right->IsConstantOperand()) {
+      Operand left_op = ToOperand(left);
+      Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
+                                        instr->hydrogen()->representation());
+      __ cmp(left_op, immediate);
+      __ j(condition, &return_left, Label::kNear);
+      __ mov(left_op, immediate);
+    } else {
+      Register left_reg = ToRegister(left);
+      Operand right_op = ToOperand(right);
+      __ cmp(left_reg, right_op);
+      __ j(condition, &return_left, Label::kNear);
+      __ mov(left_reg, right_op);
+    }
+    __ bind(&return_left);
+  } else {
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    Label check_nan_left, check_zero, return_left, return_right;
+    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
+    X87Register left_reg = ToX87Register(left);
+    X87Register right_reg = ToX87Register(right);
+
+    X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result()));
+    __ fld(1);
+    __ fld(1);
+    __ FCmp();
+    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
+    __ j(equal, &check_zero, Label::kNear);            // left == right.
+    __ j(condition, &return_left, Label::kNear);
+    __ jmp(&return_right, Label::kNear);
+
+    __ bind(&check_zero);
+    __ fld(0);
+    __ fldz();
+    __ FCmp();
+    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
+    // At this point, both left and right are either 0 or -0.
+    if (operation == HMathMinMax::kMathMin) {
+      // Push st0 and st1 to the stack as floats, OR their bit patterns in
+      // memory, and load the combined value into left.
+      Register scratch_reg = ToRegister(instr->temp());
+      __ fld(1);
+      __ fld(1);
+      __ sub(esp, Immediate(2 * kPointerSize));
+      __ fstp_s(MemOperand(esp, 0));
+      __ fstp_s(MemOperand(esp, kPointerSize));
+      __ pop(scratch_reg);
+      __ or_(MemOperand(esp, 0), scratch_reg);
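+      // As 32-bit floats, +0 is 0x00000000 and -0 is 0x80000000; ORing the
+      // bit patterns keeps the sign bit whenever either operand is -0, so
+      // min(+0, -0) == min(-0, -0) == -0.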
+      X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
+      __ pop(scratch_reg);  // restore esp
+    } else {
+      // Since we operate on +0 and/or -0, adding the two values has the same
+      // effect as ANDing their sign bits.
+      X87Fxch(left_reg);
+      __ fadd(1);
+    }
+    __ jmp(&return_left, Label::kNear);
+
+    __ bind(&check_nan_left);
+    __ fld(0);
+    __ fld(0);
+    __ FCmp();                                      // NaN check.
+    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
+
+    __ bind(&return_right);
+    X87Fxch(left_reg);
+    X87Mov(left_reg, right_reg);
+
+    __ bind(&return_left);
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  X87Register left = ToX87Register(instr->left());
+  X87Register right = ToX87Register(instr->right());
+  X87Register result = ToX87Register(instr->result());
+  if (instr->op() != Token::MOD) {
+    X87PrepareBinaryOp(left, right, result);
+  }
+  // Set the precision control to double-precision.
+  __ X87SetFPUCW(0x027F);
+  switch (instr->op()) {
+    case Token::ADD:
+      __ fadd_i(1);
+      break;
+    case Token::SUB:
+      __ fsub_i(1);
+      break;
+    case Token::MUL:
+      __ fmul_i(1);
+      break;
+    case Token::DIV:
+      __ fdiv_i(1);
+      break;
+    case Token::MOD: {
+      // Pass two doubles as arguments on the stack.
+      __ PrepareCallCFunction(4, eax);
+      X87Mov(Operand(esp, 1 * kDoubleSize), right);
+      X87Mov(Operand(esp, 0), left);
+      X87Free(right);
+      DCHECK(left.is(result));
+      X87PrepareToWrite(result);
+      __ CallCFunction(
+          ExternalReference::mod_two_doubles_operation(isolate()),
+          4);
+
+      // Return value is in st(0) on ia32.
+      X87CommitWrite(result);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+
+  // Restore the default value of the control word.
+  __ X87SetFPUCW(0x037F);
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(edx));
+  DCHECK(ToRegister(instr->right()).is(eax));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+
+  if (right_block == left_block || cc == no_condition) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
+  } else if (right_block == next_block) {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+  } else {
+    __ j(cc, chunk_->GetAssemblyLabel(left_block));
+    __ jmp(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
+  int true_block = instr->TrueDestination(chunk_);
+  if (cc == no_condition) {
+    __ jmp(chunk_->GetAssemblyLabel(true_block));
+  } else {
+    __ j(cc, chunk_->GetAssemblyLabel(true_block));
+  }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
+  int false_block = instr->FalseDestination(chunk_);
+  if (cc == no_condition) {
+    __ jmp(chunk_->GetAssemblyLabel(false_block));
+  } else {
+    __ j(cc, chunk_->GetAssemblyLabel(false_block));
+  }
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsSmiOrInteger32()) {
+    Register reg = ToRegister(instr->value());
+    __ test(reg, Operand(reg));
+    EmitBranch(instr, not_zero);
+  } else if (r.IsDouble()) {
+    X87Register reg = ToX87Register(instr->value());
+    X87LoadForUsage(reg);
+    __ fldz();
+    __ FCmp();
+    EmitBranch(instr, not_zero);
+  } else {
+    DCHECK(r.IsTagged());
+    Register reg = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      DCHECK(!info()->IsStub());
+      __ cmp(reg, factory()->true_value());
+      EmitBranch(instr, equal);
+    } else if (type.IsSmi()) {
+      DCHECK(!info()->IsStub());
+      __ test(reg, Operand(reg));
+      EmitBranch(instr, not_equal);
+    } else if (type.IsJSArray()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, no_condition);
+    } else if (type.IsHeapNumber()) {
+      UNREACHABLE();
+    } else if (type.IsString()) {
+      DCHECK(!info()->IsStub());
+      __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+      EmitBranch(instr, not_equal);
+    } else {
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ cmp(reg, factory()->undefined_value());
+        __ j(equal, instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // true -> true.
+        __ cmp(reg, factory()->true_value());
+        __ j(equal, instr->TrueLabel(chunk_));
+        // false -> false.
+        __ cmp(reg, factory()->false_value());
+        __ j(equal, instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ cmp(reg, factory()->null_value());
+        __ j(equal, instr->FalseLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ test(reg, Operand(reg));
+        __ j(equal, instr->FalseLabel(chunk_));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ test(reg, Immediate(kSmiTagMask));
+        DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+      }
+
+      Register map = no_reg;  // Keep the compiler happy.
+      if (expected.NeedsMap()) {
+        map = ToRegister(instr->temp());
+        DCHECK(!map.is(reg));
+        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+                    1 << Map::kIsUndetectable);
+          __ j(not_zero, instr->FalseLabel(chunk_));
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
+        __ j(above_equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
+        __ j(above_equal, &not_string, Label::kNear);
+        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
+        __ j(not_zero, instr->TrueLabel(chunk_));
+        __ jmp(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CmpInstanceType(map, SYMBOL_TYPE);
+        __ j(equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
+        // SIMD value -> true.
+        __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
+        __ j(equal, instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        Label not_heap_number;
+        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
+               factory()->heap_number_map());
+        __ j(not_equal, &not_heap_number, Label::kNear);
+        __ fldz();
+        __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
+        __ FCmp();
+        __ j(zero, instr->FalseLabel(chunk_));
+        __ jmp(instr->TrueLabel(chunk_));
+        __ bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
+      }
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  if (!IsNextEmittedBlock(block)) {
+    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
+  }
+}
+
+
+void LCodeGen::DoClobberDoubles(LClobberDoubles* instr) {
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = no_condition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = equal;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = not_equal;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? below : less;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? above : greater;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? below_equal : less_equal;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? above_equal : greater_equal;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  bool is_unsigned =
+      instr->is_double() ||
+      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+  Condition cc = TokenToCondition(instr->op(), is_unsigned);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      X87LoadForUsage(ToX87Register(right), ToX87Register(left));
+      __ FCmp();
+      // Don't base result on EFLAGS when a NaN is involved. Instead
+      // jump to the false block.
+      __ j(parity_even, instr->FalseLabel(chunk_));
+    } else {
+      if (right->IsConstantOperand()) {
+        __ cmp(ToOperand(left),
+               ToImmediate(right, instr->hydrogen()->representation()));
+      } else if (left->IsConstantOperand()) {
+        __ cmp(ToOperand(right),
+               ToImmediate(left, instr->hydrogen()->representation()));
+        // We commuted the operands, so commute the condition.
+        cc = CommuteCondition(cc);
+      } else {
+        __ cmp(ToRegister(left), ToOperand(right));
+      }
+    }
+    EmitBranch(instr, cc);
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+
+  if (instr->right()->IsConstantOperand()) {
+    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
+    __ CmpObject(left, right);
+  } else {
+    Operand right = ToOperand(instr->right());
+    __ cmp(left, right);
+  }
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+  if (instr->hydrogen()->representation().IsTagged()) {
+    Register input_reg = ToRegister(instr->object());
+    __ cmp(input_reg, factory()->the_hole_value());
+    EmitBranch(instr, equal);
+    return;
+  }
+
+  // Put the value on top of the FP stack.
+  X87Register src = ToX87Register(instr->object());
+  X87LoadForUsage(src);
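+  // Compare the value against itself: only a NaN (the hole is encoded as a
+  // NaN) compares unordered, setting the parity flag.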
+  __ fld(0);
+  __ fld(0);
+  __ FCmp();
+  Label ok;
+  __ j(parity_even, &ok, Label::kNear);
+  __ fstp(0);
+  EmitFalseBranch(instr, no_condition);
+  __ bind(&ok);
+
+  __ sub(esp, Immediate(kDoubleSize));
+  __ fstp_d(MemOperand(esp, 0));
+
+  __ add(esp, Immediate(kDoubleSize));
+  int offset = sizeof(kHoleNanUpper32);
+  // x87 converts the sNaN 0xfff7fffffff7ffff to the qNaN 0xfffffffffff7ffff,
+  // so as a temporary fix we check the upper word against 0xffffffff to
+  // detect the hole.
+  __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+  Representation rep = instr->hydrogen()->value()->representation();
+  DCHECK(!rep.IsInteger32());
+
+  if (rep.IsDouble()) {
+    X87Register input = ToX87Register(instr->value());
+    X87LoadForUsage(input);
+    __ FXamMinusZero();
+    EmitBranch(instr, equal);
+  } else {
+    Register value = ToRegister(instr->value());
+    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
+    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
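+    // The upper word of -0.0 is 0x80000000; subtracting 1 from it is the only
+    // case that sets the overflow flag.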
+    __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
+           Immediate(0x1));
+    EmitFalseBranch(instr, no_overflow);
+    __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
+           Immediate(0x00000000));
+    EmitBranch(instr, equal);
+  }
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+
+  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
+
+  return cond;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->type().IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+
+  Condition true_cond = EmitIsString(
+      reg, temp, instr->FalseLabel(chunk_), check_needed);
+
+  EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Operand input = ToOperand(instr->value());
+
+  __ test(input, Immediate(kSmiTagMask));
+  EmitBranch(instr, zero);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
+            1 << Map::kIsUndetectable);
+  EmitBranch(instr, not_zero);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(edx));
+  DCHECK(ToRegister(instr->right()).is(eax));
+
+  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+  __ test(eax, eax);
+
+  EmitBranch(instr, ComputeCompareCondition(instr->op()));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  DCHECK(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return equal;
+  if (to == LAST_TYPE) return above_equal;
+  if (from == FIRST_TYPE) return below_equal;
+  UNREACHABLE();
+  return equal;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+
+  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
+  EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ AssertString(input);
+
+  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+
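+  // The test sets the zero flag iff all mask bits are clear, which means the
+  // hash field contains a cached array index.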
+  __ test(FieldOperand(input, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  EmitBranch(instr, equal);
+}
+
+
+// Branches to a label or falls through with the answer in the z flag.  Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
+  __ JumpIfSmi(input, is_false);
+
+  __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+    __ j(equal, is_true);
+  } else {
+    __ j(equal, is_false);
+  }
+
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+  // Check if the constructor in the map is a function.
+  __ GetMapConstructor(temp, temp, temp2);
+  // Objects with a non-function constructor have class 'Object'.
+  __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
+    __ j(not_equal, is_true);
+  } else {
+    __ j(not_equal, is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ mov(temp, FieldOperand(temp,
+                            SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are internalized it is sufficient to use an
+  // identity comparison.
+  __ cmp(temp, class_name);
+  // End with the answer in the z flag.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+      class_name, input, temp, temp2);
+
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
+  EmitBranch(instr, equal);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
+  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
+  DCHECK(ToRegister(instr->result()).is(eax));
+  InstanceOfStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoHasInPrototypeChainAndBranch(
+    LHasInPrototypeChainAndBranch* instr) {
+  Register const object = ToRegister(instr->object());
+  Register const object_map = ToRegister(instr->scratch());
+  Register const object_prototype = object_map;
+  Register const prototype = ToRegister(instr->prototype());
+
+  // The {object} must be a spec object.  It's sufficient to know that {object}
+  // is not a smi, since all other non-spec objects have {null} prototypes and
+  // will be ruled out below.
+  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
+    __ test(object, Immediate(kSmiTagMask));
+    EmitFalseBranch(instr, zero);
+  }
+
+  // Loop through the {object}'s prototype chain looking for the {prototype}.
+  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
+  Label loop;
+  __ bind(&loop);
+
+  // Deoptimize if the object needs to be access checked.
+  __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
+            1 << Map::kIsAccessCheckNeeded);
+  DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
+  // Deoptimize for proxies.
+  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
+  DeoptimizeIf(equal, instr, Deoptimizer::kProxy);
+
+  __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
+  __ cmp(object_prototype, prototype);
+  EmitTrueBranch(instr, equal);
+  __ cmp(object_prototype, factory()->null_value());
+  EmitFalseBranch(instr, equal);
+  __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
+  __ jmp(&loop);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  Token::Value op = instr->op();
+
+  Handle<Code> ic =
+      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = ComputeCompareCondition(op);
+  Label true_value, done;
+  __ test(eax, Operand(eax));
+  __ j(condition, &true_value, Label::kNear);
+  __ mov(ToRegister(instr->result()), factory()->false_value());
+  __ jmp(&done, Label::kNear);
+  __ bind(&true_value);
+  __ mov(ToRegister(instr->result()), factory()->true_value());
+  __ bind(&done);
+}
+
+
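+// Emits the return sequence. extra_value_count covers the return address
+// plus, under dynamic frame alignment, the extra padding word that must also
+// be removed from the stack.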
+void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
+  int extra_value_count = dynamic_frame_alignment ? 2 : 1;
+
+  if (instr->has_constant_parameter_count()) {
+    int parameter_count = ToInteger32(instr->constant_parameter_count());
+    if (dynamic_frame_alignment && FLAG_debug_code) {
+      __ cmp(Operand(esp,
+                     (parameter_count + extra_value_count) * kPointerSize),
+             Immediate(kAlignmentZapValue));
+      __ Assert(equal, kExpectedAlignmentMarker);
+    }
+    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
+  } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
+    Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi.
+    __ SmiUntag(reg);
+    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
+    if (dynamic_frame_alignment && FLAG_debug_code) {
+      DCHECK(extra_value_count == 2);
+      __ cmp(Operand(esp, reg, times_pointer_size,
+                     extra_value_count * kPointerSize),
+             Immediate(kAlignmentZapValue));
+      __ Assert(equal, kExpectedAlignmentMarker);
+    }
+
+    // Emit code to restore the stack based on instr->parameter_count().
+    __ pop(return_addr_reg);  // Save the return address.
+    if (dynamic_frame_alignment) {
+      __ inc(reg);  // One more word for the alignment padding.
+    }
+    __ shl(reg, kPointerSizeLog2);
+    __ add(esp, reg);
+    __ jmp(return_addr_reg);
+  }
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // Preserve the return value on the stack and rely on the runtime call
+    // to return the value in the same register.  We're leaving the code
+    // managed by the register allocator and tearing down the frame, so it is
+    // safe to write to the context register.
+    __ push(eax);
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ CallRuntime(Runtime::kTraceExit);
+  }
+  if (dynamic_frame_alignment_) {
+    // Fetch the state of the dynamic frame alignment.
+    __ mov(edx, Operand(ebp,
+      JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
+  }
+  if (NeedsEagerFrame()) {
+    __ mov(esp, ebp);
+    __ pop(ebp);
+  }
+  if (dynamic_frame_alignment_) {
+    Label no_padding;
+    __ cmp(edx, Immediate(kNoAlignmentPadding));
+    __ j(equal, &no_padding, Label::kNear);
+
+    EmitReturn(instr, true);
+    __ bind(&no_padding);
+  }
+
+  EmitReturn(instr, false);
+}
+
+
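+// Loads the feedback vector and its slot index (as a Smi) into the fixed
+// registers expected by the vector-based load IC.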
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
+  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(eax));
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ mov(vector_register, vector);
+  // No need to allocate this register.
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ mov(slot_register, Immediate(Smi::FromInt(index)));
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
+  Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = ToRegister(instr->temp_slot());
+
+  AllowDeferredHandleDereference vector_structure_check;
+  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+  __ mov(vector_register, vector);
+  FeedbackVectorSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ mov(slot_register, Immediate(Smi::FromInt(index)));
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  __ mov(LoadDescriptor::NameRegister(), instr->name());
+  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
+                                         SLOPPY, PREMONOMORPHIC).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ mov(result, ContextOperand(context, instr->slot_index()));
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(result, factory()->the_hole_value());
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+    } else {
+      Label is_not_hole;
+      __ j(not_equal, &is_not_hole, Label::kNear);
+      __ mov(result, factory()->undefined_value());
+      __ bind(&is_not_hole);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+
+  Label skip_assignment;
+
+  Operand target = ContextOperand(context, instr->slot_index());
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ cmp(target, factory()->the_hole_value());
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+    } else {
+      __ j(not_equal, &skip_assignment, Label::kNear);
+    }
+  }
+
+  __ mov(target, value);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    Register temp = ToRegister(instr->temp());
+    int offset = Context::SlotOffset(instr->slot_index());
+    __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs,
+                              EMIT_REMEMBERED_SET, check_needed);
+  }
+
+  __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    Register result = ToRegister(instr->result());
+    MemOperand operand = instr->object()->IsConstantOperand()
+        ? MemOperand::StaticVariable(ToExternalReference(
+                LConstantOperand::cast(instr->object())))
+        : MemOperand(ToRegister(instr->object()), offset);
+    __ Load(result, operand, access.representation());
+    return;
+  }
+
+  Register object = ToRegister(instr->object());
+  if (instr->hydrogen()->representation().IsDouble()) {
+    X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
+    return;
+  }
+
+  Register result = ToRegister(instr->result());
+  if (!access.IsInobject()) {
+    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
+    object = result;
+  }
+  __ Load(result, FieldOperand(object, offset), access.representation());
+}
+
+
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+  DCHECK(!operand->IsDoubleRegister());
+  if (operand->IsConstantOperand()) {
+    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+    AllowDeferredHandleDereference smi_check;
+    if (object->IsSmi()) {
+      __ Push(Handle<Smi>::cast(object));
+    } else {
+      __ PushHeapObject(Handle<HeapObject>::cast(object));
+    }
+  } else if (operand->IsRegister()) {
+    __ push(ToRegister(operand));
+  } else {
+    __ push(ToOperand(operand));
+  }
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  __ mov(LoadDescriptor::NameRegister(), instr->name());
+  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  Handle<Code> ic =
+      CodeFactory::LoadICInOptimizedCode(
+          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
+          instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register function = ToRegister(instr->function());
+  Register temp = ToRegister(instr->temp());
+  Register result = ToRegister(instr->result());
+
+  // Get the prototype or initial map from the function.
+  __ mov(result,
+         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
+  DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ CmpObjectType(result, MAP_TYPE, temp);
+  __ j(not_equal, &done, Label::kNear);
+
+  // Get the prototype from the initial map.
+  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+  if (instr->length()->IsConstantOperand() &&
+      instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    int index = (const_length - const_index) + 1;
+    __ mov(result, Operand(arguments, index * kPointerSize));
+  } else {
+    Register length = ToRegister(instr->length());
+    Operand index = ToOperand(instr->index());
+    // There are two words between the frame pointer and the last argument.
+    // Subtracting the index from the length accounts for one of them; the
+    // kPointerSize displacement below accounts for the other.
+    __ sub(length, index);
+    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = instr->key();
+  if (!key->IsConstantOperand() &&
+      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+                                  elements_kind)) {
+    __ SmiUntag(ToRegister(key));
+  }
+  Operand operand(BuildFastArrayOperand(
+      instr->elements(),
+      key,
+      instr->hydrogen()->key()->representation(),
+      elements_kind,
+      instr->base_offset()));
+  if (elements_kind == FLOAT32_ELEMENTS) {
+    X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
+  } else if (elements_kind == FLOAT64_ELEMENTS) {
+    X87Mov(ToX87Register(instr->result()), operand);
+  } else {
+    Register result(ToRegister(instr->result()));
+    switch (elements_kind) {
+      case INT8_ELEMENTS:
+        __ movsx_b(result, operand);
+        break;
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ movzx_b(result, operand);
+        break;
+      case INT16_ELEMENTS:
+        __ movsx_w(result, operand);
+        break;
+      case UINT16_ELEMENTS:
+        __ movzx_w(result, operand);
+        break;
+      case INT32_ELEMENTS:
+        __ mov(result, operand);
+        break;
+      case UINT32_ELEMENTS:
+        __ mov(result, operand);
+        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+          __ test(result, Operand(result));
+          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    Operand hole_check_operand = BuildFastArrayOperand(
+        instr->elements(), instr->key(),
+        instr->hydrogen()->key()->representation(),
+        FAST_DOUBLE_ELEMENTS,
+        instr->base_offset() + sizeof(kHoleNanLower32));
+    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
+    DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+  }
+
+  Operand double_load_operand = BuildFastArrayOperand(
+      instr->elements(),
+      instr->key(),
+      instr->hydrogen()->key()->representation(),
+      FAST_DOUBLE_ELEMENTS,
+      instr->base_offset());
+  X87Mov(ToX87Register(instr->result()), double_load_operand);
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+  Register result = ToRegister(instr->result());
+
+  // Load the result.
+  __ mov(result,
+         BuildFastArrayOperand(instr->elements(), instr->key(),
+                               instr->hydrogen()->key()->representation(),
+                               FAST_ELEMENTS, instr->base_offset()));
+
+  // Check for the hole value.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+      __ test(result, Immediate(kSmiTagMask));
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
+    } else {
+      __ cmp(result, factory()->the_hole_value());
+      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
+    }
+  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
+    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
+    Label done;
+    __ cmp(result, factory()->the_hole_value());
+    __ j(not_equal, &done);
+    if (info()->IsStub()) {
+      // A stub can safely convert the hole to undefined only if the array
+      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
+      // it needs to bail out.
+      __ mov(result, isolate()->factory()->array_protector());
+      __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
+             Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
+    }
+    __ mov(result, isolate()->factory()->undefined_value());
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+  if (instr->is_fixed_typed_array()) {
+    DoLoadKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->representation().IsDouble()) {
+    DoLoadKeyedFixedDoubleArray(instr);
+  } else {
+    DoLoadKeyedFixedArray(instr);
+  }
+}
+
+
+Operand LCodeGen::BuildFastArrayOperand(
+    LOperand* elements_pointer,
+    LOperand* key,
+    Representation key_representation,
+    ElementsKind elements_kind,
+    uint32_t base_offset) {
+  Register elements_pointer_reg = ToRegister(elements_pointer);
+  int element_shift_size = ElementsKindToShiftSize(elements_kind);
+  int shift_size = element_shift_size;
+  if (key->IsConstantOperand()) {
+    int constant_value = ToInteger32(LConstantOperand::cast(key));
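+    // Once shifted, a large index would overflow the 32-bit displacement.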
+    if (constant_value & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    return Operand(elements_pointer_reg,
+                   ((constant_value) << shift_size)
+                       + base_offset);
+  } else {
+    // Take the tag bit into account while computing the shift size.
+    if (key_representation.IsSmi() && (shift_size >= 1)) {
+      shift_size -= kSmiTagSize;
+    }
+    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
+    return Operand(elements_pointer_reg,
+                   ToRegister(key),
+                   scale_factor,
+                   base_offset);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
+                        isolate(), instr->hydrogen()->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
+    __ lea(result, Operand(esp, -2 * kPointerSize));
+  } else {
+    // Check for arguments adapter frame.
+    Label done, adapted;
+    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
+    __ cmp(Operand(result),
+           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+    __ j(equal, &adapted, Label::kNear);
+
+    // No arguments adaptor frame.
+    __ mov(result, Operand(ebp));
+    __ jmp(&done, Label::kNear);
+
+    // Arguments adaptor frame present.
+    __ bind(&adapted);
+    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+
+    // Result is the frame pointer if the frame is not adapted, and the frame
+    // pointer of the real frame below the adaptor frame if it is.
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Operand elem = ToOperand(instr->elements());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If no arguments adaptor frame the number of arguments is fixed.
+  __ cmp(ebp, elem);
+  __ mov(result, Immediate(scope()->num_parameters()));
+  __ j(equal, &done, Label::kNear);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
+  __ mov(result, Operand(result,
+                         ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label receiver_ok, global_object;
+  Register scratch = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->known_function()) {
+    // Do not transform the receiver to object for strict mode
+    // functions.
+    __ mov(scratch,
+           FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
+              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
+    __ j(not_equal, &receiver_ok);
+
+    // Do not transform the receiver to object for builtins.
+    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
+              1 << SharedFunctionInfo::kNativeBitWithinByte);
+    __ j(not_equal, &receiver_ok);
+  }
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ cmp(receiver, factory()->null_value());
+  __ j(equal, &global_object);
+  __ cmp(receiver, factory()->undefined_value());
+  __ j(equal, &global_object);
+
+  // The receiver should be a JS object.
+  __ test(receiver, Immediate(kSmiTagMask));
+  DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+  __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
+  DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);
+
+  __ jmp(&receiver_ok, Label::kNear);
+  __ bind(&global_object);
+  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
+  __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
+  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
+  __ bind(&receiver_ok);
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  DCHECK(receiver.is(eax));  // Used for parameter count.
+  DCHECK(function.is(edi));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
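+  // Cap the number of applied arguments to keep stack usage bounded.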
+  __ cmp(length, kArgumentsLimit);
+  DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);
+
+  __ push(receiver);
+  __ mov(receiver, length);
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ test(length, Operand(length));
+  __ j(zero, &invoke, Label::kNear);
+  __ bind(&loop);
+  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
+  __ dec(length);
+  __ j(not_zero, &loop);
+
+  // Invoke the function.
+  __ bind(&invoke);
+  DCHECK(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  ParameterCount actual(eax);
+  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
+                    safepoint_generator);
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+  __ int3();
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->value();
+  EmitPushTaggedOperand(argument);
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  Register result = ToRegister(instr->result());
+  if (info()->IsOptimizing()) {
+    __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in esi.
+    DCHECK(result.is(esi));
+  }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  __ push(Immediate(instr->hydrogen()->pairs()));
+  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
+  CallRuntime(Runtime::kDeclareGlobals, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count, int arity,
+                                 LInstruction* instr) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+
+  Register function_reg = edi;
+
+  if (can_invoke_directly) {
+    // Change context.
+    __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));
+
+    // Always initialize new target and number of actual arguments.
+    __ mov(edx, factory()->undefined_value());
+    __ mov(eax, arity);
+
+    // Invoke function directly.
+    if (function.is_identical_to(info()->closure())) {
+      __ CallSelf();
+    } else {
+      __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
+    }
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    // We need to adapt arguments.
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(
+        this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
+  }
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ leave();
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ jmp(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
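+      // Compute the entry address by skipping the Code object header.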
+      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ jmp(target);
+    }
+  } else {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      __ call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(Operand(target)));
+      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ call(target);
+    }
+    generator.AfterCall();
+  }
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  // Change context.
+  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
+
+  // Always initialize new target and number of actual arguments.
+  __ mov(edx, factory()->undefined_value());
+  __ mov(eax, instr->arity());
+
+  bool is_self_call = false;
+  if (instr->hydrogen()->function()->IsConstant()) {
+    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+    Handle<JSFunction> jsfun =
+      Handle<JSFunction>::cast(fun_const->handle(isolate()));
+    is_self_call = jsfun.is_identical_to(info()->closure());
+  }
+
+  if (is_self_call) {
+    __ CallSelf();
+  } else {
+    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
+  }
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  Register input_reg = ToRegister(instr->value());
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         factory()->heap_number_map());
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+
+  Label slow, allocated, done;
+  Register tmp = input_reg.is(eax) ? ecx : eax;
+  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
+
+  // Preserve the value of all registers.
+  PushSafepointRegistersScope scope(this);
+
+  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it. We do not need to patch the stack since |input| and
+  // |result| are the same register and |input| will be restored
+  // unchanged by popping safepoint registers.
+  __ test(tmp, Immediate(HeapNumber::kSignMask));
+  __ j(zero, &done, Label::kNear);
+
+  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
+  __ jmp(&allocated, Label::kNear);
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
+                          instr, instr->context());
+  // Set the pointer to the new heap number in tmp.
+  if (!tmp.is(eax)) __ mov(tmp, eax);
+  // Restore input_reg after call to runtime.
+  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
+
+  __ bind(&allocated);
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  __ and_(tmp2, ~HeapNumber::kSignMask);
+  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
+  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
+  __ StoreToSafepointRegisterSlot(input_reg, tmp);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+  Register input_reg = ToRegister(instr->value());
+  __ test(input_reg, Operand(input_reg));
+  Label is_positive;
+  __ j(not_sign, &is_positive, Label::kNear);
+  __ neg(input_reg);  // Sets flags.
+  DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
+  __ bind(&is_positive);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
+                                    LMathAbs* instr,
+                                    const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMathAbs* instr_;
+  };
+
+  DCHECK(instr->value()->Equals(instr->result()));
+  Representation r = instr->hydrogen()->value()->representation();
+
+  if (r.IsDouble()) {
+    X87Register value = ToX87Register(instr->value());
+    X87Fxch(value);
+    __ fabs();
+  } else if (r.IsSmiOrInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else {  // Tagged case.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr, x87_stack_);
+    Register input_reg = ToRegister(instr->value());
+    // Smi check.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+    EmitIntegerMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+  Register output_reg = ToRegister(instr->result());
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+
+  Label not_minus_zero, done;
+  // Deoptimize on unordered.
+  __ fldz();
+  __ fld(1);
+  __ FCmp();
+  DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+  __ j(below, &not_minus_zero, Label::kNear);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Check for negative zero.
+    __ j(not_equal, &not_minus_zero, Label::kNear);
+    // +- 0.0.
+    __ fld(0);
+    __ FXamSign();
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+    __ Move(output_reg, Immediate(0));
+    __ jmp(&done, Label::kFar);
+  }
+
+  // Positive input.
+  // rc=01B, round down.
+  __ bind(&not_minus_zero);
+  __ fnclex();
+  __ X87SetRC(0x0400);
+  __ sub(esp, Immediate(kPointerSize));
+  __ fist_s(Operand(esp, 0));
+  __ pop(output_reg);
+  __ X87CheckIA();
+  DeoptimizeIf(equal, instr, Deoptimizer::kOverflow);
+  __ fnclex();
+  __ X87SetRC(0x0000);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+  X87Register input_reg = ToX87Register(instr->value());
+  Register result = ToRegister(instr->result());
+  X87Fxch(input_reg);
+  Label below_one_half, below_minus_one_half, done;
+
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+  ExternalReference minus_one_half =
+      ExternalReference::address_of_minus_one_half();
+
+  __ fld_d(Operand::StaticVariable(one_half));
+  __ fld(1);
+  __ FCmp();
+  __ j(carry, &below_one_half);
+
+  // Round towards zero; since 0.5 <= x, truncating x + 0.5 computes
+  // floor(x + 0.5), which is the rounded result.
+  __ fld(0);
+  __ fadd_d(Operand::StaticVariable(one_half));
+  // rc=11B, round toward zero.
+  __ X87SetRC(0x0c00);
+  __ sub(esp, Immediate(kPointerSize));
+  // Clear exception bits.
+  __ fnclex();
+  __ fistp_s(MemOperand(esp, 0));
+  // Check overflow.
+  __ X87CheckIA();
+  __ pop(result);
+  DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
+  __ fnclex();
+  // Restore round mode.
+  __ X87SetRC(0x0000);
+  __ jmp(&done);
+
+  __ bind(&below_one_half);
+  __ fld_d(Operand::StaticVariable(minus_one_half));
+  __ fld(1);
+  __ FCmp();
+  __ j(carry, &below_minus_one_half);
+  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+  // we can ignore the difference between a result of -0 and +0.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // If the sign is positive, we return +0.
+    __ fld(0);
+    __ FXamSign();
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+  }
+  __ Move(result, Immediate(0));
+  __ jmp(&done);
+
+  __ bind(&below_minus_one_half);
+  __ fld(0);
+  __ fadd_d(Operand::StaticVariable(one_half));
+  // rc=01B, round down.
+  __ X87SetRC(0x0400);
+  __ sub(esp, Immediate(kPointerSize));
+  // Clear exception bits.
+  __ fnclex();
+  __ fistp_s(MemOperand(esp, 0));
+  // Check overflow.
+  __ X87CheckIA();
+  __ pop(result);
+  DeoptimizeIf(equal, instr, Deoptimizer::kConversionOverflow);
+  __ fnclex();
+  // Restore round mode.
+  __ X87SetRC(0x0000);
+
+  __ bind(&done);
+}
+
+
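+// Rounds the double on top of the X87 stack to float32 precision by spilling
+// it as a single and reloading it.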
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+  __ sub(esp, Immediate(kPointerSize));
+  __ fstp_s(MemOperand(esp, 0));
+  X87Fld(MemOperand(esp, 0), kX87FloatOperand);
+  __ add(esp, Immediate(kPointerSize));
+}
+
+
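+// Compute fsqrt at double (53-bit) precision by temporarily lowering the FPU
+// precision control, then restore the extended-precision default.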
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  X87Register input_reg = ToX87Register(instr->value());
+  __ X87SetFPUCW(0x027F);
+  X87Fxch(input_reg);
+  __ fsqrt();
+  __ X87SetFPUCW(0x037F);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  X87Register input_reg = ToX87Register(instr->value());
+  DCHECK(ToX87Register(instr->result()).is(input_reg));
+  X87Fxch(input_reg);
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done, sqrt;
+  // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1
+  __ fxam();
+  __ push(eax);
+  __ fnstsw_ax();
+  __ and_(eax, Immediate(0x4700));
+  __ cmp(eax, Immediate(0x0700));
+  __ j(not_equal, &sqrt, Label::kNear);
+  // If input is -Infinity, return Infinity.
+  __ fchs();
+  __ jmp(&done, Label::kNear);
+
+  // Square root.
+  __ bind(&sqrt);
+  __ fldz();
+  __ faddp();  // Convert -0 to +0.
+  __ fsqrt();
+  __ bind(&done);
+  __ pop(eax);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  X87Register result = ToX87Register(instr->result());
+  // Having marked this as a call, we can use any registers.
+  X87Register base = ToX87Register(instr->left());
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+
+  if (exponent_type.IsSmi()) {
+    Register exponent = ToRegister(instr->right());
+    X87LoadForUsage(base);
+    __ SmiUntag(exponent);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+  } else if (exponent_type.IsTagged()) {
+    Register exponent = ToRegister(instr->right());
+    Register temp = exponent.is(ecx) ? eax : ecx;
+    Label no_deopt, done;
+    X87LoadForUsage(base);
+    __ JumpIfSmi(exponent, &no_deopt);
+    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+    // Heap number (double).
+    __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
+    __ jmp(&done);
+    // Smi.
+    __ bind(&no_deopt);
+    __ SmiUntag(exponent);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+    __ bind(&done);
+  } else if (exponent_type.IsInteger32()) {
+    Register exponent = ToRegister(instr->right());
+    X87LoadForUsage(base);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    X87Register exponent_double = ToX87Register(instr->right());
+    X87LoadForUsage(base, exponent_double);
+  }
+
+  // FP data stack {base, exponent(TOS)}.
+  // Handle (exponent==+-0.5 && base == -0).
+  Label not_plus_0;
+  __ fld(0);
+  __ fabs();
+  X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
+  __ FCmp();
+  __ j(parity_even, &not_plus_0, Label::kNear);  // NaN.
+  __ j(not_equal, &not_plus_0, Label::kNear);
+  __ fldz();
+  // FP data stack {base, exponent(TOS), zero}.
+  __ faddp(2);
+  __ bind(&not_plus_0);
+
+  {
+    __ PrepareCallCFunction(4, eax);
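+    // power_double_double takes (base, exponent); the exponent is on top of
+    // the FP stack, so it is stored into the second argument slot first.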
+    __ fstp_d(MemOperand(esp, kDoubleSize));  // Exponent value.
+    __ fstp_d(MemOperand(esp, 0));            // Base value.
+    X87PrepareToWrite(result);
+    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+                     4);
+    // Return value is in st(0) on ia32.
+    X87CommitWrite(result);
+  }
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+  DCHECK(instr->value()->Equals(instr->result()));
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+
+  Label positive, done, zero, nan_result;
+  __ fldz();
+  __ fld(1);
+  __ FCmp();
+  __ j(below, &nan_result, Label::kNear);
+  __ j(equal, &zero, Label::kNear);
+  // Positive input.
+  // {input, ln2}.
+  __ fldln2();
+  // {ln2, input}.
+  __ fxch();
+  // {result}.
+  __ fyl2x();
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&nan_result);
+  X87PrepareToWrite(input_reg);
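+  // Materialize a quiet NaN (high word 0xffffffff, low word 0x7fffffff) on
+  // the stack and load it.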
+  __ push(Immediate(0xffffffff));
+  __ push(Immediate(0x7fffffff));
+  __ fld_d(MemOperand(esp, 0));
+  __ lea(esp, Operand(esp, kDoubleSize));
+  X87CommitWrite(input_reg);
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&zero);
+  ExternalReference ninf = ExternalReference::address_of_negative_infinity();
+  X87PrepareToWrite(input_reg);
+  __ fld_d(Operand::StaticVariable(ninf));
+  X87CommitWrite(input_reg);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ Lzcnt(result, input);
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  X87Register input = ToX87Register(instr->value());
+  X87Register result_reg = ToX87Register(instr->result());
+  Register temp_result = ToRegister(instr->temp1());
+  Register temp = ToRegister(instr->temp2());
+  Label slow, done, smi, finish;
+  DCHECK(result_reg.is(input));
+
+  // Store input into Heap number and call runtime function kMathExpRT.
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
+    __ jmp(&done, Label::kNear);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ Move(temp_result, Immediate(0));
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(temp_result, eax);
+  }
+  __ bind(&done);
+  X87LoadForUsage(input);
+  __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+
+  {
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ push(temp_result);
+    __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
+    RecordSafepointWithRegisters(instr->pointer_map(), 1,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(temp_result, eax);
+  }
+  X87PrepareToWrite(result_reg);
+  // The return value of MathExpRT is a Smi or a heap number.
+  __ JumpIfSmi(temp_result, &smi);
+  // Heap number (double).
+  __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+  __ jmp(&finish);
+  // Smi.
+  __ bind(&smi);
+  __ SmiUntag(temp_result);
+  __ push(temp_result);
+  __ fild_s(MemOperand(esp, 0));
+  __ pop(temp_result);
+  __ bind(&finish);
+  X87CommitWrite(result_reg);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(instr->HasPointerMap());
+
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(
+        this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
+  } else {
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
+                      instr->arity(), instr);
+  }
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  int arity = instr->arity();
+  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    Register slot_register = ToRegister(instr->temp_slot());
+    Register vector_register = ToRegister(instr->temp_vector());
+    DCHECK(slot_register.is(edx));
+    DCHECK(vector_register.is(ebx));
+
+    AllowDeferredHandleDereference vector_structure_check;
+    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
+    int index = vector->GetIndex(instr->hydrogen()->slot());
+
+    __ mov(vector_register, vector);
+    __ mov(slot_register, Immediate(Smi::FromInt(index)));
+
+    Handle<Code> ic =
+        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
+    CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ Set(eax, arity);
+    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->constructor()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  __ Move(eax, Immediate(instr->arity()));
+  if (instr->arity() == 1) {
+    // We only need the allocation site in the case where we have a length
+    // argument. That case may bail out to the runtime, which then determines
+    // the correct elements kind with the site.
+    __ mov(ebx, instr->hydrogen()->site());
+  } else {
+    __ mov(ebx, isolate()->factory()->undefined_value());
+  }
+
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+      // A non-zero length argument produces a holey array, so look at the
+      // first argument to pick the elements kind.
+      __ mov(ecx, Operand(esp, 0));
+      __ test(ecx, ecx);
+      __ j(zero, &packed_case, Label::kNear);
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(),
+                                              holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      __ jmp(&done, Label::kNear);
+      __ bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+    __ bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
+  __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+    __ lea(result, Operand(base, ToInteger32(offset)));
+  } else {
+    Register offset = ToRegister(instr->offset());
+    __ lea(result, Operand(base, offset, times_1, 0));
+  }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Representation representation = instr->hydrogen()->field_representation();
+
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    MemOperand operand = instr->object()->IsConstantOperand()
+        ? MemOperand::StaticVariable(
+            ToExternalReference(LConstantOperand::cast(instr->object())))
+        : MemOperand(ToRegister(instr->object()), offset);
+    if (instr->value()->IsConstantOperand()) {
+      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+      __ mov(operand, Immediate(ToInteger32(operand_value)));
+    } else {
+      Register value = ToRegister(instr->value());
+      __ Store(value, operand, representation);
+    }
+    return;
+  }
+
+  Register object = ToRegister(instr->object());
+  __ AssertNotSmi(object);
+  DCHECK(!representation.IsSmi() ||
+         !instr->value()->IsConstantOperand() ||
+         IsSmi(LConstantOperand::cast(instr->value())));
+  if (representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    X87Register value = ToX87Register(instr->value());
+    X87Mov(FieldOperand(object, offset), value);
+    return;
+  }
+
+  if (instr->hydrogen()->has_transition()) {
+    Handle<Map> transition = instr->hydrogen()->transition_map();
+    AddDeprecationDependency(transition);
+    __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
+    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+      Register temp = ToRegister(instr->temp());
+      Register temp_map = ToRegister(instr->temp_map());
+      __ mov(temp_map, transition);
+      __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
+      // Update the write barrier for the map field.
+      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
+    }
+  }
+
+  // Do the store.
+  Register write_register = object;
+  if (!access.IsInobject()) {
+    write_register = ToRegister(instr->temp());
+    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
+  }
+
+  MemOperand operand = FieldOperand(write_register, offset);
+  if (instr->value()->IsConstantOperand()) {
+    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+    if (operand_value->IsRegister()) {
+      Register value = ToRegister(operand_value);
+      __ Store(value, operand, representation);
+    } else if (representation.IsInteger32() || representation.IsExternal()) {
+      Immediate immediate = ToImmediate(operand_value, representation);
+      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+      __ mov(operand, immediate);
+    } else {
+      Handle<Object> handle_value = ToHandle(operand_value);
+      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+      __ mov(operand, handle_value);
+    }
+  } else {
+    Register value = ToRegister(instr->value());
+    __ Store(value, operand, representation);
+  }
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    Register value = ToRegister(instr->value());
+    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
+    // Update the write barrier for the object for in-object properties.
+    __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
+                        EMIT_REMEMBERED_SET,
+                        instr->hydrogen()->SmiCheckForWriteBarrier(),
+                        instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
+  }
+
+  __ mov(StoreDescriptor::NameRegister(), instr->name());
+  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
+  if (instr->index()->IsConstantOperand()) {
+    __ cmp(ToOperand(instr->length()),
+           ToImmediate(LConstantOperand::cast(instr->index()),
+                       instr->hydrogen()->length()->representation()));
+    cc = CommuteCondition(cc);
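+    // The comparison above has length and index swapped, so the condition
+    // must be commuted.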
+  } else if (instr->length()->IsConstantOperand()) {
+    __ cmp(ToOperand(instr->index()),
+           ToImmediate(LConstantOperand::cast(instr->length()),
+                       instr->hydrogen()->index()->representation()));
+  } else {
+    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
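+    // The bounds check was statically eliminated; in debug code, trap with
+    // int3 if it would actually have failed.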
+    Label done;
+    __ j(NegateCondition(cc), &done, Label::kNear);
+    __ int3();
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = instr->key();
+  if (!key->IsConstantOperand() &&
+      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+                                  elements_kind)) {
+    __ SmiUntag(ToRegister(key));
+  }
+  Operand operand(BuildFastArrayOperand(
+      instr->elements(),
+      key,
+      instr->hydrogen()->key()->representation(),
+      elements_kind,
+      instr->base_offset()));
+  if (elements_kind == FLOAT32_ELEMENTS) {
+    X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
+  } else if (elements_kind == FLOAT64_ELEMENTS) {
+    uint64_t int_val = kHoleNanInt64;
+    int32_t lower = static_cast<int32_t>(int_val);
+    int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
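+    // kHoleNanInt64 is the bit pattern of the hole NaN, split into 32-bit
+    // halves so it can be stored with two plain moves.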
+    Operand operand2 = BuildFastArrayOperand(
+        instr->elements(), instr->key(),
+        instr->hydrogen()->key()->representation(), elements_kind,
+        instr->base_offset() + kPointerSize);
+
+    Label no_special_nan_handling, done;
+    X87Register value = ToX87Register(instr->value());
+    X87Fxch(value);
+    __ lea(esp, Operand(esp, -kDoubleSize));
+    __ fst_d(MemOperand(esp, 0));
+    __ lea(esp, Operand(esp, kDoubleSize));
+    int offset = sizeof(kHoleNanUpper32);
+    // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff), so,
+    // as a temporary fix, we compare only the upper 32 bits against
+    // 0xffffffff to detect the hole.
+    __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
+    __ j(not_equal, &no_special_nan_handling, Label::kNear);
+    __ mov(operand, Immediate(lower));
+    __ mov(operand2, Immediate(upper));
+    __ jmp(&done, Label::kNear);
+
+    __ bind(&no_special_nan_handling);
+    __ fst_d(operand);
+    __ bind(&done);
+  } else {
+    Register value = ToRegister(instr->value());
+    switch (elements_kind) {
+      case UINT8_ELEMENTS:
+      case INT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ mov_b(operand, value);
+        break;
+      case UINT16_ELEMENTS:
+      case INT16_ELEMENTS:
+        __ mov_w(operand, value);
+        break;
+      case UINT32_ELEMENTS:
+      case INT32_ELEMENTS:
+        __ mov(operand, value);
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
+      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  Operand double_store_operand = BuildFastArrayOperand(
+      instr->elements(),
+      instr->key(),
+      instr->hydrogen()->key()->representation(),
+      FAST_DOUBLE_ELEMENTS,
+      instr->base_offset());
+
+  uint64_t int_val = kHoleNanInt64;
+  int32_t lower = static_cast<int32_t>(int_val);
+  int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+  Operand double_store_operand2 = BuildFastArrayOperand(
+      instr->elements(), instr->key(),
+      instr->hydrogen()->key()->representation(), FAST_DOUBLE_ELEMENTS,
+      instr->base_offset() + kPointerSize);
+
+  if (instr->hydrogen()->IsConstantHoleStore()) {
+    // This means we should store the (double) hole. No floating point
+    // registers required.
+    __ mov(double_store_operand, Immediate(lower));
+    __ mov(double_store_operand2, Immediate(upper));
+  } else {
+    Label no_special_nan_handling, done;
+    X87Register value = ToX87Register(instr->value());
+    X87Fxch(value);
+
+    if (instr->NeedsCanonicalization()) {
+      __ fld(0);
+      __ fld(0);
+      __ FCmp();
+      __ j(parity_odd, &no_special_nan_handling, Label::kNear);
+      // All NaNs are canonicalized to 0x7fffffffffffffff.
+      __ mov(double_store_operand, Immediate(0xffffffff));
+      __ mov(double_store_operand2, Immediate(0x7fffffff));
+      __ jmp(&done, Label::kNear);
+    } else {
+      __ lea(esp, Operand(esp, -kDoubleSize));
+      __ fst_d(MemOperand(esp, 0));
+      __ lea(esp, Operand(esp, kDoubleSize));
+      int offset = sizeof(kHoleNanUpper32);
+      // x87 converts sNaN(0xfff7fffffff7ffff) to QNaN(0xfffffffffff7ffff),
+      // so, as a temporary fix, we compare only the upper 32 bits against
+      // 0xffffffff to detect the hole.
+      __ cmp(MemOperand(esp, -offset), Immediate(0xffffffff));
+      __ j(not_equal, &no_special_nan_handling, Label::kNear);
+      __ mov(double_store_operand, Immediate(lower));
+      __ mov(double_store_operand2, Immediate(upper));
+      __ jmp(&done, Label::kNear);
+    }
+    __ bind(&no_special_nan_handling);
+    __ fst_d(double_store_operand);
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+  Register elements = ToRegister(instr->elements());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+
+  Operand operand = BuildFastArrayOperand(
+      instr->elements(),
+      instr->key(),
+      instr->hydrogen()->key()->representation(),
+      FAST_ELEMENTS,
+      instr->base_offset());
+  if (instr->value()->IsRegister()) {
+    __ mov(operand, ToRegister(instr->value()));
+  } else {
+    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
+    if (IsSmi(operand_value)) {
+      Immediate immediate = ToImmediate(operand_value, Representation::Smi());
+      __ mov(operand, immediate);
+    } else {
+      DCHECK(!IsInteger32(operand_value));
+      Handle<Object> handle_value = ToHandle(operand_value);
+      __ mov(operand, handle_value);
+    }
+  }
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    DCHECK(instr->value()->IsRegister());
+    Register value = ToRegister(instr->value());
+    DCHECK(!instr->key()->IsConstantOperand());
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute address of modified element and store it into key register.
+    __ lea(key, operand);
+    __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
+                   check_needed,
+                   instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // Dispatch by cases: external/typed array, fast double, or fast.
+  if (instr->is_fixed_typed_array()) {
+    DoStoreKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+    DoStoreKeyedFixedDoubleArray(instr);
+  } else {
+    DoStoreKeyedFixedArray(instr);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  if (instr->hydrogen()->HasVectorAndSlot()) {
+    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
+                        isolate(), instr->language_mode(),
+                        instr->hydrogen()->initialization_state()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+  DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
+  __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
+  class DeferredMaybeGrowElements final : public LDeferredCode {
+   public:
+    DeferredMaybeGrowElements(LCodeGen* codegen,
+                              LMaybeGrowElements* instr,
+                              const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) {}
+    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LMaybeGrowElements* instr_;
+  };
+
+  Register result = eax;
+  DeferredMaybeGrowElements* deferred =
+      new (zone()) DeferredMaybeGrowElements(this, instr, x87_stack_);
+  LOperand* key = instr->key();
+  LOperand* current_capacity = instr->current_capacity();
+
+  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
+  DCHECK(key->IsConstantOperand() || key->IsRegister());
+  DCHECK(current_capacity->IsConstantOperand() ||
+         current_capacity->IsRegister());
+
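+  // Fast path: when the key is below the current capacity the backing store
+  // is already big enough; otherwise jump to the deferred grow code.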
+  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    if (constant_key >= constant_capacity) {
+      // Deferred case.
+      __ jmp(deferred->entry());
+    }
+  } else if (key->IsConstantOperand()) {
+    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
+    __ cmp(ToOperand(current_capacity), Immediate(constant_key));
+    __ j(less_equal, deferred->entry());
+  } else if (current_capacity->IsConstantOperand()) {
+    int32_t constant_capacity =
+        ToInteger32(LConstantOperand::cast(current_capacity));
+    __ cmp(ToRegister(key), Immediate(constant_capacity));
+    __ j(greater_equal, deferred->entry());
+  } else {
+    __ cmp(ToRegister(key), ToRegister(current_capacity));
+    __ j(greater_equal, deferred->entry());
+  }
+
+  __ mov(result, ToOperand(instr->elements()));
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register result = eax;
+  __ Move(result, Immediate(0));
+
+  // We have to call a stub.
+  {
+    PushSafepointRegistersScope scope(this);
+    if (instr->object()->IsRegister()) {
+      __ Move(result, ToRegister(instr->object()));
+    } else {
+      __ mov(result, ToOperand(instr->object()));
+    }
+
+    LOperand* key = instr->key();
+    if (key->IsConstantOperand()) {
+      __ mov(ebx, ToImmediate(key, Representation::Smi()));
+    } else {
+      __ Move(ebx, ToRegister(key));
+      __ SmiTag(ebx);
+    }
+
+    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
+                               instr->hydrogen()->kind());
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(
+        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    __ StoreToSafepointRegisterSlot(result, result);
+  }
+
+  // Deopt on smi, which means the elements array changed to dictionary mode.
+  __ test(result, Immediate(kSmiTagMask));
+  DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = instr->from_kind();
+  ElementsKind to_kind = instr->to_kind();
+
+  Label not_applicable;
+  bool is_simple_map_transition =
+      IsSimpleMapChangeTransition(from_kind, to_kind);
+  Label::Distance branch_distance =
+      is_simple_map_transition ? Label::kNear : Label::kFar;
+  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
+  __ j(not_equal, &not_applicable, branch_distance);
+  if (is_simple_map_transition) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
+           Immediate(to_map));
+    // Write barrier.
+    DCHECK_NOT_NULL(instr->temp());
+    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
+                         ToRegister(instr->temp()), kDontSaveFPRegs);
+  } else {
+    DCHECK(ToRegister(instr->context()).is(esi));
+    DCHECK(object_reg.is(eax));
+    PushSafepointRegistersScope scope(this);
+    __ mov(ebx, to_map);
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithLazyDeopt(instr,
+        RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt final : public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen,
+                             LStringCharCodeAt* instr,
+                             const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new(zone()) DeferredStringCharCodeAt(this, instr, x87_stack_);
+
+  StringCharLoadGenerator::Generate(masm(),
+                                    factory(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Move(result, Immediate(0));
+
+  PushSafepointRegistersScope scope(this);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
+  if (instr->index()->IsConstantOperand()) {
+    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
+                                      Representation::Smi());
+    __ push(immediate);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
+                          instr, instr->context());
+  __ AssertSmi(eax);
+  __ SmiUntag(eax);
+  __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode final : public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen,
+                               LStringCharFromCode* instr,
+                               const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredStringCharFromCode(instr_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  DCHECK(!char_code.is(result));
+
+  __ cmp(char_code, String::kMaxOneByteCharCode);
+  __ j(above, deferred->entry());
+  __ Move(result, Immediate(factory()->single_character_string_cache()));
+  __ mov(result, FieldOperand(result,
+                              char_code, times_pointer_size,
+                              FixedArray::kHeaderSize));
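+  // The single-character string cache is a FixedArray indexed by char code;
+  // a miss is marked with undefined and handled in the deferred code.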
+  __ cmp(result, factory()->undefined_value());
+  __ j(equal, deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Move(result, Immediate(0));
+
+  PushSafepointRegistersScope scope(this);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(edx));
+  DCHECK(ToRegister(instr->right()).is(eax));
+  StringAddStub stub(isolate(),
+                     instr->hydrogen()->flags(),
+                     instr->hydrogen()->pretenure_flag());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  DCHECK(output->IsDoubleRegister());
+  if (input->IsRegister()) {
+    Register input_reg = ToRegister(input);
+    __ push(input_reg);
+    X87Mov(ToX87Register(output), Operand(esp, 0), kX87IntOperand);
+    __ pop(input_reg);
+  } else {
+    X87Mov(ToX87Register(output), ToOperand(input), kX87IntOperand);
+  }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+  X87Register res = ToX87Register(output);
+  X87PrepareToWrite(res);
+  __ LoadUint32NoSSE2(ToRegister(input));
+  X87CommitWrite(res);
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI final : public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen,
+                       LNumberTagI* instr,
+                       const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
+                                       SIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+
+  DeferredNumberTagI* deferred =
+      new(zone()) DeferredNumberTagI(this, instr, x87_stack_);
+  __ SmiTag(reg);
+  __ j(overflow, deferred->entry());
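+  // Overflow on the tag shift means the int32 does not fit in a smi; the
+  // deferred code allocates a heap number instead.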
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+  class DeferredNumberTagU final : public LDeferredCode {
+   public:
+    DeferredNumberTagU(LCodeGen* codegen,
+                       LNumberTagU* instr,
+                       const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+    void Generate() override {
+      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
+                                       UNSIGNED_INT32);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagU* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
+  Register reg = ToRegister(input);
+
+  DeferredNumberTagU* deferred =
+      new(zone()) DeferredNumberTagU(this, instr, x87_stack_);
+  __ cmp(reg, Immediate(Smi::kMaxValue));
+  __ j(above, deferred->entry());
+  __ SmiTag(reg);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+                                     LOperand* value,
+                                     LOperand* temp,
+                                     IntegerSignedness signedness) {
+  Label done, slow;
+  Register reg = ToRegister(value);
+  Register tmp = ToRegister(temp);
+
+  if (signedness == SIGNED_INT32) {
+    // There was overflow, so bits 30 and 31 of the original integer
+    // disagree. Try to allocate a heap number in new space and store
+    // the value in there. If that fails, call the runtime system.
+    __ SmiUntag(reg);
+    __ xor_(reg, 0x80000000);
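+    // SmiUntag restored every bit except the sign; since bits 30 and 31
+    // disagreed, flipping bit 31 recovers the original value.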
+    __ push(reg);
+    __ fild_s(Operand(esp, 0));
+    __ pop(reg);
+  } else {
+    // There's no fild variant for unsigned values, so zero-extend to a 64-bit
+    // int manually.
+    __ push(Immediate(0));
+    __ push(reg);
+    __ fild_d(Operand(esp, 0));
+    __ pop(reg);
+    __ pop(reg);
+  }
+
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
+    __ jmp(&done, Label::kNear);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ Move(reg, Immediate(0));
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    // NumberTagI and NumberTagD use the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // They only call Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(reg, eax);
+  }
+
+  __ bind(&done);
+  __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD final : public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen,
+                       LNumberTagD* instr,
+                       const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LNumberTagD* instr_;
+  };
+
+  Register reg = ToRegister(instr->result());
+
+  // Put the value on top of the x87 stack.
+  X87Register src = ToX87Register(instr->value());
+  // Don't use X87LoadForUsage here; it is only for instructions that clobber
+  // fp registers.
+  x87_stack_.Fxch(src);
+
+  DeferredNumberTagD* deferred =
+      new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
+  if (FLAG_inline_new) {
+    Register tmp = ToRegister(instr->temp());
+    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
+  } else {
+    __ jmp(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ Move(reg, Immediate(0));
+
+  PushSafepointRegistersScope scope(this);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(reg, eax);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  HChange* hchange = instr->hydrogen();
+  Register input = ToRegister(instr->value());
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ test(input, Immediate(0xc0000000));
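+    // A uint32 value with bit 30 or 31 set does not fit in a 31-bit smi.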
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
+  }
+  __ SmiTag(input);
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      !hchange->value()->CheckFlag(HValue::kUint32)) {
+    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+  }
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  LOperand* input = instr->value();
+  Register result = ToRegister(input);
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
+  if (instr->needs_check()) {
+    __ test(result, Immediate(kSmiTagMask));
+    DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+  } else {
+    __ AssertSmi(result);
+  }
+  __ SmiUntag(result);
+}
+
+
+void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
+                                      Register temp_reg, X87Register res_reg,
+                                      NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+  Label load_smi, done;
+
+  X87PrepareToWrite(res_reg);
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ JumpIfSmi(input_reg, &load_smi);
+
+    // Heap number map check.
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           factory()->heap_number_map());
+    if (!can_convert_undefined_to_nan) {
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+    } else {
+      Label heap_number, convert;
+      __ j(equal, &heap_number);
+
+      // Convert undefined (or hole) to NaN.
+      __ cmp(input_reg, factory()->undefined_value());
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+
+      __ bind(&convert);
+      __ push(Immediate(0xffffffff));
+      __ push(Immediate(0x7fffffff));
+      __ fld_d(MemOperand(esp, 0));
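+      // The two words pushed above form the quiet NaN bit pattern
+      // 0xffffffff7fffffff, loaded here as a double.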
+      __ lea(esp, Operand(esp, kDoubleSize));
+      __ jmp(&done, Label::kNear);
+
+      __ bind(&heap_number);
+    }
+    // Heap number to x87 conversion.
+    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
+      __ fldz();
+      __ FCmp();
+      __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+      __ j(not_zero, &done, Label::kNear);
+
+      // Use general purpose registers to check if we have -0.0.
+      __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+      __ test(temp_reg, Immediate(HeapNumber::kSignMask));
+      __ j(zero, &done, Label::kNear);
+
+      // Pop FPU stack before deoptimizing.
+      __ fstp(0);
+      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+    }
+    __ jmp(&done, Label::kNear);
+  } else {
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+  }
+
+  __ bind(&load_smi);
+  // Clobbering a temp is faster than re-tagging the
+  // input register since we avoid dependencies.
+  __ mov(temp_reg, input_reg);
+  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
+  __ push(temp_reg);
+  __ fild_s(Operand(esp, 0));
+  __ add(esp, Immediate(kPointerSize));
+  __ bind(&done);
+  X87CommitWrite(res_reg);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
+  Register input_reg = ToRegister(instr->value());
+
+  // The input was optimistically untagged; revert it.
+  STATIC_ASSERT(kSmiTagSize == 1);
+  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));
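+  // input_reg * 2 + kHeapObjectTag rebuilds the original tagged pointer.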
+
+  if (instr->truncating()) {
+    Label no_heap_number, check_bools, check_false;
+
+    // Heap number map check.
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           factory()->heap_number_map());
+    __ j(not_equal, &no_heap_number, Label::kNear);
+    __ TruncateHeapNumberToI(input_reg, input_reg);
+    __ jmp(done);
+
+    __ bind(&no_heap_number);
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
+    __ cmp(input_reg, factory()->undefined_value());
+    __ j(not_equal, &check_bools, Label::kNear);
+    __ Move(input_reg, Immediate(0));
+    __ jmp(done);
+
+    __ bind(&check_bools);
+    __ cmp(input_reg, factory()->true_value());
+    __ j(not_equal, &check_false, Label::kNear);
+    __ Move(input_reg, Immediate(1));
+    __ jmp(done);
+
+    __ bind(&check_false);
+    __ cmp(input_reg, factory()->false_value());
+    DeoptimizeIf(not_equal, instr,
+                 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+    __ Move(input_reg, Immediate(0));
+  } else {
+    // TODO(olivf) Converting a number on the fpu is actually quite slow. We
+    // should first try a fast conversion and then bailout to this slow case.
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           isolate()->factory()->heap_number_map());
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
+
+    __ sub(esp, Immediate(kPointerSize));
+    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+
+    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
+      Label no_precision_lost, not_nan, zero_check;
+      __ fld(0);
+
+      __ fist_s(MemOperand(esp, 0));
+      __ fild_s(MemOperand(esp, 0));
+      __ FCmp();
+      __ pop(input_reg);
+
+      __ j(equal, &no_precision_lost, Label::kNear);
+      __ fstp(0);
+      DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+      __ bind(&no_precision_lost);
+
+      __ j(parity_odd, &not_nan);
+      __ fstp(0);
+      DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+      __ bind(&not_nan);
+
+      __ test(input_reg, Operand(input_reg));
+      __ j(zero, &zero_check, Label::kNear);
+      __ fstp(0);
+      __ jmp(done);
+
+      __ bind(&zero_check);
+      // To check for minus zero, we load the value again as float, and check
+      // if that is still 0.
+      __ sub(esp, Immediate(kPointerSize));
+      __ fstp_s(Operand(esp, 0));
+      __ pop(input_reg);
+      __ test(input_reg, Operand(input_reg));
+      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
+    } else {
+      __ fist_s(MemOperand(esp, 0));
+      __ fild_s(MemOperand(esp, 0));
+      __ FCmp();
+      __ pop(input_reg);
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
+      DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
+    }
+  }
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI final : public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen,
+                      LTaggedToI* instr,
+                      const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  Register input_reg = ToRegister(input);
+  DCHECK(input_reg.is(ToRegister(instr->result())));
+
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred =
+        new(zone()) DeferredTaggedToI(this, instr, x87_stack_);
+    // Optimistically untag the input.
+    // If the input is a HeapObject, SmiUntag will set the carry flag.
+    STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+    __ SmiUntag(input_reg);
+    // Branch to deferred code if the input was tagged.
+    // The deferred code will take care of restoring the tag.
+    __ j(carry, deferred->entry());
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  LOperand* temp = instr->temp();
+  DCHECK(temp->IsRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  Register temp_reg = ToRegister(temp);
+
+  HValue* value = instr->hydrogen()->value();
+  NumberUntagDMode mode = value->representation().IsSmi()
+      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+  EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result),
+                         mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsDoubleRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsRegister());
+  Register result_reg = ToRegister(result);
+
+  if (instr->truncating()) {
+    X87Register input_reg = ToX87Register(input);
+    X87Fxch(input_reg);
+    __ TruncateX87TOSToI(result_reg);
+  } else {
+    Label lost_precision, is_nan, minus_zero, done;
+    X87Register input_reg = ToX87Register(input);
+    X87Fxch(input_reg);
+    __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+                 &lost_precision, &is_nan, &minus_zero);
+    __ jmp(&done);
+    __ bind(&lost_precision);
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+    __ bind(&is_nan);
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+    __ bind(&minus_zero);
+    DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsDoubleRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsRegister());
+  Register result_reg = ToRegister(result);
+
+  Label lost_precision, is_nan, minus_zero, done;
+  X87Register input_reg = ToX87Register(input);
+  X87Fxch(input_reg);
+  __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
+               &lost_precision, &is_nan, &minus_zero);
+  __ jmp(&done);
+  __ bind(&lost_precision);
+  DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
+  __ bind(&is_nan);
+  DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
+  __ bind(&minus_zero);
+  DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
+  __ bind(&done);
+  __ SmiTag(result_reg);
+  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->value();
+  __ test(ToOperand(input), Immediate(kSmiTagMask));
+  DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ test(ToOperand(input), Immediate(kSmiTagMask));
+    DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+  }
+}
+
+
+void LCodeGen::DoCheckArrayBufferNotNeutered(
+    LCheckArrayBufferNotNeutered* instr) {
+  Register view = ToRegister(instr->view());
+  Register scratch = ToRegister(instr->scratch());
+
+  __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
+  __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
+            1 << JSArrayBuffer::WasNeutered::kShift);
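+  // The WasNeutered bit means the backing buffer was detached; treat any
+  // access as out of bounds.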
+  DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+            static_cast<int8_t>(first));
+
+    // If there is only one type in the interval, check for equality.
+    if (first == last) {
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+    } else {
+      DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
+                static_cast<int8_t>(last));
+        DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
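+      // With a single-bit mask the tag is either zero or the mask itself, so
+      // one bit test suffices.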
+      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
+                   Deoptimizer::kWrongInstanceType);
+    } else {
+      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
+      __ and_(temp, mask);
+      __ cmp(temp, tag);
+      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
+  if (instr->hydrogen()->object_in_new_space()) {
+    Register reg = ToRegister(instr->value());
+    Handle<Cell> cell = isolate()->factory()->NewCell(object);
+    __ cmp(reg, Operand::ForCell(cell));
+  } else {
+    Operand operand = ToOperand(instr->value());
+    __ cmp(operand, object);
+  }
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  {
+    PushSafepointRegistersScope scope(this);
+    __ push(object);
+    __ xor_(esi, esi);
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+
+    __ test(eax, Immediate(kSmiTagMask));
+  }
+  DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  class DeferredCheckMaps final : public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen,
+                      LCheckMaps* instr,
+                      Register object,
+                      const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    void Generate() override {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->IsStabilityCheck()) {
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  Register reg = ToRegister(input);
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    deferred = new(zone()) DeferredCheckMaps(this, instr, reg, x87_stack_);
+    __ bind(deferred->check_maps());
+  }
+
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+  Label success;
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
+    __ CompareMap(reg, map);
+    __ j(equal, &success, Label::kNear);
+  }
+
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
+  __ CompareMap(reg, map);
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    __ j(not_equal, deferred->entry());
+  } else {
+    DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+  }
+
+  __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  X87Register value_reg = ToX87Register(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  X87Fxch(value_reg);
+  __ ClampTOSToUint8(result_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  DCHECK(instr->unclamped()->Equals(instr->result()));
+  Register value_reg = ToRegister(instr->result());
+  __ ClampUint8(value_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->scratch());
+  Register scratch2 = ToRegister(instr->scratch2());
+  Register scratch3 = ToRegister(instr->scratch3());
+  Label is_smi, done, heap_number, valid_exponent,
+      largest_value, zero_result, maybe_nan_or_infinity;
+
+  __ JumpIfSmi(input_reg, &is_smi);
+
+  // Check for heap number
+  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+         factory()->heap_number_map());
+  __ j(equal, &heap_number, Label::kNear);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ cmp(input_reg, factory()->undefined_value());
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
+  __ jmp(&zero_result, Label::kNear);
+
+  // Heap number
+  __ bind(&heap_number);
+
+  // Surprisingly, all of the hand-crafted bit manipulations below are much
+  // faster than the x86 FPU built-in instruction, especially since "banker's
+  // rounding" would be very expensive on top of that.
+
+  // Get exponent word.
+  __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+  __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+
+  // Test for negative values --> clamp to zero
+  __ test(scratch, scratch);
+  __ j(negative, &zero_result, Label::kNear);
+
+  // Get exponent alone in scratch2.
+  __ mov(scratch2, scratch);
+  __ and_(scratch2, HeapNumber::kExponentMask);
+  __ shr(scratch2, HeapNumber::kExponentShift);
+  __ j(zero, &zero_result, Label::kNear);
+  __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
+  __ j(negative, &zero_result, Label::kNear);
+
+  const uint32_t non_int8_exponent = 7;
+  __ cmp(scratch2, Immediate(non_int8_exponent + 1));
+  // If the exponent is too big, check for special values.
+  __ j(greater, &maybe_nan_or_infinity, Label::kNear);
+
+  __ bind(&valid_exponent);
+  // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
+  // < 7. The shift bias is the number of bits to shift the mantissa such that,
+  // with an exponent of 7, the top-most one ends up in bit 30, allowing
+  // detection of the rounding overflow of 255.5 to 256 (bit 31 goes from 0 to
+  // 1).
+  int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
+  __ lea(result_reg, MemOperand(scratch2, shift_bias));
+  // Here result_reg (ecx) is the shift, scratch is the exponent word.  Get the
+  // top bits of the mantissa.
+  __ and_(scratch, HeapNumber::kMantissaMask);
+  // Put back the implicit 1 of the mantissa
+  __ or_(scratch, 1 << HeapNumber::kExponentShift);
+  // Shift up to round
+  __ shl_cl(scratch);
+  // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
+  // use the bit in the "ones" place and add it to the "halves" place, which has
+  // the effect of rounding to even.
+  __ mov(scratch2, scratch);
+  const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
+  const uint32_t one_bit_shift = one_half_bit_shift + 1;
+  __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
+  __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
+  Label no_round;
+  __ j(less, &no_round, Label::kNear);
+  Label round_up;
+  __ mov(scratch2, Immediate(1 << one_half_bit_shift));
+  __ j(greater, &round_up, Label::kNear);
+  __ test(scratch3, scratch3);
+  __ j(not_zero, &round_up, Label::kNear);
+  __ mov(scratch2, scratch);
+  __ and_(scratch2, Immediate(1 << one_bit_shift));
+  __ shr(scratch2, 1);
+  __ bind(&round_up);
+  __ add(scratch, scratch2);
+  __ j(overflow, &largest_value, Label::kNear);
+  __ bind(&no_round);
+  __ shr(scratch, 23);
+  __ mov(result_reg, scratch);
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&maybe_nan_or_infinity);
+  // Check for NaN/Infinity; all other values map to 255.
+  __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
+  __ j(not_equal, &largest_value, Label::kNear);
+
+  // Check for NaN, which differs from Infinity in that at least one mantissa
+  // bit is set.
+  __ and_(scratch, HeapNumber::kMantissaMask);
+  __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+  __ j(not_zero, &zero_result, Label::kNear);  // M!=0 --> NaN
+  // Infinity -> Fall through to map to 255.
+
+  __ bind(&largest_value);
+  __ mov(result_reg, Immediate(255));
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&zero_result);
+  __ xor_(result_reg, result_reg);
+  __ jmp(&done, Label::kNear);
+
+  // smi
+  __ bind(&is_smi);
+  if (!input_reg.is(result_reg)) {
+    __ mov(result_reg, input_reg);
+  }
+  __ SmiUntag(result_reg);
+  __ ClampUint8(result_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+  X87Register value_reg = ToX87Register(instr->value());
+  Register result_reg = ToRegister(instr->result());
+  X87Fxch(value_reg);
+  __ sub(esp, Immediate(kDoubleSize));
+  __ fst_d(Operand(esp, 0));
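+  // The double now sits on the stack; read the requested 32-bit half (low at
+  // esp, high at esp + kPointerSize).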
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ mov(result_reg, Operand(esp, kPointerSize));
+  } else {
+    __ mov(result_reg, Operand(esp, 0));
+  }
+  __ add(esp, Immediate(kDoubleSize));
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  X87Register result_reg = ToX87Register(instr->result());
+  // Follow the pattern below to write an x87 fp register.
+  X87PrepareToWrite(result_reg);
+  __ sub(esp, Immediate(kDoubleSize));
+  __ mov(Operand(esp, 0), lo_reg);
+  __ mov(Operand(esp, kPointerSize), hi_reg);
+  __ fld_d(Operand(esp, 0));
+  __ add(esp, Immediate(kDoubleSize));
+  X87CommitWrite(result_reg);
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate final : public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen,
+                     LAllocate* instr,
+                     const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr, x87_stack_);
+
+  Register result = ToRegister(instr->result());
+  Register temp = ToRegister(instr->temp());
+
+  // Allocate memory for the object.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE);
+  }
+
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    CHECK(size <= Page::kMaxRegularHeapObjectSize);
+    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+  } else {
+    Register size = ToRegister(instr->size());
+    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ mov(temp, (size / kPointerSize) - 1);
+    } else {
+      temp = ToRegister(instr->size());
+      __ shr(temp, kPointerSizeLog2);
+      __ dec(temp);
+    }
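+    // temp now holds the word count minus one; fill the object with filler
+    // maps from the top word downwards.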
+    Label loop;
+    __ bind(&loop);
+    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
+        isolate()->factory()->one_pointer_filler_map());
+    __ dec(temp);
+    __ j(not_zero, &loop);
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ Move(result, Immediate(Smi::FromInt(0)));
+
+  PushSafepointRegistersScope scope(this);
+  if (instr->size()->IsRegister()) {
+    Register size = ToRegister(instr->size());
+    DCHECK(!size.is(result));
+    __ SmiTag(ToRegister(instr->size()));
+    __ push(size);
+  } else {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    if (size >= 0 && size <= Smi::kMaxValue) {
+      __ push(Immediate(Smi::FromInt(size)));
+    } else {
+      // We should never get here at runtime => abort.
+      __ int3();
+      return;
+    }
+  }
+
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
+  if (instr->hydrogen()->IsOldSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ push(Immediate(Smi::FromInt(flags)));
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(result, eax);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  DCHECK(ToRegister(instr->value()).is(eax));
+  __ push(eax);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->value()).is(ebx));
+  Label end, do_call;
+  Register value_register = ToRegister(instr->value());
+  __ JumpIfNotSmi(value_register, &do_call);
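+  // Smis are numbers; return "number" directly and skip the stub call.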
+  __ mov(eax, Immediate(isolate()->factory()->number_string()));
+  __ jmp(&end);
+  __ bind(&do_call);
+  TypeofStub stub(isolate());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  __ bind(&end);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Condition final_branch_condition = EmitTypeofIs(instr, input);
+  if (final_branch_condition != no_condition) {
+    EmitBranch(instr, final_branch_condition);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
+  Label* true_label = instr->TrueLabel(chunk_);
+  Label* false_label = instr->FalseLabel(chunk_);
+  Handle<String> type_name = instr->type_literal();
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+  int next_block = GetNextEmittedBlock();
+
+  Label::Distance true_distance = left_block == next_block ? Label::kNear
+                                                           : Label::kFar;
+  Label::Distance false_distance = right_block == next_block ? Label::kNear
+                                                             : Label::kFar;
+  Condition final_branch_condition = no_condition;
+  if (String::Equals(type_name, factory()->number_string())) {
+    __ JumpIfSmi(input, true_label, true_distance);
+    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
+           factory()->heap_number_map());
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory()->string_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
+    final_branch_condition = below;
+
+  } else if (String::Equals(type_name, factory()->symbol_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    __ CmpObjectType(input, SYMBOL_TYPE, input);
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory()->boolean_string())) {
+    __ cmp(input, factory()->true_value());
+    __ j(equal, true_label, true_distance);
+    __ cmp(input, factory()->false_value());
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory()->undefined_string())) {
+    __ cmp(input, factory()->undefined_value());
+    __ j(equal, true_label, true_distance);
+    __ JumpIfSmi(input, false_label, false_distance);
+    // Check for undetectable objects => true.
+    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              1 << Map::kIsUndetectable);
+    final_branch_condition = not_zero;
+
+  } else if (String::Equals(type_name, factory()->function_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    // Check for callable and not undetectable objects => true.
+    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
+    __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
+    __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+    __ cmp(input, 1 << Map::kIsCallable);
+    final_branch_condition = equal;
+
+  } else if (String::Equals(type_name, factory()->object_string())) {
+    __ JumpIfSmi(input, false_label, false_distance);
+    __ cmp(input, factory()->null_value());
+    __ j(equal, true_label, true_distance);
+    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+    __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
+    __ j(below, false_label, false_distance);
+    // Check for callable or undetectable objects => false.
+    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
+              (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
+    final_branch_condition = zero;
+
+// clang-format off
+#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)         \
+  } else if (String::Equals(type_name, factory()->type##_string())) { \
+    __ JumpIfSmi(input, false_label, false_distance);                 \
+    __ cmp(FieldOperand(input, HeapObject::kMapOffset),               \
+           factory()->type##_map());                                  \
+    final_branch_condition = equal;
+  SIMD128_TYPES(SIMD128_TYPE)
+#undef SIMD128_TYPE
+    // clang-format on
+
+  } else {
+    __ jmp(false_label, false_distance);
+  }
+  return final_branch_condition;
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
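+    // Example: if space_needed is 5 but only 2 bytes of code were emitted
+    // since the last lazy deopt, pad with 3 bytes of nops.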
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      __ Nop(padding_size);
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
+  }
+  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck final : public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen,
+                       LStackCheck* instr,
+                       const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack), instr_(instr) { }
+    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LStackCheck* instr_;
+  };
+
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(isolate());
+    __ cmp(esp, Operand::StaticVariable(stack_limit));
+    __ j(above_equal, &done, Label::kNear);
+
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(esi));
+    CallCode(isolate()->builtins()->StackCheck(),
+             RelocInfo::CODE_TARGET,
+             instr);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new(zone()) DeferredStackCheck(this, instr, x87_stack_);
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(isolate());
+    __ cmp(esp, Operand::StaticVariable(stack_limit));
+    __ j(below, deferred_stack_check->entry());
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting call and the safepoint in
+    // the deferred code.
+  }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  DCHECK(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  __ test(eax, Immediate(kSmiTagMask));
+  DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
+
+  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
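+  // Deoptimize for proxies and for anything that is not a JS receiver.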
+  __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
+  DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);
+
+  Label use_cache, call_runtime;
+  __ CheckEnumCache(&call_runtime);
+
+  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
+  __ jmp(&use_cache, Label::kNear);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(eax);
+  CallRuntime(Runtime::kGetPropertyNamesFast, instr);
+
+  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
+         isolate()->factory()->meta_map());
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  Label load_cache, done;
+  __ EnumLength(result, map);
+  __ cmp(result, Immediate(Smi::FromInt(0)));
+  __ j(not_equal, &load_cache, Label::kNear);
+  __ mov(result, isolate()->factory()->empty_fixed_array());
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&load_cache);
+  __ LoadInstanceDescriptors(map, result);
+  __ mov(result,
+         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
+  __ mov(result,
+         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
+  __ bind(&done);
+  __ test(result, result);
+  DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  __ cmp(ToRegister(instr->map()),
+         FieldOperand(object, HeapObject::kMapOffset));
+  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ push(object);
+  __ push(index);
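+  // A real context is not needed for this call, so pass zero in esi.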
+  __ xor_(esi, esi);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(object, eax);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble final : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register object,
+                              Register index,
+                              const X87Stack& x87_stack)
+        : LDeferredCode(codegen, x87_stack),
+          instr_(instr),
+          object_(object),
+          index_(index) {
+    }
+    void Generate() override {
+      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
+    }
+    LInstruction* instr() override { return instr_; }
+
+   private:
+    LLoadFieldByIndex* instr_;
+    Register object_;
+    Register index_;
+  };
+
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, object, index, x87_stack_);
+
+  Label out_of_object, done;
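+  // The low bit of the untagged index flags a mutable double field, which
+  // must be loaded through the deferred runtime path; the remaining bits
+  // encode the field index, negative for out-of-object properties.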
+  __ test(index, Immediate(Smi::FromInt(1)));
+  __ j(not_zero, deferred->entry());
+
+  __ sar(index, 1);
+
+  __ cmp(index, Immediate(0));
+  __ j(less, &out_of_object, Label::kNear);
+  __ mov(object, FieldOperand(object,
+                              index,
+                              times_half_pointer_size,
+                              JSObject::kHeaderSize));
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&out_of_object);
+  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
+  __ neg(index);
+  // Index is now equal to the out-of-object property index plus 1.
+  __ mov(object, FieldOperand(object,
+                              index,
+                              times_half_pointer_size,
+                              FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+  Register context = ToRegister(instr->context());
+  __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+  Handle<ScopeInfo> scope_info = instr->scope_info();
+  __ Push(scope_info);
+  __ push(ToRegister(instr->function()));
+  CallRuntime(Runtime::kPushBlockContext, instr);
+  RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_X87
diff --git a/src/crankshaft/x87/lithium-codegen-x87.h b/src/crankshaft/x87/lithium-codegen-x87.h
new file mode 100644
index 0000000..6346344
--- /dev/null
+++ b/src/crankshaft/x87/lithium-codegen-x87.h
@@ -0,0 +1,495 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
+#define V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
+
+#include <map>
+
+#include "src/ast/scopes.h"
+#include "src/base/logging.h"
+#include "src/crankshaft/lithium-codegen.h"
+#include "src/crankshaft/x87/lithium-gap-resolver-x87.h"
+#include "src/crankshaft/x87/lithium-x87.h"
+#include "src/deoptimizer.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class LGapNode;
+class SafepointGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : LCodeGenBase(chunk, assembler, info),
+        jump_table_(4, info->zone()),
+        scope_(info->scope()),
+        deferred_(8, info->zone()),
+        dynamic_frame_alignment_(false),
+        support_aligned_spilled_doubles_(false),
+        frame_is_built_(false),
+        x87_stack_(assembler),
+        safepoints_(info->zone()),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub() ||
+        info()->requires_frame();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
+  // Support for converting LOperands to assembler types.
+  Operand ToOperand(LOperand* op) const;
+  Register ToRegister(LOperand* op) const;
+  X87Register ToX87Register(LOperand* op) const;
+
+  bool IsInteger32(LConstantOperand* op) const;
+  bool IsSmi(LConstantOperand* op) const;
+  Immediate ToImmediate(LOperand* op, const Representation& r) const {
+    return Immediate(ToRepresentation(LConstantOperand::cast(op), r));
+  }
+  double ToDouble(LConstantOperand* op) const;
+
+  // Support for non-sse2 (x87) floating point stack handling.
+  // These functions maintain the mapping of physical stack registers to our
+  // virtual registers between instructions.
+  enum X87OperandType { kX87DoubleOperand, kX87FloatOperand, kX87IntOperand };
+
+  void X87Mov(X87Register reg, Operand src,
+      X87OperandType operand = kX87DoubleOperand);
+  void X87Mov(Operand src, X87Register reg,
+      X87OperandType operand = kX87DoubleOperand);
+  void X87Mov(X87Register reg, X87Register src,
+              X87OperandType operand = kX87DoubleOperand);
+
+  void X87PrepareBinaryOp(
+      X87Register left, X87Register right, X87Register result);
+
+  void X87LoadForUsage(X87Register reg);
+  void X87LoadForUsage(X87Register reg1, X87Register reg2);
+  void X87PrepareToWrite(X87Register reg) { x87_stack_.PrepareToWrite(reg); }
+  void X87CommitWrite(X87Register reg) { x87_stack_.CommitWrite(reg); }
+
+  void X87Fxch(X87Register reg, int other_slot = 0) {
+    x87_stack_.Fxch(reg, other_slot);
+  }
+  void X87Free(X87Register reg) {
+    x87_stack_.Free(reg);
+  }
+
+  bool X87StackEmpty() {
+    return x87_stack_.depth() == 0;
+  }
+
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+
+  // The operand denoting the second word (the one with a higher address) of
+  // a double stack slot.
+  Operand HighOperand(LOperand* op);
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  void DoDeferredNumberTagIU(LInstruction* instr,
+                             LOperand* value,
+                             LOperand* temp,
+                             IntegerSignedness signedness);
+
+  void DoDeferredTaggedToI(LTaggedToI* instr, Label* done);
+  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredMaybeGrowElements(LMaybeGrowElements* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocate(LAllocate* instr);
+  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register object,
+                                   Register index);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  void EnsureRelocSpaceForDeoptimization();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  LanguageMode language_mode() const { return info()->language_mode(); }
+
+  Scope* scope() const { return scope_; }
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+  // Code generation passes.  The boolean-returning passes return true if
+  // code generation should continue.
+  void GenerateBodyInstructionPre(LInstruction* instr) override;
+  void GenerateBodyInstructionPost(LInstruction* instr) override;
+  bool GeneratePrologue();
+  bool GenerateDeferredCode();
+  bool GenerateJumpTable();
+  bool GenerateSafepointTable();
+
+  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+  void GenerateOsrPrologue();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int argc,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, argc, instr);
+  }
+
+  void CallRuntime(Runtime::FunctionId id, LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, function->nargs, instr);
+  }
+
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr,
+                               LOperand* context);
+
+  void LoadContextFromDeferred(LOperand* context);
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in edi.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count, int arity,
+                         LInstruction* instr);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+  void DeoptimizeIf(Condition cc, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason,
+                    Deoptimizer::BailoutType bailout_type);
+  void DeoptimizeIf(Condition cc, LInstruction* instr,
+                    Deoptimizer::DeoptReason deopt_reason);
+
+  bool DeoptEveryNTimes() {
+    return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
+  }
+
+  void AddToTranslation(LEnvironment* environment,
+                        Translation* translation,
+                        LOperand* op,
+                        bool is_tagged,
+                        bool is_uint32,
+                        int* object_index_pointer,
+                        int* dematerialized_index_pointer);
+
+  Register ToRegister(int index) const;
+  X87Register ToX87Register(int index) const;
+  int32_t ToRepresentation(LConstantOperand* op, const Representation& r) const;
+  int32_t ToInteger32(LConstantOperand* op) const;
+  ExternalReference ToExternalReference(LConstantOperand* op) const;
+
+  Operand BuildFastArrayOperand(LOperand* elements_pointer,
+                                LOperand* key,
+                                Representation key_representation,
+                                ElementsKind elements_kind,
+                                uint32_t base_offset);
+
+  Operand BuildSeqStringOperand(Register string,
+                                LOperand* index,
+                                String::Encoding encoding);
+
+  void EmitIntegerMathAbs(LMathAbs* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    Safepoint::DeoptMode mode);
+
+  void RecordAndWritePosition(int position) override;
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block);
+
+  // EmitBranch expects to be the last instruction of a block.
+  template<class InstrType>
+  void EmitBranch(InstrType instr, Condition cc);
+  template <class InstrType>
+  void EmitTrueBranch(InstrType instr, Condition cc);
+  template <class InstrType>
+  void EmitFalseBranch(InstrType instr, Condition cc);
+  void EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input,
+                              Register temp, X87Register res_reg,
+                              NumberUntagDMode mode);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(LTypeofIsAndBranch* instr, Register input);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string,
+                         SmiCheck check_needed);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset,
+                    AllocationSiteMode mode);
+
+  void EnsureSpaceForLazyDeopt(int space_needed) override;
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+  template <class T>
+  void EmitVectorStoreICRegisters(T* instr);
+
+  void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
+
+  // Emits code for pushing either a tagged constant, a (non-double)
+  // register, or a stack slot operand.
+  void EmitPushTaggedOperand(LOperand* operand);
+
+  void X87Fld(Operand src, X87OperandType opts);
+
+  void EmitFlushX87ForDeopt();
+  void FlushX87StackIfNecessary(LInstruction* instr) {
+    x87_stack_.FlushIfNecessary(instr, this);
+  }
+  friend class LGapResolver;
+
+#ifdef _MSC_VER
+  // On Windows, you may not access the stack more than one page below
+  // the most recently mapped page. To make the allocated area randomly
+  // accessible, we write an arbitrary value to each page in range
+  // esp + offset - page_size .. esp in turn.
+  void MakeSureStackPagesMapped(int offset);
+#endif
+
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+  Scope* const scope_;
+  ZoneList<LDeferredCode*> deferred_;
+  bool dynamic_frame_alignment_;
+  bool support_aligned_spilled_doubles_;
+  bool frame_is_built_;
+
+  class X87Stack : public ZoneObject {
+   public:
+    explicit X87Stack(MacroAssembler* masm)
+        : stack_depth_(0), is_mutable_(true), masm_(masm) { }
+    explicit X87Stack(const X87Stack& other)
+        : stack_depth_(other.stack_depth_), is_mutable_(false),
+          masm_(other.masm_) {
+      for (int i = 0; i < stack_depth_; i++) {
+        stack_[i] = other.stack_[i];
+      }
+    }
+    bool operator==(const X87Stack& other) const {
+      if (stack_depth_ != other.stack_depth_) return false;
+      for (int i = 0; i < stack_depth_; i++) {
+        if (!stack_[i].is(other.stack_[i])) return false;
+      }
+      return true;
+    }
+    X87Stack& operator=(const X87Stack& other) {
+      stack_depth_ = other.stack_depth_;
+      for (int i = 0; i < stack_depth_; i++) {
+        stack_[i] = other.stack_[i];
+      }
+      return *this;
+    }
+    bool Contains(X87Register reg);
+    void Fxch(X87Register reg, int other_slot = 0);
+    void Free(X87Register reg);
+    void PrepareToWrite(X87Register reg);
+    void CommitWrite(X87Register reg);
+    void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
+    void LeavingBlock(int current_block_id, LGoto* goto_instr, LCodeGen* cgen);
+    int depth() const { return stack_depth_; }
+    int GetLayout();
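+    // Returns the physical st(i) slot currently holding reg.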
+    int st(X87Register reg) { return st2idx(ArrayIndex(reg)); }
+    void pop() {
+      DCHECK(is_mutable_);
+      USE(is_mutable_);
+      stack_depth_--;
+    }
+    void push(X87Register reg) {
+      DCHECK(is_mutable_);
+      DCHECK(stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
+      stack_[stack_depth_] = reg;
+      stack_depth_++;
+    }
+
+    MacroAssembler* masm() const { return masm_; }
+    Isolate* isolate() const { return masm_->isolate(); }
+
+   private:
+    int ArrayIndex(X87Register reg);
+    int st2idx(int pos);
+
+    X87Register stack_[X87Register::kMaxNumAllocatableRegisters];
+    int stack_depth_;
+    bool is_mutable_;
+    MacroAssembler* masm_;
+  };
+  X87Stack x87_stack_;
+  // block_id -> X87Stack*;
+  typedef std::map<int, X87Stack*> X87StackMap;
+  X87StackMap x87_stack_map_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope final BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->masm_->PushSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+      DCHECK(codegen_->info()->is_calling());
+    }
+
+    ~PushSafepointRegistersScope() {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      codegen_->masm_->PopSafepointRegisters();
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  friend class X87Stack;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode : public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen, const LCodeGen::X87Stack& x87_stack)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_),
+        x87_stack_(x87_stack) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() {}
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  Label* done() { return codegen_->NeedsDeferredFrame() ? &done_ : exit(); }
+  int instruction_index() const { return instruction_index_; }
+  const LCodeGen::X87Stack& x87_stack() const { return x87_stack_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  Label done_;
+  int instruction_index_;
+  LCodeGen::X87Stack x87_stack_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_X87_LITHIUM_CODEGEN_X87_H_
diff --git a/src/crankshaft/x87/lithium-gap-resolver-x87.cc b/src/crankshaft/x87/lithium-gap-resolver-x87.cc
new file mode 100644
index 0000000..aa91835
--- /dev/null
+++ b/src/crankshaft/x87/lithium-gap-resolver-x87.cc
@@ -0,0 +1,460 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/crankshaft/x87/lithium-gap-resolver-x87.h"
+#include "src/register-configuration.h"
+
+#include "src/crankshaft/x87/lithium-codegen-x87.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner),
+      moves_(32, owner->zone()),
+      source_uses_(),
+      destination_uses_(),
+      spilled_register_(-1) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  DCHECK(HasBeenReset());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      PerformMove(i);
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      DCHECK(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  Finish();
+  DCHECK(HasBeenReset());
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) AddMove(move);
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.  We use operand swaps to resolve cycles,
+  // which means that a call to PerformMove could change any source operand
+  // in the move graph.
+
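+  // Example: the parallel move {eax -> ebx, ebx -> eax} forms a cycle; the
+  // recursion bottoms out at the first (pending) move and EmitSwap resolves
+  // the cycle with a single exchange.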
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved on the side.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      // Though PerformMove can change any source operand in the move graph,
+      // this call cannot create a blocking move via a swap (this loop does
+      // not miss any).  Assume there is a non-blocking move with source A
+      // and this move is blocked on source B and there is a swap of A and
+      // B.  Then A and B must be involved in the same cycle (or they would
+      // not be swapped).  Since this move's destination is B and there is
+      // only a single incoming edge to an operand, this move must also be
+      // involved in the same cycle.  In that case, the blocking move will
+      // be created but will be "pending" when we return from PerformMove.
+      PerformMove(i);
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // This move's source may have changed due to swaps to resolve cycles and
+  // so it may now be the last move in the cycle.  If so remove it.
+  if (moves_[index].source()->Equals(destination)) {
+    RemoveMove(index);
+    return;
+  }
+
+  // The move may be blocked on a (at most one) pending move, in which case
+  // we have a cycle.  Search for such a blocking move and perform a swap to
+  // resolve it.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination)) {
+      DCHECK(other_move.IsPending());
+      EmitSwap(index);
+      return;
+    }
+  }
+
+  // This move is not blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::AddMove(LMoveOperands move) {
+  LOperand* source = move.source();
+  if (source->IsRegister()) ++source_uses_[source->index()];
+
+  LOperand* destination = move.destination();
+  if (destination->IsRegister()) ++destination_uses_[destination->index()];
+
+  moves_.Add(move, cgen_->zone());
+}
+
+
+void LGapResolver::RemoveMove(int index) {
+  LOperand* source = moves_[index].source();
+  if (source->IsRegister()) {
+    --source_uses_[source->index()];
+    DCHECK(source_uses_[source->index()] >= 0);
+  }
+
+  LOperand* destination = moves_[index].destination();
+  if (destination->IsRegister()) {
+    --destination_uses_[destination->index()];
+    DCHECK(destination_uses_[destination->index()] >= 0);
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+int LGapResolver::CountSourceUses(LOperand* operand) {
+  int count = 0;
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated() && moves_[i].source()->Equals(operand)) {
+      ++count;
+    }
+  }
+  return count;
+}
+
+
+Register LGapResolver::GetFreeRegisterNot(Register reg) {
+  int skip_index = reg.is(no_reg) ? -1 : reg.code();
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+    int code = config->GetAllocatableGeneralCode(i);
+    if (source_uses_[code] == 0 && destination_uses_[code] > 0 &&
+        code != skip_index) {
+      return Register::from_code(code);
+    }
+  }
+  return no_reg;
+}
+
+
+bool LGapResolver::HasBeenReset() {
+  if (!moves_.is_empty()) return false;
+  if (spilled_register_ >= 0) return false;
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+    int code = config->GetAllocatableGeneralCode(i);
+    if (source_uses_[code] != 0) return false;
+    if (destination_uses_[code] != 0) return false;
+  }
+  return true;
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::Finish() {
+  if (spilled_register_ >= 0) {
+    __ pop(Register::from_code(spilled_register_));
+    spilled_register_ = -1;
+  }
+  moves_.Rewind(0);
+}
+
+
+void LGapResolver::EnsureRestored(LOperand* operand) {
+  if (operand->IsRegister() && operand->index() == spilled_register_) {
+    __ pop(Register::from_code(spilled_register_));
+    spilled_register_ = -1;
+  }
+}
+
+
+Register LGapResolver::EnsureTempRegister() {
+  // 1. We may have already spilled to create a temp register.
+  if (spilled_register_ >= 0) {
+    return Register::from_code(spilled_register_);
+  }
+
+  // 2. We may have a free register that we can use without spilling.
+  Register free = GetFreeRegisterNot(no_reg);
+  if (!free.is(no_reg)) return free;
+
+  // 3. Prefer to spill a register that is not used in any remaining move
+  // because it will not need to be restored until the end.
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+    int code = config->GetAllocatableGeneralCode(i);
+    if (source_uses_[code] == 0 && destination_uses_[code] == 0) {
+      Register scratch = Register::from_code(code);
+      __ push(scratch);
+      spilled_register_ = code;
+      return scratch;
+    }
+  }
+
+  // 4. Use an arbitrary register.  Register 0 is as arbitrary as any other.
+  spilled_register_ = config->GetAllocatableGeneralCode(0);
+  Register scratch = Register::from_code(spilled_register_);
+  __ push(scratch);
+  return scratch;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+  EnsureRestored(source);
+  EnsureRestored(destination);
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = cgen_->ToRegister(source);
+    Operand dst = cgen_->ToOperand(destination);
+    __ mov(dst, src);
+
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Operand src = cgen_->ToOperand(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      __ mov(dst, src);
+    } else {
+      // Spill on demand to use a temporary register for memory-to-memory
+      // moves.
+      Register tmp = EnsureTempRegister();
+      Operand dst = cgen_->ToOperand(destination);
+      __ mov(tmp, src);
+      __ mov(dst, tmp);
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      Representation r = cgen_->IsSmi(constant_source)
+          ? Representation::Smi() : Representation::Integer32();
+      if (cgen_->IsInteger32(constant_source)) {
+        __ Move(dst, cgen_->ToImmediate(constant_source, r));
+      } else {
+        __ LoadObject(dst, cgen_->ToHandle(constant_source));
+      }
+    } else if (destination->IsDoubleRegister()) {
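+      // Build the 64-bit image on the stack: pushing the upper half first
+      // leaves the double little-endian at [esp], ready for the x87 load.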
+      double v = cgen_->ToDouble(constant_source);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
+      int32_t lower = static_cast<int32_t>(int_val);
+      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+      __ push(Immediate(upper));
+      __ push(Immediate(lower));
+      X87Register dst = cgen_->ToX87Register(destination);
+      cgen_->X87Mov(dst, MemOperand(esp, 0));
+      __ add(esp, Immediate(kDoubleSize));
+    } else {
+      DCHECK(destination->IsStackSlot());
+      Operand dst = cgen_->ToOperand(destination);
+      Representation r = cgen_->IsSmi(constant_source)
+          ? Representation::Smi() : Representation::Integer32();
+      if (cgen_->IsInteger32(constant_source)) {
+        __ Move(dst, cgen_->ToImmediate(constant_source, r));
+      } else {
+        Register tmp = EnsureTempRegister();
+        __ LoadObject(tmp, cgen_->ToHandle(constant_source));
+        __ mov(dst, tmp);
+      }
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    // Copy the double from the source register to the destination, which is
+    // either a double stack slot or another X87 register.
+    if (destination->IsDoubleStackSlot()) {
+      Operand dst = cgen_->ToOperand(destination);
+      X87Register src = cgen_->ToX87Register(source);
+      cgen_->X87Mov(dst, src);
+    } else {
+      X87Register dst = cgen_->ToX87Register(destination);
+      X87Register src = cgen_->ToX87Register(source);
+      cgen_->X87Mov(dst, src);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    // Load the stack slot onto the top of the floating-point stack, then
+    // store it in the destination. If the destination is a double register,
+    // it represents the top of the stack and nothing more needs to be done.
+    if (destination->IsDoubleStackSlot()) {
+      Register tmp = EnsureTempRegister();
+      Operand src0 = cgen_->ToOperand(source);
+      Operand src1 = cgen_->HighOperand(source);
+      Operand dst0 = cgen_->ToOperand(destination);
+      Operand dst1 = cgen_->HighOperand(destination);
+      __ mov(tmp, src0);  // Use tmp to copy the source to the destination.
+      __ mov(dst0, tmp);
+      __ mov(tmp, src1);
+      __ mov(dst1, tmp);
+    } else {
+      Operand src = cgen_->ToOperand(source);
+      X87Register dst = cgen_->ToX87Register(destination);
+      cgen_->X87Mov(dst, src);
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  RemoveMove(index);
+}
+
+
+void LGapResolver::EmitSwap(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+  EnsureRestored(source);
+  EnsureRestored(destination);
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister() && destination->IsRegister()) {
+    // Register-register.
+    Register src = cgen_->ToRegister(source);
+    Register dst = cgen_->ToRegister(destination);
+    __ xchg(dst, src);
+
+  } else if ((source->IsRegister() && destination->IsStackSlot()) ||
+             (source->IsStackSlot() && destination->IsRegister())) {
+    // Register-memory.  Use a free register as a temp if possible.  Do not
+    // spill on demand because the simple spill implementation cannot avoid
+    // spilling src at this point.
+    Register tmp = GetFreeRegisterNot(no_reg);
+    Register reg =
+        cgen_->ToRegister(source->IsRegister() ? source : destination);
+    Operand mem =
+        cgen_->ToOperand(source->IsRegister() ? destination : source);
+    if (tmp.is(no_reg)) {
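+      // No free register: swap in place using the classic three-XOR trick
+      // (a ^= b; b ^= a; a ^= b leaves the two values exchanged).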
+      __ xor_(reg, mem);
+      __ xor_(mem, reg);
+      __ xor_(reg, mem);
+    } else {
+      __ mov(tmp, mem);
+      __ mov(mem, reg);
+      __ mov(reg, tmp);
+    }
+
+  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+    // Memory-memory.  Spill on demand to use a temporary.  If there is a
+    // free register after that, use it as a second temporary.
+    Register tmp0 = EnsureTempRegister();
+    Register tmp1 = GetFreeRegisterNot(tmp0);
+    Operand src = cgen_->ToOperand(source);
+    Operand dst = cgen_->ToOperand(destination);
+    if (tmp1.is(no_reg)) {
+      // Only one temp register available to us.
+      __ mov(tmp0, dst);
+      __ xor_(tmp0, src);
+      __ xor_(src, tmp0);
+      __ xor_(tmp0, src);
+      __ mov(dst, tmp0);
+    } else {
+      __ mov(tmp0, dst);
+      __ mov(tmp1, src);
+      __ mov(dst, tmp1);
+      __ mov(src, tmp0);
+    }
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+
+  // The swap of source and destination has executed a move from source to
+  // destination.
+  RemoveMove(index);
+
+  // Any unperformed (including pending) move with a source of either
+  // this move's source or destination needs to have its source
+  // changed to reflect the state of affairs after the swap.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(source)) {
+      moves_[i].set_source(destination);
+    } else if (other_move.Blocks(destination)) {
+      moves_[i].set_source(source);
+    }
+  }
+
+  // In addition to swapping the actual uses as sources, we need to update
+  // the use counts.
+  if (source->IsRegister() && destination->IsRegister()) {
+    int temp = source_uses_[source->index()];
+    source_uses_[source->index()] = source_uses_[destination->index()];
+    source_uses_[destination->index()] = temp;
+  } else if (source->IsRegister()) {
+    // We don't have use counts for non-register operands like destination.
+    // Compute those counts now.
+    source_uses_[source->index()] = CountSourceUses(source);
+  } else if (destination->IsRegister()) {
+    source_uses_[destination->index()] = CountSourceUses(destination);
+  }
+}
+
+#undef __
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_X87
diff --git a/src/crankshaft/x87/lithium-gap-resolver-x87.h b/src/crankshaft/x87/lithium-gap-resolver-x87.h
new file mode 100644
index 0000000..6b6e2e6
--- /dev/null
+++ b/src/crankshaft/x87/lithium-gap-resolver-x87.h
@@ -0,0 +1,86 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
+#define V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
+
+#include "src/crankshaft/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver final BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // Emit any code necessary at the end of a gap move.
+  void Finish();
+
+  // Add or delete a move from the move graph without emitting any code.
+  // Used to build up the graph and remove trivial moves.
+  void AddMove(LMoveOperands move);
+  void RemoveMove(int index);
+
+  // Report the count of uses of operand as a source in a not-yet-performed
+  // move.  Used to rebuild use counts.
+  int CountSourceUses(LOperand* operand);
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Execute a move by emitting a swap of two operands.  The move from
+  // source to destination is removed from the move graph.
+  void EmitSwap(int index);
+
+  // Ensure that the given operand is not spilled.
+  void EnsureRestored(LOperand* operand);
+
+  // Return a register that can be used as a temp register, spilling
+  // something if necessary.
+  Register EnsureTempRegister();
+
+  // Return a known free register different from the given one (pass no_reg
+  // to accept any free register), or no_reg if there is no such register.
+  Register GetFreeRegisterNot(Register reg);
+
+  // Verify that the state is the initial one, ready to resolve a single
+  // parallel move.
+  bool HasBeenReset();
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  // Source and destination use counts for the general purpose registers.
+  int source_uses_[Register::kNumRegisters];
+  int destination_uses_[Register::kNumRegisters];
+
+  // If we had to spill on demand, the currently spilled register's
+  // allocation index.
+  int spilled_register_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_X87_LITHIUM_GAP_RESOLVER_X87_H_
diff --git a/src/crankshaft/x87/lithium-x87.cc b/src/crankshaft/x87/lithium-x87.cc
new file mode 100644
index 0000000..b422e12
--- /dev/null
+++ b/src/crankshaft/x87/lithium-x87.cc
@@ -0,0 +1,2700 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/crankshaft/x87/lithium-x87.h"
+
+#include <sstream>
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/crankshaft/hydrogen-osr.h"
+#include "src/crankshaft/lithium-inl.h"
+#include "src/crankshaft/x87/lithium-codegen-x87.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Inputs operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
+  DCHECK(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+bool LInstruction::HasDoubleRegisterResult() {
+  return HasResult() && result()->IsDoubleRegister();
+}
+
+
+bool LInstruction::HasDoubleRegisterInput() {
+  for (int i = 0; i < InputCount(); i++) {
+    LOperand* op = InputAt(i);
+    if (op != NULL && op->IsDoubleRegister()) {
+      return true;
+    }
+  }
+  return false;
+}
+
+
+bool LInstruction::IsDoubleInput(X87Register reg, LCodeGen* cgen) {
+  for (int i = 0; i < InputCount(); i++) {
+    LOperand* op = InputAt(i);
+    if (op != NULL && op->IsDoubleRegister()) {
+      if (cgen->ToX87Register(op).is(reg)) return true;
+    }
+  }
+  return false;
+}
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::ROR: return "ror-t";
+    case Token::SHL: return "sal-t";
+    case Token::SAR: return "sar-t";
+    case Token::SHR: return "shr-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(),
+              true_block_id(), false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+
+void LCallFunction::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  function()->PrintTo(stream);
+  if (hydrogen()->HasVectorAndSlot()) {
+    stream->Add(" (type-feedback-vector ");
+    temp_vector()->PrintTo(stream);
+    stream->Add(" ");
+    temp_slot()->PrintTo(stream);
+    stream->Add(")");
+  }
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  context()->PrintTo(stream);
+  stream->Add(" ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Skip a slot if necessary to keep double-width spill slots aligned.
+  if (kind == DOUBLE_REGISTERS) {
+    spill_slot_count_++;
+    spill_slot_count_ |= 1;
+    num_double_slots_++;
+  }
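+  // The returned index is therefore always odd for double-width slots, e.g.
+  // with spill_slot_count_ == 5 the increment gives 6, |= 1 bumps it to 7,
+  // and 7 is returned (slot 6 is left unused as padding).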
+  return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  int index = GetNextSpillIndex(kind);
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    DCHECK(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  std::ostringstream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.str().c_str());
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d]", base_offset());
+  } else {
+    stream->Add("]");
+  }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d] <-", base_offset());
+  } else {
+    stream->Add("] <- ");
+  }
+
+  if (value() == NULL) {
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
+           hydrogen()->value()->representation().IsDouble());
+    stream->Add("<the hole(nan)>");
+  } else {
+    value()->PrintTo(stream);
+  }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+  DCHECK(is_unused());
+  chunk_ = new(zone()) LPlatformChunk(info(), graph());
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // Reserve the first spill slot for the state of dynamic alignment.
+  if (info()->IsOptimizing()) {
+    int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    DCHECK_EQ(alignment_state_index, 0);
+    USE(alignment_state_index);
+  }
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER, reg.code());
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(X87Register reg) {
+  return new (zone())
+      LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER, reg.code());
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                             LUnallocated::USED_AT_START));
+}
+
+
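+// New-space objects can move during scavenges, so pointers to them are not
+// embedded as raw immediates in generated code; only constants outside new
+// space qualify.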
+static inline bool CanBeImmediateConstant(HValue* value) {
+  return value->IsConstant() && HConstant::cast(value)->NotInNewSpace();
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return CanBeImmediateConstant(value)
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return CanBeImmediateConstant(value)
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseFixedOrConstant(HValue* value,
+                                            Register fixed_register) {
+  return CanBeImmediateConstant(value)
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseFixed(value, fixed_register);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return CanBeImmediateConstant(value)
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return CanBeImmediateConstant(value)
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr,
+    int index) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+                                         Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+                                         X87Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator,
+                                           &objects_to_materialize));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  // If the instruction does not have observable side effects, lazy
+  // deoptimization after the call will try to deoptimize to the point
+  // before the call. Thus we still need to attach an environment to this
+  // call even if the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
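+  // CAN_DEOPTIMIZE_EAGERLY thus forces an environment even when the
+  // instruction has observable side effects (e.g. DoApplyArguments below).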
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  DCHECK(!instr->HasPointerMap());
+  instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+
+    HValue* right_value = instr->right();
+    LOperand* right = NULL;
+    int constant_value = 0;
+    bool does_deopt = false;
+    if (right_value->IsConstant()) {
+      HConstant* constant = HConstant::cast(right_value);
+      right = chunk_->DefineConstantOperand(constant);
+      constant_value = constant->Integer32Value() & 0x1f;
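+      // The & 0x1f mirrors ia32 shift semantics: hardware shift counts are
+      // taken modulo 32.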
+      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+      // truncated to smi.
+      if (instr->representation().IsSmi() && constant_value > 0) {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
+    } else {
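+      // Variable shift counts must live in ecx (cl) on this architecture.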
+      right = UseFixed(right_value, ecx);
+    }
+
+    // A logical shift right by 0 can also deoptimize, namely when the
+    // result cannot be truncated to int32.
+    if (op == Token::SHR && constant_value == 0) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    }
+
+    LInstruction* result =
+        DefineSameAsFirst(new(zone()) LShiftI(op, left, right, does_deopt));
+    return does_deopt ? AssignEnvironment(result) : result;
+  } else {
+    return DoArithmeticT(op, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
+  if (op == Token::MOD) {
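+    // Double MOD is not inlined; it is routed through a call, hence the
+    // MarkAsCall below.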
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return MarkAsCall(DefineSameAsFirst(result), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return DefineSameAsFirst(result);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left_operand = UseFixed(left, edx);
+  LOperand* right_operand = UseFixed(right, eax);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  DCHECK(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    DCHECK(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    DCHECK(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      DCHECK(pred->end()->FirstSuccessor() == block);
+    } else {
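+      // A successor with a higher block id has not been processed yet and
+      // will still read this environment, so it needs its own copy.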
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    DCHECK(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment, it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new(zone()) LDummy());
+    } else {
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new(zone())
+          LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    HBasicBlock* successor;
+    if (current->IsControlInstruction() &&
+        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+        successor != NULL) {
+      // Always insert an FPU register barrier here when a branch is
+      // optimized into a direct goto.
+      // TODO(weiliang): require a better solution.
+      if (!current->IsGoto()) {
+        LClobberDoubles* clobber = new (zone()) LClobberDoubles(isolate());
+        clobber->set_hydrogen_value(current);
+        chunk_->AddInstruction(clobber, current_block_);
+      }
+      instr = new(zone()) LGoto(successor);
+    } else {
+      instr = current->CompileToLithium(this);
+    }
+  }
+
+  argument_count_ += current->argument_delta();
+  DCHECK(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    AddInstruction(instr, current);
+  }
+
+  current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+                                   HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+  instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+  // Make sure that the lithium instruction has either no fixed register
+  // constraints in temps or the result, OR no uses that are only used at
+  // start. If this invariant doesn't hold, the register allocator can decide
+  // to insert a split of a range immediately before the instruction due to an
+  // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+  // the same register that is used for the use-at-start.
+  // See https://code.google.com/p/chromium/issues/detail?id=201590
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
+    int fixed = 0;
+    int used_at_start = 0;
+    for (UseIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->IsUsedAtStart()) ++used_at_start;
+    }
+    if (instr->Output() != NULL) {
+      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+    }
+    for (TempIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->HasFixedPolicy()) ++fixed;
+    }
+    DCHECK(fixed == 0 || used_at_start == 0);
+  }
+#endif
+
+  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+    instr = AssignPointerMap(instr);
+  }
+  if (FLAG_stress_environments && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+  if (instr->IsGoto() &&
+      (LGoto::cast(instr)->jumps_to_join() || next_block_->is_osr_entry())) {
+    // TODO(olivf) Since phis of spilled values are joined as registers
+    // (not in the stack slot), we need to allow the goto gaps to keep one
+    // x87 register alive. To ensure all other values are still spilled, we
+    // insert an FPU register barrier right before.
+    LClobberDoubles* clobber = new(zone()) LClobberDoubles(isolate());
+    clobber->set_hydrogen_value(hydrogen_val);
+    chunk_->AddInstruction(clobber, current_block_);
+  }
+  chunk_->AddInstruction(instr, current_block_);
+
+  if (instr->IsCall() || instr->IsPrologue()) {
+    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+    if (hydrogen_val->HasObservableSideEffects()) {
+      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+      sim->ReplayEnvironment(current_block_->last_environment());
+      hydrogen_value_for_lazy_bailout = sim;
+    }
+    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+    chunk_->AddInstruction(bailout, current_block_);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoPrologue(HPrologue* instr) {
+  return new (zone()) LPrologue();
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+      type.IsJSArray() || type.IsHeapNumber() || type.IsString();
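+  // Outside the easy cases a map check may be required, which needs a temp
+  // register and (below) a deopt environment.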
+  LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
+  LInstruction* branch =
+      temp != NULL ? new (zone()) LBranch(UseRegister(value), temp)
+                   : new (zone()) LBranch(UseRegisterAtStart(value), temp);
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
+  }
+  return branch;
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new(zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpMapAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new(zone()) LArgumentsLength(Use(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new(zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* left =
+      UseFixed(instr->left(), InstanceOfDescriptor::LeftRegister());
+  LOperand* right =
+      UseFixed(instr->right(), InstanceOfDescriptor::RightRegister());
+  LOperand* context = UseFixed(instr->context(), esi);
+  LInstanceOf* result = new (zone()) LInstanceOf(context, left, right);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInPrototypeChainAndBranch(
+    HHasInPrototypeChainAndBranch* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* prototype = UseRegister(instr->prototype());
+  LOperand* temp = TempRegister();
+  LHasInPrototypeChainAndBranch* result =
+      new (zone()) LHasInPrototypeChainAndBranch(object, prototype, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegister(instr->receiver());
+  LOperand* function = UseRegister(instr->function());
+  LOperand* temp = TempRegister();
+  LWrapReceiver* result =
+      new(zone()) LWrapReceiver(receiver, function, temp);
+  return AssignEnvironment(DefineSameAsFirst(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), edi);
+  LOperand* receiver = UseFixed(instr->receiver(), eax);
+  LOperand* length = UseFixed(instr->length(), ebx);
+  LOperand* elements = UseFixed(instr->elements(), ecx);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
+  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+  int argc = instr->OperandCount();
+  for (int i = 0; i < argc; ++i) {
+    LOperand* argument = UseAny(instr->argument(i));
+    AddInstruction(new(zone()) LPushArgument(argument), instr);
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+    HStoreCodeEntry* store_code_entry) {
+  LOperand* function = UseRegister(store_code_entry->function());
+  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+  return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(
+      new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new(zone()) LContext, esi);
+  }
+
+  return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+    HCallJSFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), edi);
+
+  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+    HCallWithDescriptor* instr) {
+  CallInterfaceDescriptor descriptor = instr->descriptor();
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  // Target
+  ops.Add(target, zone());
+  // Context
+  LOperand* op = UseFixed(instr->OperandAt(1), esi);
+  ops.Add(op, zone());
+  // Other register parameters
+  for (int i = LCallWithDescriptor::kImplicitRegisterParameterCount;
+       i < instr->OperandCount(); i++) {
+    op =
+        UseFixed(instr->OperandAt(i),
+                 descriptor.GetRegisterParameter(
+                     i - LCallWithDescriptor::kImplicitRegisterParameterCount));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+      descriptor, ops, zone());
+  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* function = UseFixed(instr->function(), edi);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, eax), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathFloor: return DoMathFloor(instr);
+    case kMathRound: return DoMathRound(instr);
+    case kMathFround: return DoMathFround(instr);
+    case kMathAbs: return DoMathAbs(instr);
+    case kMathLog: return DoMathLog(instr);
+    case kMathExp: return DoMathExp(instr);
+    case kMathSqrt: return DoMathSqrt(instr);
+    case kMathPowHalf: return DoMathPowHalf(instr);
+    case kMathClz32: return DoMathClz32(instr);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathFloor* result = new(zone()) LMathFloor(input);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LInstruction* result = DefineAsRegister(new (zone()) LMathRound(input));
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+  LOperand* context = UseAny(instr->context());  // Deferred use.
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineSameAsFirst(new(zone()) LMathAbs(context, input));
+  Representation r = instr->value()->representation();
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegisterAtStart(instr->value());
+  return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathClz32* result = new(zone()) LMathClz32(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp1 = FixedTemp(ecx);
+  LOperand* temp2 = FixedTemp(edx);
+  LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+  return MarkAsCall(DefineSameAsFirst(result), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LOperand* temp1 = FixedTemp(ecx);
+  LOperand* temp2 = FixedTemp(edx);
+  LMathSqrt* result = new(zone()) LMathSqrt(input, temp1, temp2);
+  return MarkAsCall(DefineSameAsFirst(result), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathPowHalf* result = new (zone()) LMathPowHalf(input);
+  return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* constructor = UseFixed(instr->constructor(), edi);
+  LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* function = UseFixed(instr->function(), edi);
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(edx);
+    vector = FixedTemp(ebx);
+  }
+
+  LCallFunction* call =
+      new (zone()) LCallFunction(context, function, slot, vector);
+  return MarkAsCall(DefineFixed(call, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    return DefineSameAsFirst(new(zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+          dividend, divisor));
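+  // Deopt cases: a negative divisor can turn 0 / divisor into -0, kMinInt
+  // divided by -1 overflows, and any other divisor can lose a remainder
+  // unless all uses truncate to int32.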
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+       divisor != 1 && divisor != -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(eax);
+  LOperand* temp2 = FixedTemp(edx);
+  LInstruction* result = DefineFixed(new(zone()) LDivByConstI(
+          dividend, divisor, temp1, temp2), edx);
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseFixed(instr->left(), eax);
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = FixedTemp(edx);
+  LInstruction* result = DefineFixed(new(zone()) LDivI(
+          dividend, divisor, temp), eax);
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      instr->CheckFlag(HValue::kCanOverflow) ||
+      !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineSameAsFirst(new(zone()) LFlooringDivByPowerOf2I(
+          dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(eax);
+  LOperand* temp2 = FixedTemp(edx);
+  LOperand* temp3 =
+      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+      NULL : TempRegister();
+  LInstruction* result =
+      DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+                                                   divisor,
+                                                   temp1,
+                                                   temp2,
+                                                   temp3),
+                  edx);
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseFixed(instr->left(), eax);
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = FixedTemp(edx);
+  LInstruction* result = DefineFixed(new(zone()) LFlooringDivI(
+          dividend, divisor, temp), eax);
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      instr->CheckFlag(HValue::kCanOverflow)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
+    return DoFlooringDivByConstI(instr);
+  } else {
+    return DoFlooringDivI(instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+          dividend, divisor));
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(eax);
+  LOperand* temp2 = FixedTemp(edx);
+  LInstruction* result = DefineFixed(new(zone()) LModByConstI(
+          dividend, divisor, temp1, temp2), eax);
+  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseFixed(instr->left(), eax);
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = FixedTemp(edx);
+  LInstruction* result = DefineFixed(new(zone()) LModI(
+          dividend, divisor, temp), edx);
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
+  } else {
+    return DoArithmeticT(Token::MOD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstant(instr->BetterRightOperand());
+    LOperand* temp = NULL;
+    if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      temp = TempRegister();
+    }
+    LMulI* mul = new(zone()) LMulI(left, right, temp);
+    if (instr->CheckFlag(HValue::kCanOverflow) ||
+        instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      AssignEnvironment(mul);
+    }
+    return DefineSameAsFirst(mul);
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LSubI* sub = new(zone()) LSubI(left, right);
+    LInstruction* result = DefineSameAsFirst(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    // Check to see if it would be advantageous to use an lea instruction rather
+    // than an add. This is the case when no overflow check is needed and there
+    // are multiple uses of the add's inputs, so using a 3-register add will
+    // preserve all input values for later uses.
+    bool use_lea = LAddI::UseLea(instr);
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    HValue* right_candidate = instr->BetterRightOperand();
+    LOperand* right = use_lea
+        ? UseRegisterOrConstantAtStart(right_candidate)
+        : UseOrConstantAtStart(right_candidate);
+    LAddI* add = new(zone()) LAddI(left, right);
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    LInstruction* result = use_lea
+        ? DefineAsRegister(add)
+        : DefineSameAsFirst(add);
+    if (can_overflow) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::ADD, instr);
+  } else if (instr->representation().IsExternal()) {
+    DCHECK(instr->IsConsistentExternalRepresentation());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+    bool use_lea = LAddI::UseLea(instr);
+    LOperand* left = UseRegisterAtStart(instr->left());
+    HValue* right_candidate = instr->right();
+    LOperand* right = use_lea
+        ? UseRegisterOrConstantAtStart(right_candidate)
+        : UseOrConstantAtStart(right_candidate);
+    LAddI* add = new(zone()) LAddI(left, right);
+    LInstruction* result = use_lea
+        ? DefineAsRegister(add)
+        : DefineSameAsFirst(add);
+    return result;
+  } else {
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  LOperand* scratch = TempRegister();
+
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    left = UseRegisterAtStart(instr->left());
+    right = UseRegisterAtStart(instr->right());
+  }
+  LMathMinMax* minmax = new (zone()) LMathMinMax(left, right, scratch);
+  return DefineSameAsFirst(minmax);
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  // Unlike ia32, we don't have a MathPowStub; we call the C function directly.
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LPower* result = new (zone()) LPower(left, right);
+  return MarkAsCall(DefineSameAsFirst(result), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  DCHECK(instr->left()->representation().IsSmiOrTagged());
+  DCHECK(instr->right()->representation().IsSmiOrTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left = UseFixed(instr->left(), edx);
+  LOperand* right = UseFixed(instr->right(), eax);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    LOperand* left;
+    LOperand* right;
+    if (CanBeImmediateConstant(instr->left()) &&
+        CanBeImmediateConstant(instr->right())) {
+      // The code generator requires either both inputs to be constant
+      // operands, or neither.
+      left = UseConstant(instr->left());
+      right = UseConstant(instr->right());
+    } else {
+      left = UseRegisterAtStart(instr->left());
+      right = UseRegisterAtStart(instr->right());
+    }
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseOrConstantAtStart(instr->right());
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+    HCompareMinusZeroAndBranch* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LCompareMinusZeroAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsUndetectableAndBranch(
+      UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left = UseFixed(instr->left(), edx);
+  LOperand* right = UseFixed(instr->right(), eax);
+
+  LStringCompareAndBranch* result = new(zone())
+      LStringCompareAndBranch(context, left, right);
+
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LHasInstanceTypeAndBranch(
+      UseRegisterAtStart(instr->value()),
+      TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+                                           TempRegister(),
+                                           TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+  LOperand* map = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LOperand* LChunkBuilder::GetSeqStringSetCharOperand(HSeqStringSetChar* instr) {
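+  // Under --debug-code the stored value is verified at run time, which
+  // requires it in a register; otherwise a constant operand is fine.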
+  if (instr->encoding() == String::ONE_BYTE_ENCODING) {
+    if (FLAG_debug_code) {
+      return UseFixed(instr->value(), eax);
+    } else {
+      return UseFixedOrConstant(instr->value(), eax);
+    }
+  } else {
+    if (FLAG_debug_code) {
+      return UseRegisterAtStart(instr->value());
+    } else {
+      return UseRegisterOrConstantAtStart(instr->value());
+    }
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = FLAG_debug_code
+      ? UseRegisterAtStart(instr->index())
+      : UseRegisterOrConstantAtStart(instr->index());
+  LOperand* value = GetSeqStringSetCharOperand(instr);
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), esi) : NULL;
+  LInstruction* result = new(zone()) LSeqStringSetChar(context, string,
+                                                       index, value);
+  if (FLAG_debug_code) {
+    result = MarkAsCall(result, instr);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+      ? UseOrConstantAtStart(instr->length())
+      : UseAtStart(instr->length());
+  LInstruction* result = new(zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+    HBoundsCheckBaseIndexInformation* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception).  There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
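+// Lowers a representation change by dispatching on the <from, to> pair.
+// Smi sources are first widened to Tagged so the Tagged cases below cover
+// them as well.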
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  HValue* val = instr->value();
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(val);
+      return DefineSameAsFirst(new(zone()) LDummyUse(value));
+    }
+    from = Representation::Tagged();
+  }
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(val);
+      LOperand* temp = TempRegister();
+      LInstruction* result =
+          DefineAsRegister(new(zone()) LNumberUntagD(value, temp));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
+        return DefineSameAsFirst(new(zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegister(val);
+        return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(val);
+        LInstruction* result = DefineSameAsFirst(new(zone()) LTaggedToI(value));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegisterAtStart(val);
+      LOperand* temp = FLAG_inline_new ? TempRegister() : NULL;
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp);
+      return AssignPointerMap(Define(result, result_temp));
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      return AssignEnvironment(
+          DefineAsRegister(new(zone()) LDoubleToSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      bool truncating = instr->CanTruncateToInt32();
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+      if (!truncating) result = AssignEnvironment(result);
+      return result;
+    }
+  } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      if (!instr->CheckFlag(HValue::kCanOverflow)) {
+        LOperand* value = UseRegister(val);
+        return DefineSameAsFirst(new(zone()) LSmiTag(value));
+      } else if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegister(val);
+        LOperand* temp = TempRegister();
+        LNumberTagU* result = new(zone()) LNumberTagU(value, temp);
+        return AssignPointerMap(DefineSameAsFirst(result));
+      } else {
+        LOperand* value = UseRegister(val);
+        LOperand* temp = TempRegister();
+        LNumberTagI* result = new(zone()) LNumberTagI(value, temp);
+        return AssignPointerMap(DefineSameAsFirst(result));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineSameAsFirst(new(zone()) LSmiTag(value));
+      if (instr->CheckFlag(HValue::kCanOverflow)) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(to.IsDouble());
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
+      } else {
+        return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckNonSmi(value);
+  if (!instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckArrayBufferNotNeutered(
+    HCheckArrayBufferNotNeutered* instr) {
+  LOperand* view = UseRegisterAtStart(instr->value());
+  LOperand* scratch = TempRegister();
+  LCheckArrayBufferNotNeutered* result =
+      new (zone()) LCheckArrayBufferNotNeutered(view, scratch);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  LCheckInstanceType* result = new(zone()) LCheckInstanceType(value, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  // If the object is in new space, we'll emit a global cell compare and so
+  // want the value in a register.  If the object gets promoted before we
+  // emit code, we will still get the register but will do an immediate
+  // compare instead of the cell compare.  This is safe.
+  LOperand* value = instr->object_in_new_space()
+      ? UseRegisterAtStart(instr->value()) : UseAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
+  }
+  return result;
+}
+
+
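+// Clamps a value to the uint8 range [0, 255] used by clamped typed arrays.
+// The tagged case must cope with smis, heap numbers and undefined, hence the
+// extra temps and the deopt environment.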
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  if (input_rep.IsDouble()) {
+    LOperand* reg = UseRegister(value);
+    return DefineFixed(new (zone()) LClampDToUint8(reg), eax);
+  } else if (input_rep.IsInteger32()) {
+    LOperand* reg = UseFixed(value, eax);
+    return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
+  } else {
+    DCHECK(input_rep.IsSmiOrTagged());
+    LOperand* value = UseRegister(instr->value());
+    LClampTToUint8NoSSE2* res =
+        new(zone()) LClampTToUint8NoSSE2(value, TempRegister(),
+                                         TempRegister(), TempRegister());
+    return AssignEnvironment(DefineFixed(res, ecx));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+  HValue* value = instr->value();
+  DCHECK(value->representation().IsDouble());
+  return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+  LOperand* lo = UseRegister(instr->lo());
+  LOperand* hi = UseRegister(instr->hi());
+  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub() ? UseFixed(instr->context(), esi) : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new(zone()) LReturn(
+      UseFixed(instr->value(), eax), context, parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new(zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new (zone()) LConstantD);
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new(zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new(zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* value;
+  LOperand* temp;
+  LOperand* context = UseRegister(instr->context());
+  if (instr->NeedsWriteBarrier()) {
+    value = UseTempRegister(instr->value());
+    temp = TempRegister();
+  } else {
+    value = UseRegister(instr->value());
+    temp = NULL;
+  }
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  LOperand* obj = (instr->access().IsExternalMemory() &&
+                   instr->access().offset() == 0)
+      ? UseRegisterOrConstantAtStart(instr->object())
+      : UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
+      context, object, vector);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()),
+                                         TempRegister())));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
+  ElementsKind elements_kind = instr->elements_kind();
+  bool clobbers_key = ExternalArrayOpRequiresTemp(
+      instr->key()->representation(), elements_kind);
+  LOperand* key = clobbers_key
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+  LInstruction* result = NULL;
+
+  if (!instr->is_fixed_typed_array()) {
+    LOperand* obj = UseRegisterAtStart(instr->elements());
+    result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key, nullptr));
+  } else {
+    DCHECK(
+        (instr->representation().IsInteger32() &&
+         !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
+        (instr->representation().IsDouble() &&
+         (IsDoubleOrFloatElementsKind(instr->elements_kind()))));
+    LOperand* backing_store = UseRegister(instr->elements());
+    LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+    result = DefineAsRegister(
+        new (zone()) LLoadKeyed(backing_store, key, backing_store_owner));
+  }
+
+  bool needs_environment;
+  if (instr->is_fixed_typed_array()) {
+    // See LCodeGen::DoLoadKeyedExternalArray.
+    needs_environment = elements_kind == UINT32_ELEMENTS &&
+                        !instr->CheckFlag(HInstruction::kUint32);
+  } else {
+    // See LCodeGen::DoLoadKeyedFixedDoubleArray and
+    // LCodeGen::DoLoadKeyedFixedArray.
+    needs_environment =
+        instr->RequiresHoleCheck() ||
+        (instr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED && info()->IsStub());
+  }
+
+  if (needs_environment) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    vector = FixedTemp(LoadWithVectorDescriptor::VectorRegister());
+  }
+  LLoadKeyedGeneric* result =
+      new(zone()) LLoadKeyedGeneric(context, object, key, vector);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
+  ElementsKind elements_kind = instr->elements_kind();
+
+  // Determine whether the value needs to be in a byte register.
+  bool val_is_fixed_register =
+      elements_kind == UINT8_ELEMENTS ||
+      elements_kind == INT8_ELEMENTS ||
+      elements_kind == UINT8_CLAMPED_ELEMENTS;
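+  // Only eax, ebx, ecx and edx have byte variants on ia32, so pin the value
+  // to eax (see also the mov_b comment in DoStoreNamedField).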
+  if (val_is_fixed_register) {
+    return UseFixed(instr->value(), eax);
+  }
+
+  if (IsDoubleOrFloatElementsKind(elements_kind)) {
+    return UseRegisterAtStart(instr->value());
+  }
+
+  return UseRegister(instr->value());
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  if (!instr->is_fixed_typed_array()) {
+    DCHECK(instr->elements()->representation().IsTagged());
+    DCHECK(instr->key()->representation().IsInteger32() ||
+           instr->key()->representation().IsSmi());
+
+    if (instr->value()->representation().IsDouble()) {
+      LOperand* object = UseRegisterAtStart(instr->elements());
+      // When storing the double hole, no FP register is required.
+      LOperand* val = instr->IsConstantHoleStore()
+                          ? NULL
+                          : UseRegisterAtStart(instr->value());
+      LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+      return new (zone()) LStoreKeyed(object, key, val, nullptr);
+    } else {
+      DCHECK(instr->value()->representation().IsSmiOrTagged());
+      bool needs_write_barrier = instr->NeedsWriteBarrier();
+
+      LOperand* obj = UseRegister(instr->elements());
+      LOperand* val;
+      LOperand* key;
+      if (needs_write_barrier) {
+        val = UseTempRegister(instr->value());
+        key = UseTempRegister(instr->key());
+      } else {
+        val = UseRegisterOrConstantAtStart(instr->value());
+        key = UseRegisterOrConstantAtStart(instr->key());
+      }
+      return new (zone()) LStoreKeyed(obj, key, val, nullptr);
+    }
+  }
+
+  ElementsKind elements_kind = instr->elements_kind();
+  DCHECK(
+      (instr->value()->representation().IsInteger32() &&
+       !IsDoubleOrFloatElementsKind(elements_kind)) ||
+      (instr->value()->representation().IsDouble() &&
+       IsDoubleOrFloatElementsKind(elements_kind)));
+  DCHECK(instr->elements()->representation().IsExternal());
+
+  LOperand* backing_store = UseRegister(instr->elements());
+  LOperand* val = GetStoreKeyedValueOperand(instr);
+  bool clobbers_key = ExternalArrayOpRequiresTemp(
+      instr->key()->representation(), elements_kind);
+  LOperand* key = clobbers_key
+      ? UseTempRegister(instr->key())
+      : UseRegisterOrConstantAtStart(instr->key());
+  LOperand* backing_store_owner = UseAny(instr->backing_store_owner());
+  return new (zone()) LStoreKeyed(backing_store, key, val, backing_store_owner);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
+
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreKeyedGeneric* result = new (zone())
+      LStoreKeyedGeneric(context, object, key, value, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
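+// A "simple" elements-kind transition only rewrites the object's map word
+// and needs a couple of temps; any other transition calls into the runtime
+// and is therefore marked as a call.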
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LOperand* temp_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, NULL,
+                                            new_map_reg, temp_reg);
+    return result;
+  } else {
+    LOperand* object = UseFixed(instr->object(), eax);
+    LOperand* context = UseFixed(instr->context(), esi);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, context, NULL, NULL);
+    return MarkAsCall(result, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMaybeGrowElements(HMaybeGrowElements* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object = Use(instr->object());
+  LOperand* elements = Use(instr->elements());
+  LOperand* key = UseRegisterOrConstant(instr->key());
+  LOperand* current_capacity = UseRegisterOrConstant(instr->current_capacity());
+
+  LMaybeGrowElements* result = new (zone())
+      LMaybeGrowElements(context, object, elements, key, current_capacity);
+  DefineFixed(result, eax);
+  return AssignPointerMap(AssignEnvironment(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool is_in_object = instr->access().IsInobject();
+  bool is_external_location = instr->access().IsExternalMemory() &&
+      instr->access().offset() == 0;
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  bool needs_write_barrier_for_map = instr->has_transition() &&
+      instr->NeedsWriteBarrierForMap();
+
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = is_in_object
+        ? UseRegister(instr->object())
+        : UseTempRegister(instr->object());
+  } else if (is_external_location) {
+    DCHECK(!is_in_object);
+    DCHECK(!needs_write_barrier);
+    DCHECK(!needs_write_barrier_for_map);
+    obj = UseRegisterOrConstant(instr->object());
+  } else {
+    obj = needs_write_barrier_for_map
+        ? UseRegister(instr->object())
+        : UseRegisterAtStart(instr->object());
+  }
+
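+  // A constant can be stored as an immediate, but only if it is an old-space
+  // value (otherwise the store would need a write barrier) and the field is
+  // not a double field.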
+  bool can_be_constant = instr->value()->IsConstant() &&
+      HConstant::cast(instr->value())->NotInNewSpace() &&
+      !instr->field_representation().IsDouble();
+
+  LOperand* val;
+  if (instr->field_representation().IsInteger8() ||
+      instr->field_representation().IsUInteger8()) {
+    // mov_b requires a byte register (i.e. any of eax, ebx, ecx, edx).
+    // Just force the value to be in eax and we're safe here.
+    val = UseFixed(instr->value(), eax);
+  } else if (needs_write_barrier) {
+    val = UseTempRegister(instr->value());
+  } else if (can_be_constant) {
+    val = UseRegisterOrConstant(instr->value());
+  } else if (instr->field_representation().IsDouble()) {
+    val = UseRegisterAtStart(instr->value());
+  } else {
+    val = UseRegister(instr->value());
+  }
+
+  // We only need a scratch register if we have a write barrier or we are
+  // storing into the properties array (rather than an in-object property).
+  LOperand* temp = (!is_in_object || needs_write_barrier ||
+                    needs_write_barrier_for_map) ? TempRegister() : NULL;
+
+  // We need a temporary register for the write barrier on the map field.
+  LOperand* temp_map = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+  return new(zone()) LStoreNamedField(obj, val, temp, temp_map);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (instr->HasVectorAndSlot()) {
+    slot = FixedTemp(VectorStoreICDescriptor::SlotRegister());
+    vector = FixedTemp(VectorStoreICDescriptor::VectorRegister());
+  }
+
+  LStoreNamedGeneric* result =
+      new (zone()) LStoreNamedGeneric(context, object, value, slot, vector);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* left = UseFixed(instr->left(), edx);
+  LOperand* right = UseFixed(instr->right(), eax);
+  LStringAdd* string_add = new(zone()) LStringAdd(context, left, right);
+  return MarkAsCall(DefineFixed(string_add, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = instr->size()->IsConstant()
+      ? UseConstant(instr->size())
+      : UseTempRegister(instr->size());
+  LOperand* temp = TempRegister();
+  LAllocate* result = new(zone()) LAllocate(context, size, temp);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  DCHECK(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
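+// Parameters live either in a known stack slot (normal frames) or, for
+// stubs, in a fixed register dictated by the call interface descriptor.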
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new(zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor.GetRegisterParameter(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk()->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Retry(kNotEnoughSpillSlotsForOsr);
+      spill_index = 0;
+    }
+    if (spill_index == 0) {
+      // The dynamic frame alignment state overwrites the first local.
+      // The first local is saved at the end of the unoptimized frame.
+      spill_index = graph()->osr()->UnoptimizedFrameSlots();
+    }
+  }
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LCallStub* result = new(zone()) LCallStub(context);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object: arguments.length and
+  // element access are supported directly on stack arguments, and any real
+  // use of the arguments object causes a bailout. So this value is never
+  // used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* args = UseRegister(instr->arguments());
+  LOperand* length;
+  LOperand* index;
+  if (instr->length()->IsConstant() && instr->index()->IsConstant()) {
+    length = UseRegisterOrConstant(instr->length());
+    index = UseOrConstant(instr->index());
+  } else {
+    length = UseTempRegister(instr->length());
+    index = Use(instr->index());
+  }
+  return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), eax);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* value = UseFixed(instr->value(), ebx);
+  LTypeof* result = new(zone()) LTypeof(context, value);
+  return MarkAsCall(DefineFixed(result, eax), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+  return NULL;
+}
+
+
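+// Function-entry stack checks call the stack guard directly; back-edge
+// checks only call it in deferred code and therefore need an environment
+// and a pointer map.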
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  info()->MarkAsDeferredCalling();
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), esi);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
+  } else {
+    DCHECK(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->inlining_kind());
+  // Only replay the binding of the arguments object if it wasn't removed
+  // from the graph.
+  if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->BindContext(instr->closure_context());
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedFunction(instr->shared());
+  return NULL;
+}
+
+
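+// When leaving an inlined function, drop any arguments that were pushed for
+// it and switch back to the outer environment.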
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer = current_block_->last_environment()->
+      DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+  return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* object = UseFixed(instr->enumerable(), eax);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, eax), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->context());
+  return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+    HAllocateBlockContext* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LAllocateBlockContext* result =
+      new(zone()) LAllocateBlockContext(context, function);
+  return MarkAsCall(DefineFixed(result, esi), instr);
+}
+
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_X87
diff --git a/src/crankshaft/x87/lithium-x87.h b/src/crankshaft/x87/lithium-x87.h
new file mode 100644
index 0000000..e033902
--- /dev/null
+++ b/src/crankshaft/x87/lithium-x87.h
@@ -0,0 +1,2798 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CRANKSHAFT_X87_LITHIUM_X87_H_
+#define V8_CRANKSHAFT_X87_LITHIUM_X87_H_
+
+#include "src/crankshaft/hydrogen.h"
+#include "src/crankshaft/lithium.h"
+#include "src/crankshaft/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+namespace compiler {
+class RCodeVisualizer;
+}  // namespace compiler
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(AllocateBlockContext)                    \
+  V(Allocate)                                \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckArrayBufferNotNeutered)             \
+  V(CheckInstanceType)                       \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8NoSSE2)                     \
+  V(ClassOfTestAndBranch)                    \
+  V(ClobberDoubles)                          \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToI)                               \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInPrototypeChainAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(LoadRoot)                                \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(MaybeGrowElements)                       \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(Prologue)                                \
+  V(PushArgument)                            \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(TaggedToI)                               \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
+
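+// Boilerplate every concrete instruction defines: its opcode, the code
+// generation entry point, a printable mnemonic and a checked downcast.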
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const final { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) final;              \
+  const char* Mnemonic() const final { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
+  }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
+
+
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) {
+  }
+
+  virtual ~LInstruction() {}
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter,
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    // We only have rudimentary X87Stack tracking, so in general we cannot
+    // handle phi nodes; control instructions clobber as well.
+    return IsCall() || IsControl();
+  }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  bool HasDoubleRegisterResult();
+  bool HasDoubleRegisterInput();
+  bool IsDoubleInput(X87Register reg, LCodeGen* cgen);
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
+ private:
+  // Iterator support.
+  friend class InputIterator;
+
+  friend class TempIterator;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  class IsCallBits: public BitField<bool, 0, 1> {};
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  bool HasResult() const final { return R != 0 && result() != NULL; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const override { return results_[0]; }
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  int InputCount() final { return I; }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return T; }
+  LOperand* TempAt(int i) final { return temps_[i]; }
+};
+
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block) : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use the DECLARE-macro here because of sub-classes.
+  bool IsGap() const final { return true; }
+  void PrintDataTo(StringStream* stream) override;
+  static LGap* cast(LInstruction* instr) {
+    DCHECK(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
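+  // A gap may hold a parallel move at each of four positions relative to
+  // the neighbouring instructions; each position is resolved separately.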
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new(zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LInstructionGap final : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LClobberDoubles final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LClobberDoubles(Isolate* isolate) { }
+
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override { return true; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d")
+};
+
+
+class LGoto final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  void PrintDataTo(StringStream* stream) override;
+  bool IsControl() const override { return true; }
+
+  int block_id() const { return block_->block_id(); }
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+    return false;
+  }
+
+  bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
+  HBasicBlock* block() const { return block_; }
+
+ private:
+  HBasicBlock* block_;
+};
+
+
+class LPrologue final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Prologue, "prologue")
+};
+
+
+class LLazyBailout final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+};
+
+
+class LDummy final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() {}
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) {
+    inputs_[0] = value;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool IsControl() const override { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel final : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallStub(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LUnknownOSRValue final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
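+// Base class for control (branching) instructions. It resolves and caches
+// the true/false successors' chunk destinations and assembly labels.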
+template<int I, int T>
+class LControlInstruction: public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+  bool IsControl() const final { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+
+  Label* false_label_;
+  Label* true_label_;
+};
+
+
+class LWrapReceiver final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LWrapReceiver(LOperand* receiver,
+                LOperand* function,
+                LOperand* temp) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+    temps_[0] = temp;
+  }
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+};
+
+
+class LApplyArguments final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+};
+
+
+class LAccessArgumentsAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LArgumentsLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LDebugBreak final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LModByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModByConstI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LModByConstI(LOperand* dividend,
+               int32_t divisor,
+               LOperand* temp1,
+               LOperand* temp2) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LModI(LOperand* left, LOperand* right, LOperand* temp) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivByConstI final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LDivByConstI(LOperand* dividend,
+               int32_t divisor,
+               LOperand* temp1,
+               LOperand* temp2) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI final : public LTemplateInstruction<1, 1, 3> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend,
+                       int32_t divisor,
+                       LOperand* temp1,
+                       LOperand* temp2,
+                       LOperand* temp3) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* temp3() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LMulI(LOperand* left, LOperand* right, LOperand* temp) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+class LCompareNumericAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->representation().IsDouble();
+  }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LMathFloor final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFloor(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathRound(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathFround final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathAbs(LOperand* context, LOperand* value) {
+    inputs_[1] = context;
+    inputs_[0] = value;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathLog(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathClz32(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LMathExp(LOperand* value,
+           LOperand* temp1,
+           LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt final : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LMathSqrt(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch final : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+};
+
+
+class LCmpHoleAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranch(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCompareMinusZeroAndBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+                               "cmp-minus-zero-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsStringAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsSmiAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LIsUndetectableAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStringCompareAndBranch final : public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LHasInstanceTypeAndBranch final : public LControlInstruction<1, 1> {
+ public:
+  LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LGetCachedArrayIndex final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LClassOfTestAndBranch final : public LControlInstruction<1, 2> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+  LOperand* context() { return inputs_[0]; }
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() const { return inputs_[0]; }
+  LOperand* left() const { return inputs_[1]; }
+  LOperand* right() const { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LHasInPrototypeChainAndBranch final : public LControlInstruction<2, 1> {
+ public:
+  LHasInPrototypeChainAndBranch(LOperand* object, LOperand* prototype,
+                                LOperand* scratch) {
+    inputs_[0] = object;
+    inputs_[1] = prototype;
+    temps_[0] = scratch;
+  }
+
+  LOperand* object() const { return inputs_[0]; }
+  LOperand* prototype() const { return inputs_[1]; }
+  LOperand* scratch() const { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInPrototypeChainAndBranch,
+                               "has-in-prototype-chain-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInPrototypeChainAndBranch)
+};
+
+
+class LBoundsCheck final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+
+  Token::Value op() const { return hydrogen()->op(); }
+};
+
+
+class LShiftI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+  Token::Value op() const { return op_; }
+  bool can_deopt() const { return can_deopt_; }
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD final : public LTemplateInstruction<1, 0, 1> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  uint64_t bits() const { return hydrogen()->DoubleValueAsBits(); }
+};
+
+
+class LConstantE final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+
+class LConstantT final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+
+class LBranch final : public LControlInstruction<1, 1> {
+ public:
+  LBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LCmpMapAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpMapAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMapEnumLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LSeqStringGetChar final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSeqStringGetChar(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  LOperand* string() const { return inputs_[0]; }
+  LOperand* index() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar final : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LSeqStringSetChar(LOperand* context,
+                    LOperand* string,
+                    LOperand* index,
+                    LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+  }
+
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  static bool UseLea(HAdd* add) {
+    return !add->CheckFlag(HValue::kCanOverflow) &&
+        add->BetterLeftOperand()->UseCount() > 1;
+  }
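+
+  // Rationale (editorial note): on ia32/x87, "lea" computes left + right
+  // without setting flags, so it cannot be used when overflow must be
+  // detected; it pays off when the left operand has other uses, since a
+  // plain "add" with a same-as-first result would clobber it.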
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right, LOperand* temp) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+    temps_[0] = temp;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return op_; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op,
+               LOperand* context,
+               LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+  Token::Value op() const { return op_; }
+
+  Opcode opcode() const override { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) override;
+  const char* Mnemonic() const override;
+
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+
+  Strength strength() { return hydrogen()->strength(); }
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn final : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    DCHECK(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+  DECLARE_HYDROGEN_ACCESSOR(Return)
+};
+
+
+class LLoadNamedField final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
+    inputs_[0] = function;
+    temps_[0] = temp;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LLoadKeyed final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key, LOperand* backing_store_owner) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+    inputs_[2] = backing_store_owner;
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* backing_store_owner() { return inputs_[2]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
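+  // Note: a tagged key representation is treated here as a smi-tagged key
+  // (hence the accessor's name); untagged keys use an int32 representation.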
+  bool key_is_smi() {
+    return hydrogen()->key()->representation().IsTagged();
+  }
+};
+
+
+inline static bool ExternalArrayOpRequiresTemp(
+    Representation key_representation,
+    ElementsKind elements_kind) {
+  // Operations where the key must be divided by two before it can be used
+  // as an index cannot fold the scale operation into the load and need an
+  // extra temp register to do the conversion.
+  return key_representation.IsSmi() &&
+         (elements_kind == UINT8_ELEMENTS || elements_kind == INT8_ELEMENTS ||
+          elements_kind == UINT8_CLAMPED_ELEMENTS);
+}
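+
+// Illustrative usage (editorial sketch mirroring a typical DoLoadKeyed in
+// the platform LChunkBuilder; the variable names are assumptions):
+//
+//   bool clobbers_key = ExternalArrayOpRequiresTemp(
+//       instr->key()->representation(), instr->elements_kind());
+//   LOperand* key = clobbers_key
+//       ? UseTempRegister(instr->key())
+//       : UseRegisterOrConstantAtStart(instr->key());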
+
+
+class LLoadKeyedGeneric final : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
+                    LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = obj;
+    inputs_[2] = key;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+
+class LLoadGlobalGeneric final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  TypeofMode typeof_mode() const { return hydrogen()->typeof_mode(); }
+};
+
+
+class LLoadContextSlot final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStoreContextSlot final : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LPushArgument final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
+class LStoreCodeEntry final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) override;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext final : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallJSFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallWithDescriptor final : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : inputs_(descriptor.GetRegisterParameterCount() +
+                    kImplicitRegisterParameterCount,
+                zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() +
+               kImplicitRegisterParameterCount ==
+           operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  // The target and context are passed as implicit parameters that are not
+  // explicitly listed in the descriptor.
+  static const int kImplicitRegisterParameterCount = 2;
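+
+  // Under this layout, inputs_[0] is the call target (see target() above)
+  // and inputs_[1] is assumed to hold the implicit context, followed by the
+  // descriptor's explicit register parameters.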
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  int InputCount() final { return inputs_.length(); }
+  LOperand* InputAt(int i) final { return inputs_[i]; }
+
+  int TempCount() final { return 0; }
+  LOperand* TempAt(int i) final { return NULL; }
+};
+
+
+class LInvokeFunction final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction final : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function, LOperand* slot,
+                LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  void PrintDataTo(StringStream* stream) override;
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  bool ClobbersDoubleRegisters(Isolate* isolate) const override {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagI final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberTagI(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagU final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberTagU(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LDoubleToSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LTaggedToI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD final : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LNumberUntagD(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+  bool needs_check() const { return needs_check_; }
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField final : public LTemplateInstruction<0, 2, 2> {
+ public:
+  LStoreNamedField(LOperand* obj,
+                   LOperand* val,
+                   LOperand* temp,
+                   LOperand* temp_map) {
+    inputs_[0] = obj;
+    inputs_[1] = val;
+    temps_[0] = temp;
+    temps_[1] = temp_map;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp_map() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LStoreNamedGeneric final : public LTemplateInstruction<0, 3, 2> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value,
+                     LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+  Handle<Object> name() const { return hydrogen()->name(); }
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LStoreKeyed final : public LTemplateInstruction<0, 4, 0> {
+ public:
+  LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val,
+              LOperand* backing_store_owner) {
+    inputs_[0] = obj;
+    inputs_[1] = key;
+    inputs_[2] = val;
+    inputs_[3] = backing_store_owner;
+  }
+
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  LOperand* backing_store_owner() { return inputs_[3]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+  void PrintDataTo(StringStream* stream) override;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+};
+
+
+class LStoreKeyedGeneric final : public LTemplateInstruction<0, 4, 2> {
+ public:
+  LStoreKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                     LOperand* value, LOperand* slot, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    inputs_[3] = value;
+    temps_[0] = slot;
+    temps_[1] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+  LOperand* temp_slot() { return temps_[0]; }
+  LOperand* temp_vector() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  LanguageMode language_mode() { return hydrogen()->language_mode(); }
+};
+
+
+class LTransitionElementsKind final : public LTemplateInstruction<0, 2, 2> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* context,
+                          LOperand* new_map_temp,
+                          LOperand* temp) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = new_map_temp;
+    temps_[1] = temp;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_temp() { return temps_[0]; }
+  LOperand* temp() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  void PrintDataTo(StringStream* stream) override;
+
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object,
+                         LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+                               "trap-allocation-memento")
+};
+
+
+class LMaybeGrowElements final : public LTemplateInstruction<1, 5, 0> {
+ public:
+  LMaybeGrowElements(LOperand* context, LOperand* object, LOperand* elements,
+                     LOperand* key, LOperand* current_capacity) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = elements;
+    inputs_[3] = key;
+    inputs_[4] = current_capacity;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* elements() { return inputs_[2]; }
+  LOperand* key() { return inputs_[3]; }
+  LOperand* current_capacity() { return inputs_[4]; }
+
+  DECLARE_HYDROGEN_ACCESSOR(MaybeGrowElements)
+  DECLARE_CONCRETE_INSTRUCTION(MaybeGrowElements, "maybe-grow-elements")
+};
+
+
+class LStringAdd final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt final : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckValue(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckArrayBufferNotNeutered final
+    : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LCheckArrayBufferNotNeutered(LOperand* view, LOperand* scratch) {
+    inputs_[0] = view;
+    temps_[0] = scratch;
+  }
+
+  LOperand* view() { return inputs_[0]; }
+  LOperand* scratch() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckArrayBufferNotNeutered,
+                               "check-array-buffer-not-neutered")
+  DECLARE_HYDROGEN_ACCESSOR(CheckArrayBufferNotNeutered)
+};
+
+
+class LCheckInstanceType final : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LCheckInstanceType(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckMaps(LOperand* value = NULL) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LClampDToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampDToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+// Clamping conversion from a tagged value to a uint8 (x87 variant; no SSE2).
+class LClampTToUint8NoSSE2 final : public LTemplateInstruction<1, 1, 3> {
+ public:
+  LClampTToUint8NoSSE2(LOperand* unclamped,
+                       LOperand* temp1,
+                       LOperand* temp2,
+                       LOperand* temp3) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = temp3;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* scratch() { return temps_[0]; }
+  LOperand* scratch2() { return temps_[1]; }
+  LOperand* scratch3() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8NoSSE2,
+                               "clamp-t-to-uint8-nosse2")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
+class LCheckNonSmi final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LDoubleBits final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleBits(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LConstructDouble(LOperand* hi, LOperand* lo) {
+    inputs_[0] = hi;
+    inputs_[1] = lo;
+  }
+
+  LOperand* hi() { return inputs_[0]; }
+  LOperand* lo() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate final : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LToFastProperties final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch final : public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  void PrintDataTo(StringStream* stream) override;
+};
+
+
+class LOsrEntry final : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const override { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LForInPrepareMap final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray final : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue final : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext final : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStoreFrameContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext final : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAllocateBlockContext(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk final : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph)
+      : LChunk(info, graph),
+        num_double_slots_(0) { }
+
+  int GetNextSpillIndex(RegisterKind kind);
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+
+  int num_double_slots() const { return num_double_slots_; }
+
+ private:
+  int num_double_slots_;
+};
+
+
+class LChunkBuilder final : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(info, graph),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        allocator_(allocator) {}
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+  LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
+  LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+  LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathExp(HUnaryMathOperation* instr);
+  LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+  LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+  LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+  LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
+  LInstruction* DoDivI(HDiv* instr);
+  LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
+  LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(X87Register reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the
+  // end of the instruction. This means that the register allocator will not
+  // reuse its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the start of the instruction. The register allocator is free to
+  // assign the same register to some other operand used inside the
+  // instruction (i.e. a temporary or the output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
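+
+  // Editorial sketch of the intended usage (the builder body is assumed,
+  // but mirrors the common pattern for same-as-first binary ops):
+  //
+  //   LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+  //   LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+  //   return DefineSameAsFirst(new (zone()) LAddI(left, right));
+  //
+  // UseRegisterAtStart lets the allocator reuse "left"'s register for the
+  // result, which is exactly what DefineSameAsFirst requires.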
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a fixed register or a constant operand.
+  MUST_USE_RESULT LOperand* UseFixedOrConstant(HValue* value,
+                                               Register fixed_register);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand that must be a constant operand.
+  MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+  // An input operand in a register, a stack slot, or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) override;
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            Register reg);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            X87Register reg);
+  LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr);
+  // Assigns an environment to an instruction.  An instruction which can
+  // deoptimize must have an environment.
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  // Assigns a pointer map to an instruction.  An instruction which can
+  // trigger a GC or a lazy deoptimization must have a pointer map.
+  LInstruction* AssignPointerMap(LInstruction* instr);
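+
+  // Editorial sketch: an instruction that can both trigger a GC and
+  // deoptimize is wrapped with both helpers, e.g. (operand names assumed)
+  //
+  //   LInstruction* result =
+  //       DefineAsRegister(new (zone()) LMaybeGrowElements(
+  //           context, object, elements, key, current_capacity));
+  //   return AssignEnvironment(AssignPointerMap(result));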
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  LOperand* GetSeqStringSetCharOperand(HSeqStringSetChar* instr);
+
+  // Marks a call for the register allocator.  Assigns a pointer map to
+  // support GC and lazy deoptimization.  Assigns an environment to support
+  // eager deoptimization if CAN_DEOPTIMIZE_EAGERLY.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
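+
+  // For instance (editorial sketch; eax as the fixed return register
+  // follows the ia32/x87 convention used elsewhere in this port):
+  //
+  //   return MarkAsCall(DefineFixed(result, eax), instr);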
+
+  void VisitInstruction(HInstruction* current);
+  void AddInstruction(LInstruction* instr, HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HBinaryOperation* instr);
+
+  LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
+
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  LAllocator* allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CRANKSHAFT_X87_LITHIUM_X87_H_