Version 3.25.7 (based on bleeding_edge revision r19776)

Promise.all and Promise.race should reject non-array parameter (Chromium issue 347453).

Promise.all and Promise.race should use "then" rather than "chain" (Chromium issue 347427).

Merge the "Compute Minus Zero Checks" phase into the range analysis (issue 3204).

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@19777 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/src/a64/assembler-a64-inl.h b/src/a64/assembler-a64-inl.h
index a925e55..e123039 100644
--- a/src/a64/assembler-a64-inl.h
+++ b/src/a64/assembler-a64-inl.h
@@ -179,9 +179,9 @@
 
 inline void CPURegList::Remove(const CPURegList& other) {
   ASSERT(IsValid());
-  ASSERT(other.type() == type_);
-  ASSERT(other.RegisterSizeInBits() == size_);
-  list_ &= ~other.list();
+  if (other.type() == type_) {
+    list_ &= ~other.list();
+  }
 }
 
 
@@ -192,10 +192,14 @@
 }
 
 
-inline void CPURegList::Remove(const CPURegister& other) {
-  ASSERT(other.type() == type_);
-  ASSERT(other.SizeInBits() == size_);
-  Remove(other.code());
+inline void CPURegList::Remove(const CPURegister& other1,
+                               const CPURegister& other2,
+                               const CPURegister& other3,
+                               const CPURegister& other4) {
+  if (!other1.IsNone() && (other1.type() == type_)) Remove(other1.code());
+  if (!other2.IsNone() && (other2.type() == type_)) Remove(other2.code());
+  if (!other3.IsNone() && (other3.type() == type_)) Remove(other3.code());
+  if (!other4.IsNone() && (other4.type() == type_)) Remove(other4.code());
 }
 
 
@@ -649,6 +653,12 @@
 }
 
 
+Address RelocInfo::constant_pool_entry_address() {
+  ASSERT(IsInConstantPool());
+  return Assembler::target_pointer_address_at(pc_);
+}
+
+
 Object* RelocInfo::target_object() {
   ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return reinterpret_cast<Object*>(Assembler::target_address_at(pc_));
diff --git a/src/a64/assembler-a64.cc b/src/a64/assembler-a64.cc
index 14f78a0..85171d5 100644
--- a/src/a64/assembler-a64.cc
+++ b/src/a64/assembler-a64.cc
@@ -160,6 +160,12 @@
 }
 
 
+bool RelocInfo::IsInConstantPool() {
+  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
+  return instr->IsLdrLiteralX();
+}
+
+
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
   Instr* pc = reinterpret_cast<Instr*>(pc_);
diff --git a/src/a64/assembler-a64.h b/src/a64/assembler-a64.h
index 09a8a72..cb018a4 100644
--- a/src/a64/assembler-a64.h
+++ b/src/a64/assembler-a64.h
@@ -461,6 +461,11 @@
     return list_;
   }
 
+  inline void set_list(RegList new_list) {
+    ASSERT(IsValid());
+    list_ = new_list;
+  }
+
   // Combine another CPURegList into this one. Registers that already exist in
   // this list are left unchanged. The type and size of the registers in the
   // 'other' list must match those in this list.
@@ -471,9 +476,12 @@
   // in the 'other' list must match those in this list.
   void Remove(const CPURegList& other);
 
-  // Variants of Combine and Remove which take a single register.
+  // Variants of Combine and Remove which take CPURegisters.
   void Combine(const CPURegister& other);
-  void Remove(const CPURegister& other);
+  void Remove(const CPURegister& other1,
+              const CPURegister& other2 = NoCPUReg,
+              const CPURegister& other3 = NoCPUReg,
+              const CPURegister& other4 = NoCPUReg);
 
   // Variants of Combine and Remove which take a single register by its code;
   // the type and size of the register is inferred from this list.
@@ -503,9 +511,17 @@
     return list_ == 0;
   }
 
-  bool IncludesAliasOf(const CPURegister& other) const {
+  bool IncludesAliasOf(const CPURegister& other1,
+                       const CPURegister& other2 = NoCPUReg,
+                       const CPURegister& other3 = NoCPUReg,
+                       const CPURegister& other4 = NoCPUReg) const {
     ASSERT(IsValid());
-    return (type_ == other.type()) && (other.Bit() & list_);
+    RegList list = 0;
+    if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
+    if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
+    if (!other3.IsNone() && (other3.type() == type_)) list |= other3.Bit();
+    if (!other4.IsNone() && (other4.type() == type_)) list |= other4.Bit();
+    return (list_ & list) != 0;
   }
 
   int Count() const {
diff --git a/src/a64/code-stubs-a64.cc b/src/a64/code-stubs-a64.cc
index 2b472da..3ed79a4 100644
--- a/src/a64/code-stubs-a64.cc
+++ b/src/a64/code-stubs-a64.cc
@@ -1222,7 +1222,8 @@
       // A64 simulator does not currently simulate FPCR (where the rounding
       // mode is set), so test the operation with some debug code.
       if (masm->emit_debug_code()) {
-        Register temp = masm->Tmp1();
+        UseScratchRegisterScope temps(masm);
+        Register temp = temps.AcquireX();
         //  d5  zero_double   The value +0.0 as a double.
         __ Fneg(scratch0_double, zero_double);
         // Verify that we correctly generated +0.0 and -0.0.
@@ -1500,7 +1501,8 @@
   if (__ emit_debug_code()) {
     // Verify that the slot below fp[kSPOffset]-8 points to the return location
     // (currently in x12).
-    Register temp = masm->Tmp1();
+    UseScratchRegisterScope temps(masm);
+    Register temp = temps.AcquireX();
     __ Ldr(temp, MemOperand(fp, ExitFrameConstants::kSPOffset));
     __ Ldr(temp, MemOperand(temp, -static_cast<int64_t>(kXRegSizeInBytes)));
     __ Cmp(temp, x12);
@@ -4720,9 +4722,10 @@
         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
     InformIncrementalMarker(masm);
     regs_.Restore(masm);  // Restore the extra scratch registers we used.
+
     __ RememberedSetHelper(object_,
                            address_,
-                           value_,
+                           value_,            // scratch1
                            save_fp_regs_mode_,
                            MacroAssembler::kReturnAtEnd);
 
@@ -4783,7 +4786,7 @@
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
     __ RememberedSetHelper(object_,
                            address_,
-                           value_,
+                           value_,            // scratch1
                            save_fp_regs_mode_,
                            MacroAssembler::kReturnAtEnd);
   } else {
@@ -4826,7 +4829,7 @@
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
     __ RememberedSetHelper(object_,
                            address_,
-                           value_,
+                           value_,            // scratch1
                            save_fp_regs_mode_,
                            MacroAssembler::kReturnAtEnd);
   } else {
@@ -4859,7 +4862,7 @@
   if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
     __ RememberedSetHelper(object_,
                            address_,
-                           value_,
+                           value_,            // scratch1
                            save_fp_regs_mode_,
                            MacroAssembler::kReturnAtEnd);
   }
@@ -5090,12 +5093,11 @@
     __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
 
     // Check if the key is identical to the name.
+    UseScratchRegisterScope temps(masm);
+    Register scratch3 = temps.AcquireX();
     __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2));
-    // TODO(jbramley): We need another scratch here, but some callers can't
-    // provide a scratch3 so we have to use Tmp1(). We should find a clean way
-    // to make it unavailable to the MacroAssembler for a short time.
-    __ Ldr(__ Tmp1(), FieldMemOperand(scratch2, kElementsStartOffset));
-    __ Cmp(name, __ Tmp1());
+    __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset));
+    __ Cmp(name, scratch3);
     __ B(eq, done);
   }
 
diff --git a/src/a64/debug-a64.cc b/src/a64/debug-a64.cc
index d871165..2c0ced3 100644
--- a/src/a64/debug-a64.cc
+++ b/src/a64/debug-a64.cc
@@ -175,8 +175,7 @@
     ASSERT((object_regs & non_object_regs) == 0);
     ASSERT((scratch.Bit() & object_regs) == 0);
     ASSERT((scratch.Bit() & non_object_regs) == 0);
-    ASSERT((ip0.Bit() & (object_regs | non_object_regs)) == 0);
-    ASSERT((ip1.Bit() & (object_regs | non_object_regs)) == 0);
+    ASSERT((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
     STATIC_ASSERT(kSmiValueSize == 32);
 
     CPURegList non_object_list =
diff --git a/src/a64/deoptimizer-a64.cc b/src/a64/deoptimizer-a64.cc
index 40e3191..af1a48c 100644
--- a/src/a64/deoptimizer-a64.cc
+++ b/src/a64/deoptimizer-a64.cc
@@ -340,6 +340,9 @@
 
 
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  UseScratchRegisterScope temps(masm());
+  Register entry_id = temps.AcquireX();
+
   // Create a sequence of deoptimization entries.
   // Note that registers are still live when jumping to an entry.
   Label done;
@@ -354,15 +357,13 @@
     for (int i = 0; i < count(); i++) {
       int start = masm()->pc_offset();
       USE(start);
-      __ movz(masm()->Tmp0(), i);
+      __ movz(entry_id, i);
       __ b(&done);
       ASSERT(masm()->pc_offset() - start == table_entry_size_);
     }
   }
   __ Bind(&done);
-  // TODO(all): We need to add some kind of assertion to verify that Tmp0()
-  // is not clobbered by Push.
-  __ Push(masm()->Tmp0());
+  __ Push(entry_id);
 }
 
 
diff --git a/src/a64/full-codegen-a64.cc b/src/a64/full-codegen-a64.cc
index 4414cb3..7760530 100644
--- a/src/a64/full-codegen-a64.cc
+++ b/src/a64/full-codegen-a64.cc
@@ -86,10 +86,10 @@
   }
 
   void EmitJumpIfEitherNotSmi(Register reg1, Register reg2, Label* target) {
-    // We need to use ip0, so don't allow access to the MacroAssembler.
-    InstructionAccurateScope scope(masm_);
-    __ orr(ip0, reg1, reg2);
-    EmitJumpIfNotSmi(ip0, target);
+    UseScratchRegisterScope temps(masm_);
+    Register temp = temps.AcquireX();
+    __ Orr(temp, reg1, reg2);
+    EmitJumpIfNotSmi(temp, target);
   }
 
   void EmitPatchInfo() {
diff --git a/src/a64/ic-a64.cc b/src/a64/ic-a64.cc
index 93d7857..73d4013 100644
--- a/src/a64/ic-a64.cc
+++ b/src/a64/ic-a64.cc
@@ -1074,6 +1074,7 @@
                                          FAST_DOUBLE_ELEMENTS,
                                          receiver_map,
                                          x10,
+                                         x11,
                                          slow);
   ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
   AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
@@ -1088,6 +1089,7 @@
                                          FAST_ELEMENTS,
                                          receiver_map,
                                          x10,
+                                         x11,
                                          slow);
   ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
   mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
@@ -1104,6 +1106,7 @@
                                          FAST_ELEMENTS,
                                          receiver_map,
                                          x10,
+                                         x11,
                                          slow);
   ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
   mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
diff --git a/src/a64/lithium-a64.cc b/src/a64/lithium-a64.cc
index 12812bb..dff7003 100644
--- a/src/a64/lithium-a64.cc
+++ b/src/a64/lithium-a64.cc
@@ -1369,7 +1369,7 @@
 
 
 LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
+  ASSERT(instr->representation().IsInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
   ASSERT(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
@@ -1387,6 +1387,25 @@
 }
 
 
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  bool truncating = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32);
+  LOperand* temp = truncating ? NULL : TempRegister();
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LDivByConstI(dividend, divisor, temp));
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->RangeCanInclude(0) && divisor < 0) ||
+      !truncating;
+  return can_deopt ? AssignEnvironment(result) : result;
+}
+
+
 LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
   ASSERT(instr->representation().IsSmiOrInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
@@ -1402,10 +1421,13 @@
 
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    // TODO(all): Add Smi support to DoDivI and turn this into a ternary.
-    if (instr->RightIsPowerOf2()) return DoDivByPowerOf2I(instr);
-    if (instr->representation().IsInteger32()) return DoDivI(instr);
-    return DoArithmeticT(Token::DIV, instr);
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
   } else {
@@ -1714,6 +1736,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegisterAtStart(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result =
@@ -1725,6 +1750,22 @@
 }
 
 
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LFlooringDivByConstI(dividend, divisor));
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->RangeCanInclude(0) && divisor < 0);
+  return can_deopt ? AssignEnvironment(result) : result;
+}
+
+
 LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
@@ -1739,8 +1780,7 @@
   if (instr->RightIsPowerOf2()) {
     return DoFlooringDivByPowerOf2I(instr);
   } else if (instr->right()->IsConstant()) {
-    // TODO(svenpanne) Do something more efficient in this case.
-    return DoFlooringDivI(instr);
+    return DoFlooringDivByConstI(instr);
   } else {
     return DoFlooringDivI(instr);
   }
@@ -1767,7 +1807,7 @@
 
 
 LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
+  ASSERT(instr->representation().IsInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
   ASSERT(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegisterAtStart(instr->left());
@@ -1781,6 +1821,23 @@
 }
 
 
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp = TempRegister();
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LModByConstI(dividend, divisor, temp));
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->CanBeNegative());
+  return can_deopt ? AssignEnvironment(result) : result;
+}
+
+
 LInstruction* LChunkBuilder::DoModI(HMod* instr) {
   ASSERT(instr->representation().IsSmiOrInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
@@ -1798,10 +1855,13 @@
 
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    // TODO(all): Add Smi support to DoDivI and turn this into a ternary.
-    if (instr->RightIsPowerOf2()) return DoModByPowerOf2I(instr);
-    if (instr->representation().IsInteger32()) return DoModI(instr);
-    return DoArithmeticT(Token::MOD, instr);
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MOD, instr);
   } else {
diff --git a/src/a64/lithium-a64.h b/src/a64/lithium-a64.h
index aa1c23a..6f718e5 100644
--- a/src/a64/lithium-a64.h
+++ b/src/a64/lithium-a64.h
@@ -90,6 +90,7 @@
   V(DebugBreak)                                 \
   V(DeclareGlobals)                             \
   V(Deoptimize)                                 \
+  V(DivByConstI)                                \
   V(DivByPowerOf2I)                             \
   V(DivI)                                       \
   V(DoubleBits)                                 \
@@ -97,6 +98,7 @@
   V(Drop)                                       \
   V(Dummy)                                      \
   V(DummyUse)                                   \
+  V(FlooringDivByConstI)                        \
   V(FlooringDivByPowerOf2I)                     \
   V(FlooringDivI)                               \
   V(ForInCacheArray)                            \
@@ -143,6 +145,7 @@
   V(MathPowHalf)                                \
   V(MathRound)                                  \
   V(MathSqrt)                                   \
+  V(ModByConstI)                                \
   V(ModByPowerOf2I)                             \
   V(ModI)                                       \
   V(MulConstIS)                                 \
@@ -1300,6 +1303,26 @@
 };
 
 
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
 class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LDivI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -1949,6 +1972,25 @@
 };
 
 
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
 class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
@@ -2040,6 +2082,26 @@
 };
 
 
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LModByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
 class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LModI(LOperand* left, LOperand* right) {
@@ -2931,10 +2993,13 @@
 #undef DECLARE_DO
 
   LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
   LInstruction* DoDivI(HBinaryOperation* instr);
   LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
   LInstruction* DoModI(HMod* instr);
   LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
   LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
 
   static bool HasMagicNumberForDivision(int32_t divisor);
diff --git a/src/a64/lithium-codegen-a64.cc b/src/a64/lithium-codegen-a64.cc
index 8e443b9..ea826d6 100644
--- a/src/a64/lithium-codegen-a64.cc
+++ b/src/a64/lithium-codegen-a64.cc
@@ -845,7 +845,13 @@
     }
     if (deopt_jump_table_[i].needs_frame) {
       ASSERT(!info()->saves_caller_doubles());
-      __ Mov(__ Tmp0(), Operand(ExternalReference::ForDeoptEntry(entry)));
+
+      UseScratchRegisterScope temps(masm());
+      Register stub_deopt_entry = temps.AcquireX();
+      Register stub_marker = temps.AcquireX();
+
+      __ Mov(stub_deopt_entry,
+             Operand(ExternalReference::ForDeoptEntry(entry)));
       if (needs_frame.is_bound()) {
         __ B(&needs_frame);
       } else {
@@ -853,12 +859,11 @@
         // This variant of deopt can only be used with stubs. Since we don't
         // have a function pointer to install in the stack frame that we're
         // building, install a special marker there instead.
-        // TODO(jochen): Revisit the use of TmpX().
         ASSERT(info()->IsStub());
-        __ Mov(__ Tmp1(), Operand(Smi::FromInt(StackFrame::STUB)));
-        __ Push(lr, fp, cp, __ Tmp1());
+        __ Mov(stub_marker, Operand(Smi::FromInt(StackFrame::STUB)));
+        __ Push(lr, fp, cp, stub_marker);
         __ Add(fp, __ StackPointer(), 2 * kPointerSize);
-        __ Call(__ Tmp0());
+        __ Call(stub_deopt_entry);
       }
     } else {
       if (info()->saves_caller_doubles()) {
@@ -1057,6 +1062,11 @@
 }
 
 
+void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
+  DeoptimizeBranch(environment, reg_not_zero, rt);
+}
+
+
 void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
   int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
   DeoptimizeBranch(environment, reg_bit_set, rt, sign_bit);
@@ -2634,6 +2644,39 @@
 }
 
 
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister32(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister32(instr->result());
+  ASSERT(!AreAliased(dividend, result));
+
+  if (divisor == 0) {
+    Deoptimize(instr->environment());
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hdiv->left()->RangeCanInclude(0) && divisor < 0) {
+    DeoptimizeIfZero(dividend, instr->environment());
+  }
+
+  __ FlooringDiv(result, dividend, Abs(divisor));
+  __ Add(result, result, Operand(dividend, LSR, 31));
+  if (divisor < 0) __ Neg(result, result);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    Register temp = ToRegister32(instr->temp());
+    ASSERT(!AreAliased(dividend, result, temp));
+    __ Sxtw(dividend.X(), dividend);
+    __ Mov(temp, divisor);
+    __ Smsubl(temp.X(), result, temp, dividend.X());
+    DeoptimizeIfNotZero(temp, instr->environment());
+  }
+}
+
+
 void LCodeGen::DoDivI(LDivI* instr) {
   Register dividend = ToRegister32(instr->left());
   Register divisor = ToRegister32(instr->right());
@@ -3839,6 +3882,29 @@
 }
 
 
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister32(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister32(instr->result());
+  ASSERT(!AreAliased(dividend, result));
+
+  if (divisor == 0) {
+    Deoptimize(instr->environment());
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hdiv->left()->RangeCanInclude(0) && divisor < 0) {
+    __ Cmp(dividend, 0);
+    DeoptimizeIf(eq, instr->environment());
+  }
+
+  __ FlooringDiv(result, dividend, divisor);
+}
+
+
 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
   Register dividend = ToRegister32(instr->dividend());
   Register divisor = ToRegister32(instr->divisor());
@@ -4098,6 +4164,36 @@
 }
 
 
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister32(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister32(instr->result());
+  Register temp = ToRegister32(instr->temp());
+  ASSERT(!AreAliased(dividend, result, temp));
+
+  if (divisor == 0) {
+    Deoptimize(instr->environment());
+    return;
+  }
+
+  __ FlooringDiv(result, dividend, Abs(divisor));
+  __ Add(result, result, Operand(dividend, LSR, 31));
+  __ Sxtw(dividend.X(), dividend);
+  __ Mov(temp, Abs(divisor));
+  __ Smsubl(result.X(), result, temp, dividend.X());
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hmod->left()->CanBeNegative()) {
+    Label remainder_not_zero;
+    __ Cbnz(result, &remainder_not_zero);
+    DeoptimizeIfNegative(dividend, instr->environment());
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
 void LCodeGen::DoModI(LModI* instr) {
   Register dividend = ToRegister32(instr->left());
   Register divisor = ToRegister32(instr->right());
diff --git a/src/a64/lithium-codegen-a64.h b/src/a64/lithium-codegen-a64.h
index 74dad62..140b1ae 100644
--- a/src/a64/lithium-codegen-a64.h
+++ b/src/a64/lithium-codegen-a64.h
@@ -226,6 +226,7 @@
                   Deoptimizer::BailoutType* override_bailout_type = NULL);
   void DeoptimizeIf(Condition cc, LEnvironment* environment);
   void DeoptimizeIfZero(Register rt, LEnvironment* environment);
+  void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
   void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
   void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
   void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
diff --git a/src/a64/macro-assembler-a64-inl.h b/src/a64/macro-assembler-a64-inl.h
index 98a14b7..09efcc3 100644
--- a/src/a64/macro-assembler-a64-inl.h
+++ b/src/a64/macro-assembler-a64-inl.h
@@ -588,7 +588,8 @@
 void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
   ASSERT(allow_macro_instructions_);
   if (value != 0.0) {
-    FPRegister tmp = AppropriateTempFor(fn);
+    UseScratchRegisterScope temps(this);
+    FPRegister tmp = temps.AcquireSameSizeAs(fn);
     Fmov(tmp, value);
     fcmp(fn, tmp);
   } else {
@@ -742,16 +743,19 @@
     // These cases can be handled by the Assembler.
     fmov(fd, imm);
   } else {
+    UseScratchRegisterScope temps(this);
     // TODO(all): The Assembler would try to relocate the immediate with
     // Assembler::ldr(const FPRegister& ft, double imm) but it is not
     // implemented yet.
     if (fd.SizeInBits() == kDRegSize) {
-      Mov(Tmp0(), double_to_rawbits(imm));
-      Fmov(fd, Tmp0());
+      Register tmp = temps.AcquireX();
+      Mov(tmp, double_to_rawbits(imm));
+      Fmov(fd, tmp);
     } else {
       ASSERT(fd.SizeInBits() == kSRegSize);
-      Mov(WTmp0(), float_to_rawbits(static_cast<float>(imm)));
-      Fmov(fd, WTmp0());
+      Register tmp = temps.AcquireW();
+      Mov(tmp, float_to_rawbits(static_cast<float>(imm)));
+      Fmov(fd, tmp);
     }
   }
 }
@@ -1351,9 +1355,11 @@
                                    Label* both_smi_label,
                                    Label* not_smi_label) {
   STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+  UseScratchRegisterScope temps(this);
+  Register tmp = temps.AcquireX();
   // Check if both tag bits are clear.
-  Orr(Tmp0(), value1, value2);
-  JumpIfSmi(Tmp0(), both_smi_label, not_smi_label);
+  Orr(tmp, value1, value2);
+  JumpIfSmi(tmp, both_smi_label, not_smi_label);
 }
 
 
@@ -1362,9 +1368,11 @@
                                      Label* either_smi_label,
                                      Label* not_smi_label) {
   STATIC_ASSERT((kSmiTagSize == 1) && (kSmiTag == 0));
+  UseScratchRegisterScope temps(this);
+  Register tmp = temps.AcquireX();
   // Check if either tag bit is clear.
-  And(Tmp0(), value1, value2);
-  JumpIfSmi(Tmp0(), either_smi_label, not_smi_label);
+  And(tmp, value1, value2);
+  JumpIfSmi(tmp, either_smi_label, not_smi_label);
 }
 
 
@@ -1437,8 +1445,10 @@
 
 
 void MacroAssembler::Push(Handle<Object> handle) {
-  Mov(Tmp0(), Operand(handle));
-  Push(Tmp0());
+  UseScratchRegisterScope temps(this);
+  Register tmp = temps.AcquireX();
+  Mov(tmp, Operand(handle));
+  Push(tmp);
 }
 
 
diff --git a/src/a64/macro-assembler-a64.cc b/src/a64/macro-assembler-a64.cc
index 53c7777..2bc2128 100644
--- a/src/a64/macro-assembler-a64.cc
+++ b/src/a64/macro-assembler-a64.cc
@@ -53,7 +53,7 @@
 #endif
       has_frame_(false),
       use_real_aborts_(true),
-      sp_(jssp), tmp0_(ip0), tmp1_(ip1), fptmp0_(fp_scratch) {
+      sp_(jssp), tmp_list_(ip0, ip1), fptmp_list_(fp_scratch) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -65,9 +65,12 @@
                                   const Register& rn,
                                   const Operand& operand,
                                   LogicalOp op) {
+  UseScratchRegisterScope temps(this);
+
   if (operand.NeedsRelocation()) {
-    LoadRelocated(Tmp0(), operand);
-    Logical(rd, rn, Tmp0(), op);
+    Register temp = temps.AcquireX();
+    LoadRelocated(temp, operand);
+    Logical(rd, rn, temp, op);
 
   } else if (operand.IsImmediate()) {
     int64_t immediate = operand.immediate();
@@ -125,7 +128,7 @@
       LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
     } else {
       // Immediate can't be encoded: synthesize using move immediate.
-      Register temp = AppropriateTempFor(rn);
+      Register temp = temps.AcquireSameSizeAs(rn);
       Mov(temp, immediate);
       if (rd.Is(csp)) {
         // If rd is the stack pointer we cannot use it as the destination
@@ -144,7 +147,7 @@
     ASSERT(operand.shift_amount() <= 4);
     ASSERT(operand.reg().Is64Bits() ||
            ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
-    Register temp = AppropriateTempFor(rn, operand.reg());
+    Register temp = temps.AcquireSameSizeAs(rn);
     EmitExtendShift(temp, operand.reg(), operand.extend(),
                     operand.shift_amount());
     Logical(rd, rn, temp, op);
@@ -208,9 +211,10 @@
       invert_move = true;
     }
 
-    // Mov instructions can't move value into the stack pointer, so set up a
-    // temporary register, if needed.
-    Register temp = rd.IsSP() ? AppropriateTempFor(rd) : rd;
+    // Mov instructions can't move immediate values into the stack pointer, so
+    // set up a temporary register, if needed.
+    UseScratchRegisterScope temps(this);
+    Register temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
 
     // Iterate through the halfwords. Use movn/movz for the first non-ignored
     // halfword, and movk for subsequent halfwords.
@@ -248,9 +252,11 @@
                          DiscardMoveMode discard_mode) {
   ASSERT(allow_macro_instructions_);
   ASSERT(!rd.IsZero());
+
   // Provide a swap register for instructions that need to write into the
   // system stack pointer (and can't do this inherently).
-  Register dst = (rd.Is(csp)) ? (Tmp1()) : (rd);
+  UseScratchRegisterScope temps(this);
+  Register dst = (rd.IsSP()) ? temps.AcquireSameSizeAs(rd) : rd;
 
   if (operand.NeedsRelocation()) {
     LoadRelocated(dst, operand);
@@ -291,8 +297,7 @@
 
   // Copy the result to the system stack pointer.
   if (!dst.Is(rd)) {
-    ASSERT(rd.IsZero());
-    ASSERT(dst.Is(Tmp1()));
+    ASSERT(rd.IsSP());
     Assembler::mov(rd, dst);
   }
 }
@@ -302,8 +307,8 @@
   ASSERT(allow_macro_instructions_);
 
   if (operand.NeedsRelocation()) {
-    LoadRelocated(Tmp0(), operand);
-    Mvn(rd, Tmp0());
+    LoadRelocated(rd, operand);
+    mvn(rd, rd);
 
   } else if (operand.IsImmediate()) {
     // Call the macro assembler for generic immediates.
@@ -312,14 +317,11 @@
   } else if (operand.IsExtendedRegister()) {
     // Emit two instructions for the extend case. This differs from Mov, as
     // the extend and invert can't be achieved in one instruction.
-    Register temp = AppropriateTempFor(rd, operand.reg());
-    EmitExtendShift(temp, operand.reg(), operand.extend(),
+    EmitExtendShift(rd, operand.reg(), operand.extend(),
                     operand.shift_amount());
-    mvn(rd, temp);
+    mvn(rd, rd);
 
   } else {
-    // Otherwise, emit a register move only if the registers are distinct.
-    // If the jssp is an operand, add #0 is emitted, otherwise, orr #0.
     mvn(rd, operand);
   }
 }
@@ -360,8 +362,10 @@
                                              ConditionalCompareOp op) {
   ASSERT((cond != al) && (cond != nv));
   if (operand.NeedsRelocation()) {
-    LoadRelocated(Tmp0(), operand);
-    ConditionalCompareMacro(rn, Tmp0(), nzcv, cond, op);
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
+    LoadRelocated(temp, operand);
+    ConditionalCompareMacro(rn, temp, nzcv, cond, op);
 
   } else if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
       (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
@@ -372,7 +376,8 @@
   } else {
     // The operand isn't directly supported by the instruction: perform the
     // operation on a temporary register.
-    Register temp = AppropriateTempFor(rn);
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireSameSizeAs(rn);
     Mov(temp, operand);
     ConditionalCompare(rn, temp, nzcv, cond, op);
   }
@@ -398,7 +403,8 @@
     } else if (imm == -1) {
       csinv(rd, rn, zr, cond);
     } else {
-      Register temp = AppropriateTempFor(rn);
+      UseScratchRegisterScope temps(this);
+      Register temp = temps.AcquireSameSizeAs(rn);
       Mov(temp, operand.immediate());
       csel(rd, rn, temp, cond);
     }
@@ -407,7 +413,8 @@
     csel(rd, rn, operand.reg(), cond);
   } else {
     // All other arguments.
-    Register temp = AppropriateTempFor(rn);
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireSameSizeAs(rn);
     Mov(temp, operand);
     csel(rd, rn, temp, cond);
   }
@@ -426,12 +433,15 @@
   }
 
   if (operand.NeedsRelocation()) {
-    LoadRelocated(Tmp0(), operand);
-    AddSubMacro(rd, rn, Tmp0(), S, op);
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
+    LoadRelocated(temp, operand);
+    AddSubMacro(rd, rn, temp, S, op);
   } else if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
              (rn.IsZero() && !operand.IsShiftedRegister())                ||
              (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
-    Register temp = AppropriateTempFor(rn);
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireSameSizeAs(rn);
     Mov(temp, operand);
     AddSub(rd, rn, temp, S, op);
   } else {
@@ -446,24 +456,28 @@
                                           FlagsUpdate S,
                                           AddSubWithCarryOp op) {
   ASSERT(rd.SizeInBits() == rn.SizeInBits());
+  UseScratchRegisterScope temps(this);
 
   if (operand.NeedsRelocation()) {
-    LoadRelocated(Tmp0(), operand);
-    AddSubWithCarryMacro(rd, rn, Tmp0(), S, op);
+    Register temp = temps.AcquireX();
+    LoadRelocated(temp, operand);
+    AddSubWithCarryMacro(rd, rn, temp, S, op);
 
   } else if (operand.IsImmediate() ||
              (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
     // Add/sub with carry (immediate or ROR shifted register.)
-    Register temp = AppropriateTempFor(rn);
+    Register temp = temps.AcquireSameSizeAs(rn);
     Mov(temp, operand);
     AddSubWithCarry(rd, rn, temp, S, op);
+
   } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
     // Add/sub with carry (shifted register).
     ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
     ASSERT(operand.shift() != ROR);
-    ASSERT(is_uintn(operand.shift_amount(),
-          rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2));
-    Register temp = AppropriateTempFor(rn, operand.reg());
+    ASSERT(
+        is_uintn(operand.shift_amount(),
+                 rd.SizeInBits() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2));
+    Register temp = temps.AcquireSameSizeAs(rn);
     EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
     AddSubWithCarry(rd, rn, temp, S, op);
 
@@ -475,7 +489,7 @@
     ASSERT(operand.shift_amount() <= 4);
     ASSERT(operand.reg().Is64Bits() ||
            ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
-    Register temp = AppropriateTempFor(rn, operand.reg());
+    Register temp = temps.AcquireSameSizeAs(rn);
     EmitExtendShift(temp, operand.reg(), operand.extend(),
                     operand.shift_amount());
     AddSubWithCarry(rd, rn, temp, S, op);
@@ -500,7 +514,8 @@
       !IsImmLSUnscaled(offset)) {
     // Immediate offset that can't be encoded using unsigned or unscaled
     // addressing modes.
-    Register temp = AppropriateTempFor(addr.base());
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireSameSizeAs(addr.base());
     Mov(temp, addr.offset());
     LoadStore(rt, MemOperand(addr.base(), temp), op);
   } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
@@ -856,11 +871,14 @@
   PrepareForPush(count, size);
 
   if (FLAG_optimize_for_size && count > 8) {
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
+
     Label loop;
-    __ Mov(Tmp0(), count / 2);
+    __ Mov(temp, count / 2);
     __ Bind(&loop);
     PushHelper(2, size, src, src, NoReg, NoReg);
-    __ Subs(Tmp0(), Tmp0(), 1);
+    __ Subs(temp, temp, 1);
     __ B(ne, &loop);
 
     count %= 2;
@@ -888,7 +906,8 @@
 void MacroAssembler::PushMultipleTimes(CPURegister src, Register count) {
   PrepareForPush(Operand(count, UXTW, WhichPowerOf2(src.SizeInBytes())));
 
-  Register temp = AppropriateTempFor(count);
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireSameSizeAs(count);
 
   if (FLAG_optimize_for_size) {
     Label loop, done;
@@ -1332,10 +1351,10 @@
                                 Condition cond,
                                 Label* branch) {
   ASSERT(cond == eq || cond == ne);
-  // Use Tmp1() to have a different destination register, as Tmp0() will be used
-  // for relocation.
-  And(Tmp1(), object, Operand(ExternalReference::new_space_mask(isolate())));
-  Cmp(Tmp1(), Operand(ExternalReference::new_space_start(isolate())));
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+  And(temp, object, Operand(ExternalReference::new_space_mask(isolate())));
+  Cmp(temp, Operand(ExternalReference::new_space_start(isolate())));
   B(cond, branch);
 }
 
@@ -1504,8 +1523,11 @@
     Abort(kOperandIsASmiAndNotAName);
     Bind(&not_smi);
 
-    Ldr(Tmp1(), FieldMemOperand(object, HeapObject::kMapOffset));
-    CompareInstanceType(Tmp1(), Tmp1(), LAST_NAME_TYPE);
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
+
+    Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+    CompareInstanceType(temp, temp, LAST_NAME_TYPE);
     Check(ls, kOperandIsNotAName);
   }
 }
@@ -1513,7 +1535,8 @@
 
 void MacroAssembler::AssertString(Register object) {
   if (emit_debug_code()) {
-    Register temp = Tmp1();
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
     STATIC_ASSERT(kSmiTag == 0);
     Tst(object, kSmiTagMask);
     Check(ne, kOperandIsASmiAndNotAString);
@@ -1830,8 +1853,10 @@
 void MacroAssembler::CallCFunction(ExternalReference function,
                                    int num_of_reg_args,
                                    int num_of_double_args) {
-  Mov(Tmp0(), Operand(function));
-  CallCFunction(Tmp0(), num_of_reg_args, num_of_double_args);
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+  Mov(temp, Operand(function));
+  CallCFunction(temp, num_of_reg_args, num_of_double_args);
 }
 
 
@@ -1884,7 +1909,8 @@
       // Because the stack pointer must be aligned on a 16-byte boundary, the
       // aligned csp can be up to 12 bytes below the jssp. This is the case
       // where we only pushed one W register on top of an aligned jssp.
-      Register temp = Tmp1();
+      UseScratchRegisterScope temps(this);
+      Register temp = temps.AcquireX();
       ASSERT(ActivationFrameAlignment() == 16);
       Sub(temp, csp, old_stack_pointer);
       // We want temp <= 0 && temp >= -12.
@@ -1903,8 +1929,10 @@
 
 
 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode) {
-  Mov(Tmp0(), Operand(target, rmode));
-  Br(Tmp0());
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+  Mov(temp, Operand(target, rmode));
+  Br(temp);
 }
 
 
@@ -1966,16 +1994,19 @@
   // Addresses always have 64 bits, so we shouldn't encounter NONE32.
   ASSERT(rmode != RelocInfo::NONE32);
 
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+
   if (rmode == RelocInfo::NONE64) {
     uint64_t imm = reinterpret_cast<uint64_t>(target);
-    movz(Tmp0(), (imm >> 0) & 0xffff, 0);
-    movk(Tmp0(), (imm >> 16) & 0xffff, 16);
-    movk(Tmp0(), (imm >> 32) & 0xffff, 32);
-    movk(Tmp0(), (imm >> 48) & 0xffff, 48);
+    movz(temp, (imm >> 0) & 0xffff, 0);
+    movk(temp, (imm >> 16) & 0xffff, 16);
+    movk(temp, (imm >> 32) & 0xffff, 32);
+    movk(temp, (imm >> 48) & 0xffff, 48);
   } else {
-    LoadRelocated(Tmp0(), Operand(reinterpret_cast<intptr_t>(target), rmode));
+    LoadRelocated(temp, Operand(reinterpret_cast<intptr_t>(target), rmode));
   }
-  Blr(Tmp0());
+  Blr(temp);
 #ifdef DEBUG
   AssertSizeOfCodeGeneratedSince(&start_call, CallSize(target, rmode));
 #endif
@@ -2056,21 +2087,23 @@
                                        Label* on_heap_number,
                                        Label* on_not_heap_number) {
   ASSERT(on_heap_number || on_not_heap_number);
-  // Tmp0() is used as a scratch register.
-  ASSERT(!AreAliased(Tmp0(), heap_number_map));
   AssertNotSmi(object);
 
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+
   // Load the HeapNumber map if it is not passed.
   if (heap_number_map.Is(NoReg)) {
-    heap_number_map = Tmp1();
+    heap_number_map = temps.AcquireX();
     LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   } else {
-    // This assert clobbers Tmp0(), so do it before loading Tmp0() with the map.
     AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   }
 
-  Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset));
-  Cmp(Tmp0(), heap_number_map);
+  ASSERT(!AreAliased(temp, heap_number_map));
+
+  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+  Cmp(temp, heap_number_map);
 
   if (on_heap_number) {
     B(eq, on_heap_number);
@@ -2196,10 +2229,12 @@
 
 void MacroAssembler::JumpIfMinusZero(DoubleRegister input,
                                      Label* on_negative_zero) {
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
   // Floating point -0.0 is kMinInt as an integer, so subtracting 1 (cmp) will
   // cause overflow.
-  Fmov(Tmp0(), input);
-  Cmp(Tmp0(), 1);
+  Fmov(temp, input);
+  Cmp(temp, 1);
   B(vs, on_negative_zero);
 }
 
@@ -2209,10 +2244,8 @@
   Cmp(input.W(), Operand(input.W(), UXTB));
   // If input < input & 0xff, it must be < 0, so saturate to 0.
   Csel(output.W(), wzr, input.W(), lt);
-  // Create a constant 0xff.
-  Mov(WTmp0(), 255);
-  // If input > input & 0xff, it must be > 255, so saturate to 255.
-  Csel(output.W(), WTmp0(), output.W(), gt);
+  // If input <= input & 0xff, it must be <= 255. Otherwise, saturate to 255.
+  Csel(output.W(), output.W(), 255, le);
 }
 
 
@@ -2246,37 +2279,37 @@
                                                unsigned count,
                                                Register scratch1,
                                                Register scratch2,
-                                               Register scratch3) {
+                                               Register scratch3,
+                                               Register scratch4,
+                                               Register scratch5) {
   // Untag src and dst into scratch registers.
   // Copy src->dst in a tight loop.
-  ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, Tmp0(), Tmp1()));
+  ASSERT(!AreAliased(dst, src,
+                     scratch1, scratch2, scratch3, scratch4, scratch5));
   ASSERT(count >= 2);
 
   const Register& remaining = scratch3;
   Mov(remaining, count / 2);
 
-  // Only use the Assembler, so we can use Tmp0() and Tmp1().
-  InstructionAccurateScope scope(this);
-
   const Register& dst_untagged = scratch1;
   const Register& src_untagged = scratch2;
-  sub(dst_untagged, dst, kHeapObjectTag);
-  sub(src_untagged, src, kHeapObjectTag);
+  Sub(dst_untagged, dst, kHeapObjectTag);
+  Sub(src_untagged, src, kHeapObjectTag);
 
   // Copy fields in pairs.
   Label loop;
-  bind(&loop);
-  ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2,
-                                 PostIndex));
-  stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2,
-                                 PostIndex));
-  sub(remaining, remaining, 1);
-  cbnz(remaining, &loop);
+  Bind(&loop);
+  Ldp(scratch4, scratch5,
+      MemOperand(src_untagged, kXRegSizeInBytes * 2, PostIndex));
+  Stp(scratch4, scratch5,
+      MemOperand(dst_untagged, kXRegSizeInBytes * 2, PostIndex));
+  Sub(remaining, remaining, 1);
+  Cbnz(remaining, &loop);
 
   // Handle the leftovers.
   if (count & 1) {
-    ldr(Tmp0(), MemOperand(src_untagged));
-    str(Tmp0(), MemOperand(dst_untagged));
+    Ldr(scratch4, MemOperand(src_untagged));
+    Str(scratch4, MemOperand(dst_untagged));
   }
 }
 
@@ -2285,13 +2318,12 @@
                                                    Register src,
                                                    unsigned count,
                                                    Register scratch1,
-                                                   Register scratch2) {
+                                                   Register scratch2,
+                                                   Register scratch3,
+                                                   Register scratch4) {
   // Untag src and dst into scratch registers.
   // Copy src->dst in an unrolled loop.
-  ASSERT(!AreAliased(dst, src, scratch1, scratch2, Tmp0(), Tmp1()));
-
-  // Only use the Assembler, so we can use Tmp0() and Tmp1().
-  InstructionAccurateScope scope(this);
+  ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
 
   const Register& dst_untagged = scratch1;
   const Register& src_untagged = scratch2;
@@ -2300,16 +2332,16 @@
 
   // Copy fields in pairs.
   for (unsigned i = 0; i < count / 2; i++) {
-    ldp(Tmp0(), Tmp1(), MemOperand(src_untagged, kXRegSizeInBytes * 2,
-                                   PostIndex));
-    stp(Tmp0(), Tmp1(), MemOperand(dst_untagged, kXRegSizeInBytes * 2,
-                                   PostIndex));
+    Ldp(scratch3, scratch4,
+        MemOperand(src_untagged, kXRegSizeInBytes * 2, PostIndex));
+    Stp(scratch3, scratch4,
+        MemOperand(dst_untagged, kXRegSizeInBytes * 2, PostIndex));
   }
 
   // Handle the leftovers.
   if (count & 1) {
-    ldr(Tmp0(), MemOperand(src_untagged));
-    str(Tmp0(), MemOperand(dst_untagged));
+    Ldr(scratch3, MemOperand(src_untagged));
+    Str(scratch3, MemOperand(dst_untagged));
   }
 }
 
@@ -2317,23 +2349,22 @@
 void MacroAssembler::CopyFieldsUnrolledHelper(Register dst,
                                               Register src,
                                               unsigned count,
-                                              Register scratch1) {
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3) {
   // Untag src and dst into scratch registers.
   // Copy src->dst in an unrolled loop.
-  ASSERT(!AreAliased(dst, src, scratch1, Tmp0(), Tmp1()));
-
-  // Only use the Assembler, so we can use Tmp0() and Tmp1().
-  InstructionAccurateScope scope(this);
+  ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3));
 
   const Register& dst_untagged = scratch1;
-  const Register& src_untagged = Tmp1();
-  sub(dst_untagged, dst, kHeapObjectTag);
-  sub(src_untagged, src, kHeapObjectTag);
+  const Register& src_untagged = scratch2;
+  Sub(dst_untagged, dst, kHeapObjectTag);
+  Sub(src_untagged, src, kHeapObjectTag);
 
   // Copy fields one by one.
   for (unsigned i = 0; i < count; i++) {
-    ldr(Tmp0(), MemOperand(src_untagged, kXRegSizeInBytes, PostIndex));
-    str(Tmp0(), MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex));
+    Ldr(scratch3, MemOperand(src_untagged, kXRegSizeInBytes, PostIndex));
+    Str(scratch3, MemOperand(dst_untagged, kXRegSizeInBytes, PostIndex));
   }
 }
 
@@ -2352,12 +2383,10 @@
   //
   // In both cases, fields are copied in pairs if possible, and left-overs are
   // handled separately.
+  ASSERT(!AreAliased(dst, src));
   ASSERT(!temps.IncludesAliasOf(dst));
   ASSERT(!temps.IncludesAliasOf(src));
-  ASSERT(!temps.IncludesAliasOf(Tmp0()));
-  ASSERT(!temps.IncludesAliasOf(Tmp1()));
   ASSERT(!temps.IncludesAliasOf(xzr));
-  ASSERT(!AreAliased(dst, src, Tmp0(), Tmp1()));
 
   if (emit_debug_code()) {
     Cmp(dst, src);
@@ -2368,17 +2397,25 @@
   // enough scratch registers).
   static const unsigned kLoopThreshold = 8;
 
-  ASSERT(!temps.IsEmpty());
-  Register scratch1 = Register(temps.PopLowestIndex());
-  Register scratch2 = Register(temps.PopLowestIndex());
-  Register scratch3 = Register(temps.PopLowestIndex());
-
-  if (scratch3.IsValid() && (count >= kLoopThreshold)) {
-    CopyFieldsLoopPairsHelper(dst, src, count, scratch1, scratch2, scratch3);
-  } else if (scratch2.IsValid()) {
-    CopyFieldsUnrolledPairsHelper(dst, src, count, scratch1, scratch2);
-  } else if (scratch1.IsValid()) {
-    CopyFieldsUnrolledHelper(dst, src, count, scratch1);
+  UseScratchRegisterScope masm_temps(this);
+  if ((temps.Count() >= 3) && (count >= kLoopThreshold)) {
+    CopyFieldsLoopPairsHelper(dst, src, count,
+                              Register(temps.PopLowestIndex()),
+                              Register(temps.PopLowestIndex()),
+                              Register(temps.PopLowestIndex()),
+                              masm_temps.AcquireX(),
+                              masm_temps.AcquireX());
+  } else if (temps.Count() >= 2) {
+    CopyFieldsUnrolledPairsHelper(dst, src, count,
+                                  Register(temps.PopLowestIndex()),
+                                  Register(temps.PopLowestIndex()),
+                                  masm_temps.AcquireX(),
+                                  masm_temps.AcquireX());
+  } else if (temps.Count() == 1) {
+    CopyFieldsUnrolledHelper(dst, src, count,
+                             Register(temps.PopLowestIndex()),
+                             masm_temps.AcquireX(),
+                             masm_temps.AcquireX());
   } else {
     UNREACHABLE();
   }
@@ -2808,12 +2845,14 @@
 void MacroAssembler::Prologue(PrologueFrameMode frame_mode) {
   if (frame_mode == BUILD_STUB_FRAME) {
     ASSERT(StackPointer().Is(jssp));
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
     // TODO(jbramley): Does x1 contain a JSFunction here, or does it already
     // have the special STUB smi?
-    __ Mov(Tmp0(), Operand(Smi::FromInt(StackFrame::STUB)));
+    __ Mov(temp, Operand(Smi::FromInt(StackFrame::STUB)));
     // Compiled stubs don't age, and so they don't need the predictable code
     // ageing sequence.
-    __ Push(lr, fp, cp, Tmp0());
+    __ Push(lr, fp, cp, temp);
     __ Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp);
   } else {
     if (isolate()->IsCodePreAgingActive()) {
@@ -2828,10 +2867,14 @@
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
   ASSERT(jssp.Is(StackPointer()));
+  UseScratchRegisterScope temps(this);
+  Register type_reg = temps.AcquireX();
+  Register code_reg = temps.AcquireX();
+
   Push(lr, fp, cp);
-  Mov(Tmp1(), Operand(Smi::FromInt(type)));
-  Mov(Tmp0(), Operand(CodeObject()));
-  Push(Tmp1(), Tmp0());
+  Mov(type_reg, Operand(Smi::FromInt(type)));
+  Mov(code_reg, Operand(CodeObject()));
+  Push(type_reg, code_reg);
   // jssp[4] : lr
   // jssp[3] : fp
   // jssp[2] : cp
@@ -2839,7 +2882,7 @@
   // jssp[0] : code object
 
   // Adjust FP to point to saved FP.
-  add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+  Add(fp, jssp, StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
 }
 
 
@@ -3105,9 +3148,11 @@
     return;
   }
 
-  ASSERT(!AreAliased(result, scratch1, scratch2, Tmp0(), Tmp1()));
-  ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits() &&
-         Tmp0().Is64Bits() && Tmp1().Is64Bits());
+  UseScratchRegisterScope temps(this);
+  Register scratch3 = temps.AcquireX();
+
+  ASSERT(!AreAliased(result, scratch1, scratch2, scratch3));
+  ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
 
   // Make object size into bytes.
   if ((flags & SIZE_IN_WORDS) != 0) {
@@ -3136,8 +3181,8 @@
   } else {
     if (emit_debug_code()) {
       // Assert that result actually contains top on entry.
-      Ldr(Tmp0(), MemOperand(top_address));
-      Cmp(result, Tmp0());
+      Ldr(scratch3, MemOperand(top_address));
+      Cmp(result, scratch3);
       Check(eq, kUnexpectedAllocationTop);
     }
     // Load the allocation limit. 'result' already contains the allocation top.
@@ -3149,11 +3194,11 @@
   STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
 
   // Calculate new top and bail out if new space is exhausted.
-  Adds(Tmp1(), result, object_size);
+  Adds(scratch3, result, object_size);
   B(vs, gc_required);
-  Cmp(Tmp1(), allocation_limit);
+  Cmp(scratch3, allocation_limit);
   B(hi, gc_required);
-  Str(Tmp1(), MemOperand(top_address));
+  Str(scratch3, MemOperand(top_address));
 
   // Tag the object if requested.
   if ((flags & TAG_OBJECT) != 0) {
@@ -3180,9 +3225,12 @@
     return;
   }
 
-  ASSERT(!AreAliased(object_size, result, scratch1, scratch2, Tmp0(), Tmp1()));
-  ASSERT(object_size.Is64Bits() && result.Is64Bits() && scratch1.Is64Bits() &&
-         scratch2.Is64Bits() && Tmp0().Is64Bits() && Tmp1().Is64Bits());
+  UseScratchRegisterScope temps(this);
+  Register scratch3 = temps.AcquireX();
+
+  ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
+  ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
+         scratch1.Is64Bits() && scratch2.Is64Bits());
 
   // Check relative positions of allocation top and limit addresses.
   // The values must be adjacent in memory to allow the use of LDP.
@@ -3205,8 +3253,8 @@
   } else {
     if (emit_debug_code()) {
       // Assert that result actually contains top on entry.
-      Ldr(Tmp0(), MemOperand(top_address));
-      Cmp(result, Tmp0());
+      Ldr(scratch3, MemOperand(top_address));
+      Cmp(result, scratch3);
       Check(eq, kUnexpectedAllocationTop);
     }
     // Load the allocation limit. 'result' already contains the allocation top.
@@ -3219,20 +3267,20 @@
 
   // Calculate new top and bail out if new space is exhausted
   if ((flags & SIZE_IN_WORDS) != 0) {
-    Adds(Tmp1(), result, Operand(object_size, LSL, kPointerSizeLog2));
+    Adds(scratch3, result, Operand(object_size, LSL, kPointerSizeLog2));
   } else {
-    Adds(Tmp1(), result, object_size);
+    Adds(scratch3, result, object_size);
   }
 
   if (emit_debug_code()) {
-    Tst(Tmp1(), kObjectAlignmentMask);
+    Tst(scratch3, kObjectAlignmentMask);
     Check(eq, kUnalignedAllocationInNewSpace);
   }
 
   B(vs, gc_required);
-  Cmp(Tmp1(), allocation_limit);
+  Cmp(scratch3, allocation_limit);
   B(hi, gc_required);
-  Str(Tmp1(), MemOperand(top_address));
+  Str(scratch3, MemOperand(top_address));
 
   // Tag the object if requested.
   if ((flags & TAG_OBJECT) != 0) {
@@ -3560,9 +3608,11 @@
 
 
 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
-  Ldr(Tmp0(), FieldMemOperand(object, HeapObject::kMapOffset));
-  Ldrb(Tmp0(), FieldMemOperand(Tmp0(), Map::kBitFieldOffset));
-  Tst(Tmp0(), mask);
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+  Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  Tst(temp, mask);
 }
 
 
@@ -3634,9 +3684,11 @@
 
 void MacroAssembler::CompareRoot(const Register& obj,
                                  Heap::RootListIndex index) {
-  ASSERT(!AreAliased(obj, Tmp0()));
-  LoadRoot(Tmp0(), index);
-  Cmp(obj, Tmp0());
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+  ASSERT(!AreAliased(obj, temp));
+  LoadRoot(temp, index);
+  Cmp(obj, temp);
 }
 
 
@@ -3833,60 +3885,53 @@
 
 
 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
-                                            Register scratch,
+                                            Register scratch1,
+                                            Register scratch2,
                                             Label* miss) {
-  // TODO(jbramley): Sort out the uses of Tmp0() and Tmp1() in this function.
-  // The ARM version takes two scratch registers, and that should be enough for
-  // all of the checks.
-
+  ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
   Label same_contexts;
 
-  ASSERT(!AreAliased(holder_reg, scratch));
-
   // Load current lexical context from the stack frame.
-  Ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
   // In debug mode, make sure the lexical context is set.
 #ifdef DEBUG
-  Cmp(scratch, 0);
+  Cmp(scratch1, 0);
   Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
 #endif
 
   // Load the native context of the current context.
   int offset =
       Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
-  Ldr(scratch, FieldMemOperand(scratch, offset));
-  Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+  Ldr(scratch1, FieldMemOperand(scratch1, offset));
+  Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
 
   // Check the context is a native context.
   if (emit_debug_code()) {
     // Read the first word and compare to the global_context_map.
-    Register temp = Tmp1();
-    Ldr(temp, FieldMemOperand(scratch, HeapObject::kMapOffset));
-    CompareRoot(temp, Heap::kNativeContextMapRootIndex);
+    Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
+    CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
     Check(eq, kExpectedNativeContext);
   }
 
   // Check if both contexts are the same.
-  ldr(Tmp0(), FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
-  cmp(scratch, Tmp0());
-  b(&same_contexts, eq);
+  Ldr(scratch2, FieldMemOperand(holder_reg,
+                                JSGlobalProxy::kNativeContextOffset));
+  Cmp(scratch1, scratch2);
+  B(&same_contexts, eq);
 
   // Check the context is a native context.
   if (emit_debug_code()) {
-    // Move Tmp0() into a different register, as CompareRoot will use it.
-    Register temp = Tmp1();
-    mov(temp, Tmp0());
-    CompareRoot(temp, Heap::kNullValueRootIndex);
+    // We're short on scratch registers here, so use holder_reg as a scratch.
+    Push(holder_reg);
+    Register scratch3 = holder_reg;
+
+    CompareRoot(scratch2, Heap::kNullValueRootIndex);
     Check(ne, kExpectedNonNullContext);
 
-    Ldr(temp, FieldMemOperand(temp, HeapObject::kMapOffset));
-    CompareRoot(temp, Heap::kNativeContextMapRootIndex);
+    Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+    CompareRoot(scratch3, Heap::kNativeContextMapRootIndex);
     Check(eq, kExpectedNativeContext);
-
-    // Let's consider that Tmp0() has been cloberred by the MacroAssembler.
-    // We reload it with its value.
-    ldr(Tmp0(), FieldMemOperand(holder_reg,
-                                JSGlobalProxy::kNativeContextOffset));
+    Pop(holder_reg);
   }
 
   // Check that the security token in the calling global object is
@@ -3895,12 +3940,12 @@
   int token_offset = Context::kHeaderSize +
                      Context::SECURITY_TOKEN_INDEX * kPointerSize;
 
-  ldr(scratch, FieldMemOperand(scratch, token_offset));
-  ldr(Tmp0(), FieldMemOperand(Tmp0(), token_offset));
-  cmp(scratch, Tmp0());
-  b(miss, ne);
+  Ldr(scratch1, FieldMemOperand(scratch1, token_offset));
+  Ldr(scratch2, FieldMemOperand(scratch2, token_offset));
+  Cmp(scratch1, scratch2);
+  B(miss, ne);
 
-  bind(&same_contexts);
+  Bind(&same_contexts);
 }
 
 
@@ -4003,10 +4048,10 @@
 
 void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
                                          Register address,
-                                         Register scratch,
+                                         Register scratch1,
                                          SaveFPRegsMode fp_mode,
                                          RememberedSetFinalAction and_then) {
-  ASSERT(!AreAliased(object, address, scratch));
+  ASSERT(!AreAliased(object, address, scratch1));
   Label done, store_buffer_overflow;
   if (emit_debug_code()) {
     Label ok;
@@ -4014,22 +4059,25 @@
     Abort(kRememberedSetPointerInNewSpace);
     bind(&ok);
   }
+  UseScratchRegisterScope temps(this);
+  Register scratch2 = temps.AcquireX();
+
   // Load store buffer top.
-  Mov(Tmp0(), Operand(ExternalReference::store_buffer_top(isolate())));
-  Ldr(scratch, MemOperand(Tmp0()));
+  Mov(scratch2, Operand(ExternalReference::store_buffer_top(isolate())));
+  Ldr(scratch1, MemOperand(scratch2));
   // Store pointer to buffer and increment buffer top.
-  Str(address, MemOperand(scratch, kPointerSize, PostIndex));
+  Str(address, MemOperand(scratch1, kPointerSize, PostIndex));
   // Write back new top of buffer.
-  Str(scratch, MemOperand(Tmp0()));
+  Str(scratch1, MemOperand(scratch2));
   // Call stub on end of buffer.
   // Check for end of buffer.
   ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
          (1 << (14 + kPointerSizeLog2)));
   if (and_then == kFallThroughAtEnd) {
-    Tbz(scratch, (14 + kPointerSizeLog2), &done);
+    Tbz(scratch1, (14 + kPointerSizeLog2), &done);
   } else {
     ASSERT(and_then == kReturnAtEnd);
-    Tbnz(scratch, (14 + kPointerSizeLog2), &store_buffer_overflow);
+    Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
     Ret();
   }
 
@@ -4177,7 +4225,7 @@
 }
 
 
-// Will clobber: object, address, value, Tmp0(), Tmp1().
+// Will clobber: object, address, value.
 // If lr_status is kLRHasBeenSaved, lr will also be clobbered.
 //
 // The register 'object' contains a heap object pointer. The heap object tag is
@@ -4193,8 +4241,11 @@
   ASSERT(!AreAliased(object, value));
 
   if (emit_debug_code()) {
-    Ldr(Tmp0(), MemOperand(address));
-    Cmp(Tmp0(), value);
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
+
+    Ldr(temp, MemOperand(address));
+    Cmp(temp, value);
     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
   }
 
@@ -4259,15 +4310,18 @@
 void MacroAssembler::GetMarkBits(Register addr_reg,
                                  Register bitmap_reg,
                                  Register shift_reg) {
-  ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg, no_reg));
+  ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg));
+  ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
   // addr_reg is divided into fields:
   // |63        page base        20|19    high      8|7   shift   3|2  0|
   // 'high' gives the index of the cell holding color bits for the object.
   // 'shift' gives the offset in the cell for this object's color.
   const int kShiftBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
-  Ubfx(Tmp0(), addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+  Ubfx(temp, addr_reg, kShiftBits, kPageSizeBits - kShiftBits);
   Bic(bitmap_reg, addr_reg, Page::kPageAlignmentMask);
-  Add(bitmap_reg, bitmap_reg, Operand(Tmp0(), LSL, Bitmap::kBytesPerCellLog2));
+  Add(bitmap_reg, bitmap_reg, Operand(temp, LSL, Bitmap::kBytesPerCellLog2));
   // bitmap_reg:
   // |63        page base        20|19 zeros 15|14      high      3|2  0|
   Ubfx(shift_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
@@ -4491,8 +4545,6 @@
 void MacroAssembler::AssertRegisterIsRoot(Register reg,
                                           Heap::RootListIndex index,
                                           BailoutReason reason) {
-  // CompareRoot uses Tmp0().
-  ASSERT(!reg.Is(Tmp0()));
   if (emit_debug_code()) {
     CompareRoot(reg, index);
     Check(eq, reason);
@@ -4502,7 +4554,8 @@
 
 void MacroAssembler::AssertFastElements(Register elements) {
   if (emit_debug_code()) {
-    Register temp = Tmp1();
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
     Label ok;
     Ldr(temp, FieldMemOperand(elements, HeapObject::kMapOffset));
     JumpIfRoot(temp, Heap::kFixedArrayMapRootIndex, &ok);
@@ -4516,7 +4569,8 @@
 
 void MacroAssembler::AssertIsString(const Register& object) {
   if (emit_debug_code()) {
-    Register temp = Tmp1();
+    UseScratchRegisterScope temps(this);
+    Register temp = temps.AcquireX();
     STATIC_ASSERT(kSmiTag == 0);
     Tst(object, Operand(kSmiTagMask));
     Check(ne, kOperandIsNotAString);
@@ -4563,6 +4617,12 @@
   SetStackPointer(jssp);
   Mov(jssp, old_stack_pointer);
 
+  // We need some scratch registers for the MacroAssembler, so make sure we have
+  // some. This is safe here because Abort never returns.
+  RegList old_tmp_list = TmpList()->list();
+  TmpList()->Combine(ip0);
+  TmpList()->Combine(ip1);
+
   if (use_real_aborts()) {
     // Avoid infinite recursion; Push contains some assertions that use Abort.
     NoUseRealAbortsScope no_real_aborts(this);
@@ -4599,6 +4659,7 @@
   }
 
   SetStackPointer(old_stack_pointer);
+  TmpList()->set_list(old_tmp_list);
 }
 
 
@@ -4606,22 +4667,23 @@
     ElementsKind expected_kind,
     ElementsKind transitioned_kind,
     Register map_in_out,
-    Register scratch,
+    Register scratch1,
+    Register scratch2,
     Label* no_map_match) {
   // Load the global or builtins object from the current context.
-  Ldr(scratch, GlobalObjectMemOperand());
-  Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+  Ldr(scratch1, GlobalObjectMemOperand());
+  Ldr(scratch1, FieldMemOperand(scratch1, GlobalObject::kNativeContextOffset));
 
   // Check that the function's map is the same as the expected cached map.
-  Ldr(scratch, ContextMemOperand(scratch, Context::JS_ARRAY_MAPS_INDEX));
+  Ldr(scratch1, ContextMemOperand(scratch1, Context::JS_ARRAY_MAPS_INDEX));
   size_t offset = (expected_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
-  Ldr(Tmp0(), FieldMemOperand(scratch, offset));
-  Cmp(map_in_out, Tmp0());
+  Ldr(scratch2, FieldMemOperand(scratch1, offset));
+  Cmp(map_in_out, scratch2);
   B(ne, no_map_match);
 
   // Use the transitioned cached map.
   offset = (transitioned_kind * kPointerSize) + FixedArrayBase::kHeaderSize;
-  Ldr(map_in_out, FieldMemOperand(scratch, offset));
+  Ldr(map_in_out, FieldMemOperand(scratch1, offset));
 }
 
 
@@ -4663,14 +4725,18 @@
   // in most cases anyway, so this restriction shouldn't be too serious.
   ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
 
-  // We cannot print Tmp0() or Tmp1() as they're used internally by the macro
-  // assembler. We cannot print the stack pointer because it is typically used
-  // to preserve caller-saved registers (using other Printf variants which
-  // depend on this helper).
-  ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg0));
-  ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg1));
-  ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg2));
-  ASSERT(!AreAliased(Tmp0(), Tmp1(), StackPointer(), arg3));
+  // Make sure that the macro assembler doesn't try to use any of our arguments
+  // as scratch registers.
+  ASSERT(!TmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
+  ASSERT(!FPTmpList()->IncludesAliasOf(arg0, arg1, arg2, arg3));
+
+  // We cannot print the stack pointer because it is typically used to preserve
+  // caller-saved registers (using other Printf variants which depend on this
+  // helper).
+  ASSERT(!AreAliased(arg0, StackPointer()));
+  ASSERT(!AreAliased(arg1, StackPointer()));
+  ASSERT(!AreAliased(arg2, StackPointer()));
+  ASSERT(!AreAliased(arg3, StackPointer()));
 
   static const int kMaxArgCount = 4;
   // Assume that we have the maximum number of arguments until we know
@@ -4812,22 +4878,47 @@
                             const CPURegister& arg1,
                             const CPURegister& arg2,
                             const CPURegister& arg3) {
+  // Printf is expected to preserve all registers, so make sure that none are
+  // available as scratch registers until we've preserved them.
+  RegList old_tmp_list = TmpList()->list();
+  RegList old_fp_tmp_list = FPTmpList()->list();
+  TmpList()->set_list(0);
+  FPTmpList()->set_list(0);
+
   // Preserve all caller-saved registers as well as NZCV.
   // If csp is the stack pointer, PushCPURegList asserts that the size of each
   // list is a multiple of 16 bytes.
   PushCPURegList(kCallerSaved);
   PushCPURegList(kCallerSavedFP);
-  // Use Tmp0() as a scratch register. It is not accepted by Printf so it will
-  // never overlap an argument register.
-  Mrs(Tmp0(), NZCV);
-  Push(Tmp0(), xzr);
+
+  // We can use caller-saved registers as scratch values (except for argN).
+  CPURegList tmp_list = kCallerSaved;
+  CPURegList fp_tmp_list = kCallerSavedFP;
+  tmp_list.Remove(arg0, arg1, arg2, arg3);
+  fp_tmp_list.Remove(arg0, arg1, arg2, arg3);
+  TmpList()->set_list(tmp_list.list());
+  FPTmpList()->set_list(fp_tmp_list.list());
+
+  // Preserve NZCV.
+  { UseScratchRegisterScope temps(this);
+    Register tmp = temps.AcquireX();
+    Mrs(tmp, NZCV);
+    Push(tmp, xzr);
+  }
 
   PrintfNoPreserve(format, arg0, arg1, arg2, arg3);
 
-  Pop(xzr, Tmp0());
-  Msr(NZCV, Tmp0());
+  { UseScratchRegisterScope temps(this);
+    Register tmp = temps.AcquireX();
+    Pop(xzr, tmp);
+    Msr(NZCV, tmp);
+  }
+
   PopCPURegList(kCallerSavedFP);
   PopCPURegList(kCallerSaved);
+
+  TmpList()->set_list(old_tmp_list);
+  FPTmpList()->set_list(old_fp_tmp_list);
 }
 
 
@@ -4932,7 +5023,51 @@
 #endif
 
 
+void MacroAssembler::FlooringDiv(Register result,
+                                 Register dividend,
+                                 int32_t divisor) {
+  ASSERT(!AreAliased(result, dividend));
+  ASSERT(result.Is32Bits() && dividend.Is32Bits());
+  MultiplierAndShift ms(divisor);
+  Mov(result, Operand(ms.multiplier()));
+  Smull(result.X(), dividend, result);
+  Asr(result.X(), result.X(), 32);
+  if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
+  if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
+  if (ms.shift() > 0) Asr(result, result, ms.shift());
+}
+
+
 #undef __
+
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+  available_->set_list(old_available_);
+  availablefp_->set_list(old_availablefp_);
+}
+
+
+Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
+  int code = AcquireNextAvailable(available_).code();
+  return Register::Create(code, reg.SizeInBits());
+}
+
+
+FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
+  int code = AcquireNextAvailable(availablefp_).code();
+  return FPRegister::Create(code, reg.SizeInBits());
+}
+
+
+CPURegister UseScratchRegisterScope::AcquireNextAvailable(
+    CPURegList* available) {
+  CHECK(!available->IsEmpty());
+  CPURegister result = available->PopLowestIndex();
+  ASSERT(!AreAliased(result, xzr, csp));
+  return result;
+}
+
+
 #define __ masm->
 
 
diff --git a/src/a64/macro-assembler-a64.h b/src/a64/macro-assembler-a64.h
index b11b9b6..f5fa14f 100644
--- a/src/a64/macro-assembler-a64.h
+++ b/src/a64/macro-assembler-a64.h
@@ -520,7 +520,6 @@
   //
   // Other than the registers passed into Pop, the stack pointer and (possibly)
   // the system stack pointer, these methods do not modify any other registers.
-  // Scratch registers such as Tmp0() and Tmp1() are preserved.
   void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
             const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
   void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
@@ -746,7 +745,7 @@
 
   // Set the current stack pointer, but don't generate any code.
   inline void SetStackPointer(const Register& stack_pointer) {
-    ASSERT(!AreAliased(stack_pointer, Tmp0(), Tmp1()));
+    ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
     sp_ = stack_pointer;
   }
 
@@ -940,13 +939,13 @@
 
   // Copy fields from 'src' to 'dst', where both are tagged objects.
   // The 'temps' list is a list of X registers which can be used for scratch
-  // values. The temps list must include at least one register, and it must not
-  // contain Tmp0() or Tmp1().
+  // values. The temps list must include at least one register.
   //
   // Currently, CopyFields cannot make use of more than three registers from
   // the 'temps' list.
   //
-  // As with several MacroAssembler methods, Tmp0() and Tmp1() will be used.
+  // CopyFields expects to be able to take at least two registers from
+  // MacroAssembler::TmpList().
   void CopyFields(Register dst, Register src, CPURegList temps, unsigned count);
 
   // Copies a number of bytes from src to dst. All passed registers are
@@ -1449,7 +1448,6 @@
   void LoadElementsKind(Register result, Register object);
 
   // Compare the object in a register to a value from the root list.
-  // Uses the Tmp0() register as scratch.
   void CompareRoot(const Register& obj, Heap::RootListIndex index);
 
   // Compare the object in a register to a value and jump if they are equal.
@@ -1556,7 +1554,8 @@
   // on access to global objects across environments. The holder register
   // is left untouched, whereas both scratch registers are clobbered.
   void CheckAccessGlobalProxy(Register holder_reg,
-                              Register scratch,
+                              Register scratch1,
+                              Register scratch2,
                               Label* miss);
 
   // Hash the interger value in 'key' register.
@@ -1588,8 +1587,6 @@
   // Frames.
 
   // Activation support.
-  // Note that Tmp0() and Tmp1() are used as a scratch registers. This is safe
-  // because these methods are not used in Crankshaft.
   void EnterFrame(StackFrame::Type type);
   void LeaveFrame(StackFrame::Type type);
 
@@ -1677,6 +1674,10 @@
 
   void LoadContext(Register dst, int context_chain_length);
 
+  // Emit code for a flooring division by a constant. The dividend register is
+  // unchanged. Dividend and result must be different.
+  void FlooringDiv(Register result, Register dividend, int32_t divisor);
+
   // ---------------------------------------------------------------------------
   // StatsCounter support
 
@@ -1700,7 +1701,7 @@
   // in new space.
   void RememberedSetHelper(Register object,  // Used for debug code.
                            Register addr,
-                           Register scratch,
+                           Register scratch1,
                            SaveFPRegsMode save_fp,
                            RememberedSetFinalAction and_then);
 
@@ -1885,7 +1886,8 @@
       ElementsKind expected_kind,
       ElementsKind transitioned_kind,
       Register map_in_out,
-      Register scratch,
+      Register scratch1,
+      Register scratch2,
       Label* no_map_match);
 
   void LoadGlobalFunction(int index, Register function);
@@ -1896,72 +1898,8 @@
                                     Register map,
                                     Register scratch);
 
-  // --------------------------------------------------------------------------
-  // Set the registers used internally by the MacroAssembler as scratch
-  // registers. These registers are used to implement behaviours which are not
-  // directly supported by A64, and where an intermediate result is required.
-  //
-  // Both tmp0 and tmp1 may be set to any X register except for xzr, sp,
-  // and StackPointer(). Also, they must not be the same register (though they
-  // may both be NoReg).
-  //
-  // It is valid to set either or both of these registers to NoReg if you don't
-  // want the MacroAssembler to use any scratch registers. In a debug build, the
-  // Assembler will assert that any registers it uses are valid. Be aware that
-  // this check is not present in release builds. If this is a problem, use the
-  // Assembler directly.
-  void SetScratchRegisters(const Register& tmp0, const Register& tmp1) {
-    // V8 assumes the macro assembler uses ip0 and ip1 as temp registers.
-    ASSERT(tmp0.IsNone() || tmp0.Is(ip0));
-    ASSERT(tmp1.IsNone() || tmp1.Is(ip1));
-
-    ASSERT(!AreAliased(xzr, csp, tmp0, tmp1));
-    ASSERT(!AreAliased(StackPointer(), tmp0, tmp1));
-    tmp0_ = tmp0;
-    tmp1_ = tmp1;
-  }
-
-  const Register& Tmp0() const {
-    return tmp0_;
-  }
-
-  const Register& Tmp1() const {
-    return tmp1_;
-  }
-
-  const Register WTmp0() const {
-    return Register::Create(tmp0_.code(), kWRegSize);
-  }
-
-  const Register WTmp1() const {
-    return Register::Create(tmp1_.code(), kWRegSize);
-  }
-
-  void SetFPScratchRegister(const FPRegister& fptmp0) {
-    fptmp0_ = fptmp0;
-  }
-
-  const FPRegister& FPTmp0() const {
-    return fptmp0_;
-  }
-
-  const Register AppropriateTempFor(
-      const Register& target,
-      const CPURegister& forbidden = NoCPUReg) const {
-    Register candidate = forbidden.Is(Tmp0()) ? Tmp1() : Tmp0();
-    ASSERT(!candidate.Is(target));
-    return Register::Create(candidate.code(), target.SizeInBits());
-  }
-
-  const FPRegister AppropriateTempFor(
-      const FPRegister& target,
-      const CPURegister& forbidden = NoCPUReg) const {
-    USE(forbidden);
-    FPRegister candidate = FPTmp0();
-    ASSERT(!candidate.Is(forbidden));
-    ASSERT(!candidate.Is(target));
-    return FPRegister::Create(candidate.code(), target.SizeInBits());
-  }
+  CPURegList* TmpList() { return &tmp_list_; }
+  CPURegList* FPTmpList() { return &fptmp_list_; }
 
   // Like printf, but print at run-time from generated code.
   //
@@ -1974,7 +1912,7 @@
   // size.
   //
   // The following registers cannot be printed:
-  //    Tmp0(), Tmp1(), StackPointer(), csp.
+  //    StackPointer(), csp.
   //
   // This function automatically preserves caller-saved registers so that
   // calling code can use Printf at any point without having to worry about
@@ -2059,11 +1997,14 @@
   // These each implement CopyFields in a different way.
   void CopyFieldsLoopPairsHelper(Register dst, Register src, unsigned count,
                                  Register scratch1, Register scratch2,
-                                 Register scratch3);
+                                 Register scratch3, Register scratch4,
+                                 Register scratch5);
   void CopyFieldsUnrolledPairsHelper(Register dst, Register src, unsigned count,
-                                     Register scratch1, Register scratch2);
+                                     Register scratch1, Register scratch2,
+                                     Register scratch3, Register scratch4);
   void CopyFieldsUnrolledHelper(Register dst, Register src, unsigned count,
-                                Register scratch1);
+                                Register scratch1, Register scratch2,
+                                Register scratch3);
 
   // The actual Push and Pop implementations. These don't generate any code
   // other than that required for the push or pop. This allows
@@ -2144,10 +2085,9 @@
   // The register to use as a stack pointer for stack operations.
   Register sp_;
 
-  // Scratch registers used internally by the MacroAssembler.
-  Register tmp0_;
-  Register tmp1_;
-  FPRegister fptmp0_;
+  // Scratch registers available for use by the MacroAssembler.
+  CPURegList tmp_list_;
+  CPURegList fptmp_list_;
 
   void InitializeNewString(Register string,
                            Register length,
@@ -2228,6 +2168,49 @@
 };
 
 
+// This scope utility allows scratch registers to be managed safely. The
+// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
+// registers. These registers can be allocated on demand, and will be returned
+// at the end of the scope.
+//
+// When the scope ends, the MacroAssembler's lists will be restored to their
+// original state, even if the lists were modified by some other means.
+class UseScratchRegisterScope {
+ public:
+  explicit UseScratchRegisterScope(MacroAssembler* masm)
+      : available_(masm->TmpList()),
+        availablefp_(masm->FPTmpList()),
+        old_available_(available_->list()),
+        old_availablefp_(availablefp_->list()) {
+    ASSERT(available_->type() == CPURegister::kRegister);
+    ASSERT(availablefp_->type() == CPURegister::kFPRegister);
+  }
+
+  ~UseScratchRegisterScope();
+
+  // Take a register from the appropriate temps list. It will be returned
+  // automatically when the scope ends.
+  Register AcquireW() { return AcquireNextAvailable(available_).W(); }
+  Register AcquireX() { return AcquireNextAvailable(available_).X(); }
+  FPRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
+  FPRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+
+  Register AcquireSameSizeAs(const Register& reg);
+  FPRegister AcquireSameSizeAs(const FPRegister& reg);
+
+ private:
+  static CPURegister AcquireNextAvailable(CPURegList* available);
+
+  // Available scratch registers.
+  CPURegList* available_;     // kRegister
+  CPURegList* availablefp_;   // kFPRegister
+
+  // The state of the available lists at the start of this scope.
+  RegList old_available_;     // kRegister
+  RegList old_availablefp_;   // kFPRegister
+};
+
+
 inline MemOperand ContextMemOperand(Register context, int index) {
   return MemOperand(context, Context::SlotOffset(index));
 }
diff --git a/src/a64/stub-cache-a64.cc b/src/a64/stub-cache-a64.cc
index 1e09f4a..83d6495 100644
--- a/src/a64/stub-cache-a64.cc
+++ b/src/a64/stub-cache-a64.cc
@@ -903,7 +903,8 @@
       // the map check so that we know that the object is actually a global
       // object.
       if (current_map->IsJSGlobalProxyMap()) {
-        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+        UseScratchRegisterScope temps(masm());
+        __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
       } else if (current_map->IsJSGlobalObjectMap()) {
         GenerateCheckPropertyCell(
             masm(), Handle<JSGlobalObject>::cast(current), name,
@@ -940,7 +941,7 @@
   ASSERT(current_map->IsJSGlobalProxyMap() ||
          !current_map->is_access_check_needed());
   if (current_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, miss);
+    __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
   }
 
   // Return the register containing the holder.
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index 3399958..e388ed5 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -113,6 +113,13 @@
 }
 
 
+Address RelocInfo::constant_pool_entry_address() {
+  ASSERT(IsInConstantPool());
+  ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+  return Assembler::target_pointer_address_at(pc_);
+}
+
+
 int RelocInfo::target_address_size() {
   return kPointerSize;
 }
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 35279e5..cf14172 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -300,6 +300,11 @@
 }
 
 
+bool RelocInfo::IsInConstantPool() {
+  return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
+}
+
+
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
   Instr* pc = reinterpret_cast<Instr*>(pc_);
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index b933c4a..f661ad8 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -1898,8 +1898,7 @@
 // * function: r1 or at sp.
 //
 // An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed on the stack,
-// in the safepoint slot for register r4.
+// In this case the offset to the inline site to patch is passed in r5.
 // (See LCodeGen::DoInstanceOfKnownGlobal)
 void InstanceofStub::Generate(MacroAssembler* masm) {
   // Call site inlining and patching implies arguments in registers.
@@ -1958,14 +1957,14 @@
     ASSERT(HasArgsInRegisters());
     // Patch the (relocated) inlined map check.
 
-    // The offset was stored in r4 safepoint slot.
-    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
-    __ LoadFromSafepointRegisterSlot(scratch, r4);
-    __ sub(inline_site, lr, scratch);
-    // Get the map location in scratch and patch it.
-    __ GetRelocatedValueLocation(inline_site, scratch);
-    __ ldr(scratch, MemOperand(scratch));
-    __ str(map, FieldMemOperand(scratch, Cell::kValueOffset));
+    // The offset was stored in r5
+    //   (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+    const Register offset = r5;
+    __ sub(inline_site, lr, offset);
+    // Get the map location in r5 and patch it.
+    __ GetRelocatedValueLocation(inline_site, offset);
+    __ ldr(offset, MemOperand(offset));
+    __ str(map, FieldMemOperand(offset, Cell::kValueOffset));
   }
 
   // Register mapping: r3 is object map and r4 is function prototype.
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 63109f5..f12b6ea 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -1257,6 +1257,23 @@
 }
 
 
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LDivByConstI(dividend, divisor));
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->RangeCanInclude(0) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32);
+  return can_deopt ? AssignEnvironment(result) : result;
+}
+
+
 LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
   ASSERT(instr->representation().IsSmiOrInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
@@ -1271,7 +1288,13 @@
 
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    return instr->RightIsPowerOf2() ? DoDivByPowerOf2I(instr) : DoDivI(instr);
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
   } else {
@@ -1280,29 +1303,6 @@
 }
 
 
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
-  uint32_t divisor_abs = abs(divisor);
-  // Dividing by 0 or powers of 2 is easy.
-  if (divisor == 0 || IsPowerOf2(divisor_abs)) return true;
-
-  // We have magic numbers for a few specific divisors.
-  // Details and proofs can be found in:
-  // - Hacker's Delight, Henry S. Warren, Jr.
-  // - The PowerPC Compiler Writer’s Guide
-  // and probably many others.
-  //
-  // We handle
-  //   <divisor with magic numbers> * <power of 2>
-  // but not
-  //   <divisor with magic numbers> * <other divisor with magic numbers>
-  int32_t power_of_2_factor =
-    CompilerIntrinsics::CountTrailingZeros(divisor_abs);
-  DivMagicNumbers magic_numbers =
-    DivMagicNumberFor(divisor_abs >> power_of_2_factor);
-  return magic_numbers.M != InvalidDivMagicNumber.M;
-}
-
-
 LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
   LOperand* dividend = UseRegisterAtStart(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
@@ -1317,15 +1317,18 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
-  LOperand* divisor = CpuFeatures::IsSupported(SUDIV)
-      ? UseRegister(instr->right())
-      : UseOrConstant(instr->right());
-  LOperand* remainder = TempRegister();
+  int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result =
-      DefineAsRegister(
-          new(zone()) LFlooringDivByConstI(dividend, divisor, remainder));
-  return AssignEnvironment(result);
+      DefineAsRegister(new(zone()) LFlooringDivByConstI(dividend, divisor));
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->RangeCanInclude(0) && divisor < 0);
+  return can_deopt ? AssignEnvironment(result) : result;
 }
 
 
@@ -1333,12 +1336,7 @@
   if (instr->RightIsPowerOf2()) {
     return DoFlooringDivByPowerOf2I(instr);
   } else if (instr->right()->IsConstant()) {
-    // LMathFloorOfDiv can currently only handle a subset of divisors, so fall
-    // back to a flooring division in all other cases.
-    return (CpuFeatures::IsSupported(SUDIV) ||
-            HasMagicNumberForDivisor(instr->right()->GetInteger32Constant()))
-        ? DoFlooringDivByConstI(instr)
-        : DoDivI(instr);
+    return DoFlooringDivByConstI(instr);
   } else {
     return DoDivI(instr);
   }
@@ -1360,6 +1358,22 @@
 }
 
 
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  ASSERT(instr->representation().IsSmiOrInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LModByConstI(dividend, divisor));
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->CanBeNegative());
+  return can_deopt ? AssignEnvironment(result) : result;
+}
+
+
 LInstruction* LChunkBuilder::DoModI(HMod* instr) {
   ASSERT(instr->representation().IsSmiOrInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
@@ -1395,7 +1409,13 @@
 
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    return instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MOD, instr);
   } else {
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index a304258..86cf2f2 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -86,6 +86,7 @@
   V(DebugBreak)                                 \
   V(DeclareGlobals)                             \
   V(Deoptimize)                                 \
+  V(DivByConstI)                                \
   V(DivByPowerOf2I)                             \
   V(DivI)                                       \
   V(DoubleBits)                                 \
@@ -137,6 +138,7 @@
   V(MathPowHalf)                                \
   V(MathRound)                                  \
   V(MathSqrt)                                   \
+  V(ModByConstI)                                \
   V(ModByPowerOf2I)                             \
   V(ModI)                                       \
   V(MulI)                                       \
@@ -638,6 +640,24 @@
 };
 
 
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
 class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
  public:
   LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) {
@@ -675,6 +695,24 @@
 };
 
 
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
 class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LDivI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -713,20 +751,22 @@
 };
 
 
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
-  LFlooringDivByConstI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+  LFlooringDivByConstI(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
-    inputs_[1] = divisor;
-    temps_[0] = temp;
+    divisor_ = divisor;
   }
 
   LOperand* dividend() { return inputs_[0]; }
-  LOperand* divisor() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
   DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
 };
 
 
@@ -2711,8 +2751,10 @@
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
   LInstruction* DoMathClz32(HUnaryMathOperation* instr);
   LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
   LInstruction* DoDivI(HBinaryOperation* instr);
   LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
   LInstruction* DoModI(HMod* instr);
   LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
   LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index a95bb5e..b10ed59 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -1144,6 +1144,36 @@
 }
 
 
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  ASSERT(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr->environment());
+    return;
+  }
+
+  __ FlooringDiv(result, dividend, Abs(divisor));
+  __ add(result, result, Operand(dividend, LSR, 31));
+  __ mov(ip, Operand(Abs(divisor)));
+  __ smull(result, ip, result, ip);
+  __ sub(result, dividend, result, SetCC);
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hmod->left()->CanBeNegative()) {
+    Label remainder_not_zero;
+    __ b(ne, &remainder_not_zero);
+    __ cmp(dividend, Operand::Zero());
+    DeoptimizeIf(lt, instr->environment());
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
 void LCodeGen::DoModI(LModI* instr) {
   HMod* hmod = instr->hydrogen();
   HValue* left = hmod->left();
@@ -1258,100 +1288,6 @@
 }
 
 
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
-    Register result,
-    Register dividend,
-    int32_t divisor,
-    Register remainder,
-    Register scratch,
-    LEnvironment* environment) {
-  ASSERT(!AreAliased(dividend, scratch, ip));
-  ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
-
-  uint32_t divisor_abs = abs(divisor);
-
-  int32_t power_of_2_factor =
-    CompilerIntrinsics::CountTrailingZeros(divisor_abs);
-
-  switch (divisor_abs) {
-    case 0:
-      DeoptimizeIf(al, environment);
-      return;
-
-    case 1:
-      if (divisor > 0) {
-        __ Move(result, dividend);
-      } else {
-        __ rsb(result, dividend, Operand::Zero(), SetCC);
-        DeoptimizeIf(vs, environment);
-      }
-      // Compute the remainder.
-      __ mov(remainder, Operand::Zero());
-      return;
-
-    default:
-      if (IsPowerOf2(divisor_abs)) {
-        // Branch and condition free code for integer division by a power
-        // of two.
-        int32_t power = WhichPowerOf2(divisor_abs);
-        if (power > 1) {
-          __ mov(scratch, Operand(dividend, ASR, power - 1));
-        }
-        __ add(scratch, dividend, Operand(scratch, LSR, 32 - power));
-        __ mov(result, Operand(scratch, ASR, power));
-        // Negate if necessary.
-        // We don't need to check for overflow because the case '-1' is
-        // handled separately.
-        if (divisor < 0) {
-          ASSERT(divisor != -1);
-          __ rsb(result, result, Operand::Zero());
-        }
-        // Compute the remainder.
-        if (divisor > 0) {
-          __ sub(remainder, dividend, Operand(result, LSL, power));
-        } else {
-          __ add(remainder, dividend, Operand(result, LSL, power));
-        }
-        return;
-      } else {
-        // Use magic numbers for a few specific divisors.
-        // Details and proofs can be found in:
-        // - Hacker's Delight, Henry S. Warren, Jr.
-        // - The PowerPC Compiler Writer’s Guide
-        // and probably many others.
-        //
-        // We handle
-        //   <divisor with magic numbers> * <power of 2>
-        // but not
-        //   <divisor with magic numbers> * <other divisor with magic numbers>
-        DivMagicNumbers magic_numbers =
-          DivMagicNumberFor(divisor_abs >> power_of_2_factor);
-        // Branch and condition free code for integer division by a power
-        // of two.
-        const int32_t M = magic_numbers.M;
-        const int32_t s = magic_numbers.s + power_of_2_factor;
-
-        __ mov(ip, Operand(M));
-        __ smull(ip, scratch, dividend, ip);
-        if (M < 0) {
-          __ add(scratch, scratch, Operand(dividend));
-        }
-        if (s > 0) {
-          __ mov(scratch, Operand(scratch, ASR, s));
-        }
-        __ add(result, scratch, Operand(dividend, LSR, 31));
-        if (divisor < 0) __ rsb(result, result, Operand::Zero());
-        // Compute the remainder.
-        __ mov(ip, Operand(divisor));
-        // This sequence could be replaced with 'mls' when
-        // it gets implemented.
-        __ mul(scratch, result, ip);
-        __ sub(remainder, dividend, scratch);
-      }
-  }
-}
-
-
 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
@@ -1398,6 +1334,38 @@
 }
 
 
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  ASSERT(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr->environment());
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hdiv->left()->RangeCanInclude(0) && divisor < 0) {
+    __ cmp(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr->environment());
+  }
+
+  __ FlooringDiv(result, dividend, Abs(divisor));
+  __ add(result, result, Operand(dividend, LSR, 31));
+  if (divisor < 0) __ rsb(result, result, Operand::Zero());
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    __ mov(ip, Operand(divisor));
+    __ smull(scratch0(), ip, result, ip);
+    __ sub(scratch0(), scratch0(), dividend, SetCC);
+    DeoptimizeIf(ne, instr->environment());
+  }
+}
+
+
 void LCodeGen::DoDivI(LDivI* instr) {
   const Register left = ToRegister(instr->left());
   const Register right = ToRegister(instr->right());
@@ -1531,71 +1499,25 @@
 
 
 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
-  Register left = ToRegister(instr->dividend());
-  Register remainder = ToRegister(instr->temp());
-  Register scratch = scratch0();
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
+  ASSERT(!dividend.is(result));
 
-  if (!CpuFeatures::IsSupported(SUDIV)) {
-    // If the CPU doesn't support sdiv instruction, we only optimize when we
-    // have magic numbers for the divisor. The standard integer division routine
-    // is usually slower than transitionning to VFP.
-    ASSERT(instr->divisor()->IsConstantOperand());
-    int32_t divisor = ToInteger32(LConstantOperand::cast(instr->divisor()));
-    ASSERT(LChunkBuilder::HasMagicNumberForDivisor(divisor));
-    if (divisor < 0) {
-      __ cmp(left, Operand::Zero());
-      DeoptimizeIf(eq, instr->environment());
-    }
-    EmitSignedIntegerDivisionByConstant(result,
-                                        left,
-                                        divisor,
-                                        remainder,
-                                        scratch,
-                                        instr->environment());
-    // We performed a truncating division. Correct the result if necessary.
-    __ cmp(remainder, Operand::Zero());
-    __ teq(remainder, Operand(divisor), ne);
-    __ sub(result, result, Operand(1), LeaveCC, mi);
-  } else {
-    CpuFeatureScope scope(masm(), SUDIV);
-    // TODO(svenpanne) We *statically* know the divisor, use that fact!
-    Register right = ToRegister(instr->divisor());
-
-    // Check for x / 0.
-    __ cmp(right, Operand::Zero());
-    DeoptimizeIf(eq, instr->environment());
-
-    // Check for (kMinInt / -1).
-    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-      __ cmp(left, Operand(kMinInt));
-      __ cmp(right, Operand(-1), eq);
-      DeoptimizeIf(eq, instr->environment());
-    }
-
-    // Check for (0 / -x) that will produce negative zero.
-    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      __ cmp(right, Operand::Zero());
-      __ cmp(left, Operand::Zero(), mi);
-      // "right" can't be null because the code would have already been
-      // deoptimized. The Z flag is set only if (right < 0) and (left == 0).
-      // In this case we need to deoptimize to produce a -0.
-      DeoptimizeIf(eq, instr->environment());
-    }
-
-    Label done;
-    __ sdiv(result, left, right);
-    // If both operands have the same sign then we are done.
-    __ eor(remainder, left, Operand(right), SetCC);
-    __ b(pl, &done);
-
-    // Check if the result needs to be corrected.
-    __ mls(remainder, result, right, left);
-    __ cmp(remainder, Operand::Zero());
-    __ sub(result, result, Operand(1), LeaveCC, ne);
-
-    __ bind(&done);
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr->environment());
+    return;
   }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hdiv->left()->RangeCanInclude(0) && divisor < 0) {
+    __ cmp(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr->environment());
+  }
+
+  __ FlooringDiv(result, dividend, divisor);
 }
 
 
@@ -2781,9 +2703,6 @@
   Register temp = ToRegister(instr->temp());
   Register result = ToRegister(instr->result());
 
-  ASSERT(object.is(r0));
-  ASSERT(result.is(r0));
-
   // A Smi is not instance of anything.
   __ JumpIfSmi(object, &false_result);
 
@@ -2841,9 +2760,6 @@
 
 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                                Label* map_check) {
-  Register result = ToRegister(instr->result());
-  ASSERT(result.is(r0));
-
   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   flags = static_cast<InstanceofStub::Flags>(
       flags | InstanceofStub::kArgsInRegisters);
@@ -2856,37 +2772,32 @@
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
   LoadContextFromDeferred(instr->context());
 
-  // Get the temp register reserved by the instruction. This needs to be r4 as
-  // its slot of the pushing of safepoint registers is used to communicate the
-  // offset to the location of the map check.
-  Register temp = ToRegister(instr->temp());
-  ASSERT(temp.is(r4));
   __ Move(InstanceofStub::right(), instr->function());
-  static const int kAdditionalDelta = 5;
+  static const int kAdditionalDelta = 4;
   // Make sure that code size is predicable, since we use specific constants
   // offsets in the code to find embedded values..
-  PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
+  PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
   int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
   Label before_push_delta;
   __ bind(&before_push_delta);
   __ BlockConstPoolFor(kAdditionalDelta);
-  __ mov(temp, Operand(delta * kPointerSize));
+  // r5 is used to communicate the offset to the location of the map check.
+  __ mov(r5, Operand(delta * kPointerSize));
   // The mov above can generate one or two instructions. The delta was computed
   // for two instructions, so we need to pad here in case of one instruction.
   if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
     ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
     __ nop();
   }
-  __ StoreToSafepointRegisterSlot(temp, temp);
   CallCodeGeneric(stub.GetCode(isolate()),
                   RelocInfo::CODE_TARGET,
                   instr,
                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
-  // Put the result value into the result register slot and
+  // Put the result value (r0) into the result register slot and
   // restore all registers.
-  __ StoreToSafepointRegisterSlot(result, result);
+  __ StoreToSafepointRegisterSlot(r0, ToRegister(instr->result()));
 }
 
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index 1638ee9..e0f5009 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -349,17 +349,6 @@
                     int* offset,
                     AllocationSiteMode mode);
 
-  // Emit optimized code for integer division.
-  // Inputs are signed.
-  // All registers are clobbered.
-  // If 'remainder' is no_reg, it is not computed.
-  void EmitSignedIntegerDivisionByConstant(Register result,
-                                           Register dividend,
-                                           int32_t divisor,
-                                           Register remainder,
-                                           Register scratch,
-                                           LEnvironment* environment);
-
   void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
   void DoLoadKeyedExternalArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index d705c90..437b731 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -4032,6 +4032,25 @@
 }
 
 
+void MacroAssembler::FlooringDiv(Register result,
+                                 Register dividend,
+                                 int32_t divisor) {
+  ASSERT(!dividend.is(result));
+  ASSERT(!dividend.is(ip));
+  ASSERT(!result.is(ip));
+  MultiplierAndShift ms(divisor);
+  mov(ip, Operand(ms.multiplier()));
+  smull(ip, result, dividend, ip);
+  if (divisor > 0 && ms.multiplier() < 0) {
+    add(result, result, Operand(dividend));
+  }
+  if (divisor < 0 && ms.multiplier() > 0) {
+    sub(result, result, Operand(dividend));
+  }
+  if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift()));
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 0eaf4dc..f98f141 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1155,6 +1155,10 @@
   }
 
 
+  // Emit code for a flooring division by a constant. The dividend register is
+  // unchanged and ip gets clobbered. Dividend and result must be different.
+  void FlooringDiv(Register result, Register dividend, int32_t divisor);
+
   // ---------------------------------------------------------------------------
   // StatsCounter support
 
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 65f5c91..0ccb28b 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -1303,21 +1303,6 @@
 Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
     Handle<JSObject> object,
     Handle<Name> name) {
-  Label miss;
-
-  // Check that the map of the object hasn't changed.
-  __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
-              DO_SMI_CHECK);
-
-  // Perform global security token check if needed.
-  if (object->IsJSGlobalProxy()) {
-    __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
-  }
-
-  // Stub is never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
   __ Push(receiver(), this->name(), value());
 
   // Do tail-call to the runtime system.
@@ -1325,10 +1310,6 @@
       ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
   __ TailCallExternalReference(store_ic_property, 3, 1);
 
-  // Handle store cache miss.
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
   // Return the generated code.
   return GetCode(kind(), Code::FAST, name);
 }
diff --git a/src/assembler.cc b/src/assembler.cc
index d51264d..0117fc5 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -1593,4 +1593,38 @@
   return written;
 }
 
+
+MultiplierAndShift::MultiplierAndShift(int32_t d) {
+  ASSERT(d <= -2 || 2 <= d);
+  const uint32_t two31 = 0x80000000;
+  uint32_t ad = Abs(d);
+  uint32_t t = two31 + (uint32_t(d) >> 31);
+  uint32_t anc = t - 1 - t % ad;   // Absolute value of nc.
+  int32_t p = 31;                  // Init. p.
+  uint32_t q1 = two31 / anc;       // Init. q1 = 2**p/|nc|.
+  uint32_t r1 = two31 - q1 * anc;  // Init. r1 = rem(2**p, |nc|).
+  uint32_t q2 = two31 / ad;        // Init. q2 = 2**p/|d|.
+  uint32_t r2 = two31 - q2 * ad;   // Init. r2 = rem(2**p, |d|).
+  uint32_t delta;
+  do {
+    p++;
+    q1 *= 2;          // Update q1 = 2**p/|nc|.
+    r1 *= 2;          // Update r1 = rem(2**p, |nc|).
+    if (r1 >= anc) {  // Must be an unsigned comparison here.
+      q1++;
+      r1 = r1 - anc;
+    }
+    q2 *= 2;          // Update q2 = 2**p/|d|.
+    r2 *= 2;          // Update r2 = rem(2**p, |d|).
+    if (r2 >= ad) {   // Must be an unsigned comparison here.
+      q2++;
+      r2 = r2 - ad;
+    }
+    delta = ad - r2;
+  } while (q1 < delta || (q1 == delta && r1 == 0));
+  int32_t mul = static_cast<int32_t>(q2 + 1);
+  multiplier_ = (d < 0) ? -mul : mul;
+  shift_ = p - 32;
+}
+
 } }  // namespace v8::internal
diff --git a/src/assembler.h b/src/assembler.h
index 2506001..274f650 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -384,6 +384,10 @@
   // instructions).
   bool IsCodedSpecially();
 
+  // If true, the pointer this relocation info refers to is an entry in the
+  // constant pool, otherwise the pointer is embedded in the instruction stream.
+  bool IsInConstantPool();
+
   // Read/modify the code target in the branch/call instruction
   // this relocation applies to;
   // can only be called if IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
@@ -406,6 +410,10 @@
   INLINE(Code* code_age_stub());
   INLINE(void set_code_age_stub(Code* stub));
 
+  // Returns the address of the constant pool entry where the target address
+  // is held.  This should only be called if IsInConstantPool returns true.
+  INLINE(Address constant_pool_entry_address());
+
   // Read the address of the word containing the target_address in an
   // instruction stream.  What this means exactly is architecture-independent.
   // The only architecture-independent user of this function is the serializer.
@@ -413,6 +421,7 @@
   // output before the next target.  Architecture-independent code shouldn't
   // dereference the pointer it gets back from this.
   INLINE(Address target_address_address());
+
   // This indicates how much space a target takes up when deserializing a code
   // stream.  For most architectures this is just the size of a pointer.  For
   // an instruction like movw/movt where the target bits are mixed into the
@@ -1037,6 +1046,21 @@
   virtual void AfterCall() const { }
 };
 
+
+// The multiplier and shift for signed division via multiplication, see Warren's
+// "Hacker's Delight", chapter 10.
+class MultiplierAndShift {
+ public:
+  explicit MultiplierAndShift(int32_t d);
+  int32_t multiplier() const { return multiplier_; }
+  int32_t shift() const { return shift_; }
+
+ private:
+  int32_t multiplier_;
+  int32_t shift_;
+};
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_ASSEMBLER_H_
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 6bb0118..7bd5d8b 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -1049,13 +1049,16 @@
   Handle<PropertyCell> placeholder_cell =
       isolate()->factory()->NewPropertyCell(placeholer_value);
 
-  HParameter* receiver = GetParameter(0);
   HParameter* value = GetParameter(2);
 
-  // Check that the map of the global has not changed: use a placeholder map
-  // that will be replaced later with the global object's map.
-  Handle<Map> placeholder_map = isolate()->factory()->meta_map();
-  Add<HCheckMaps>(receiver, placeholder_map, top_info());
+  if (stub->check_global()) {
+    // Check that the map of the global has not changed: use a placeholder map
+    // that will be replaced later with the global object's map.
+    Handle<Map> placeholder_map = isolate()->factory()->meta_map();
+    HValue* global = Add<HConstant>(
+        StoreGlobalStub::global_placeholder(isolate()));
+    Add<HCheckMaps>(global, placeholder_map, top_info());
+  }
 
   HValue* cell = Add<HConstant>(placeholder_cell);
   HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 03849aa..62026a8 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -962,19 +962,27 @@
 
 class StoreGlobalStub : public HandlerStub {
  public:
-  explicit StoreGlobalStub(bool is_constant) {
-    bit_field_ = IsConstantBits::encode(is_constant);
+  explicit StoreGlobalStub(bool is_constant, bool check_global) {
+    bit_field_ = IsConstantBits::encode(is_constant) |
+        CheckGlobalBits::encode(check_global);
+  }
+
+  static Handle<HeapObject> global_placeholder(Isolate* isolate) {
+    return isolate->factory()->uninitialized_value();
   }
 
   Handle<Code> GetCodeCopyFromTemplate(Isolate* isolate,
-                                       Map* receiver_map,
+                                       GlobalObject* global,
                                        PropertyCell* cell) {
     Handle<Code> code = CodeStub::GetCodeCopyFromTemplate(isolate);
-    // Replace the placeholder cell and global object map with the actual global
-    // cell and receiver map.
+    if (check_global()) {
+      // Replace the placeholder cell and global object map with the actual
+      // global cell and receiver map.
+      code->ReplaceNthObject(1, global_placeholder(isolate)->map(), global);
+      code->ReplaceNthObject(1, isolate->heap()->meta_map(), global->map());
+    }
     Map* cell_map = isolate->heap()->global_property_cell_map();
     code->ReplaceNthObject(1, cell_map, cell);
-    code->ReplaceNthObject(1, isolate->heap()->meta_map(), receiver_map);
     return code;
   }
 
@@ -986,9 +994,12 @@
       Isolate* isolate,
       CodeStubInterfaceDescriptor* descriptor);
 
-  bool is_constant() {
+  bool is_constant() const {
     return IsConstantBits::decode(bit_field_);
   }
+  bool check_global() const {
+    return CheckGlobalBits::decode(bit_field_);
+  }
   void set_is_constant(bool value) {
     bit_field_ = IsConstantBits::update(bit_field_, value);
   }
@@ -1005,6 +1016,7 @@
 
   class IsConstantBits: public BitField<bool, 0, 1> {};
   class RepresentationBits: public BitField<Representation::Kind, 1, 8> {};
+  class CheckGlobalBits: public BitField<bool, 9, 1> {};
 
   DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
 };
diff --git a/src/extensions/externalize-string-extension.cc b/src/extensions/externalize-string-extension.cc
index d372cf0..adc5577 100644
--- a/src/extensions/externalize-string-extension.cc
+++ b/src/extensions/externalize-string-extension.cc
@@ -107,7 +107,7 @@
     SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
         reinterpret_cast<char*>(data), string->length());
     result = string->MakeExternal(resource);
-    if (result && !string->IsInternalizedString()) {
+    if (result) {
       i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
       isolate->heap()->external_string_table()->AddString(*string);
     }
@@ -118,7 +118,7 @@
     SimpleTwoByteStringResource* resource = new SimpleTwoByteStringResource(
         data, string->length());
     result = string->MakeExternal(resource);
-    if (result && !string->IsInternalizedString()) {
+    if (result) {
       i::Isolate* isolate = reinterpret_cast<i::Isolate*>(args.GetIsolate());
       isolate->heap()->external_string_table()->AddString(*string);
     }
diff --git a/src/factory.cc b/src/factory.cc
index 60727d9..1141229 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -81,14 +81,16 @@
 
 Handle<ConstantPoolArray> Factory::NewConstantPoolArray(
     int number_of_int64_entries,
-    int number_of_ptr_entries,
+    int number_of_code_ptr_entries,
+    int number_of_heap_ptr_entries,
     int number_of_int32_entries) {
-  ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
-         number_of_int32_entries > 0);
+  ASSERT(number_of_int64_entries > 0 || number_of_code_ptr_entries > 0 ||
+         number_of_heap_ptr_entries > 0 || number_of_int32_entries > 0);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateConstantPoolArray(number_of_int64_entries,
-                                                   number_of_ptr_entries,
+                                                   number_of_code_ptr_entries,
+                                                   number_of_heap_ptr_entries,
                                                    number_of_int32_entries),
       ConstantPoolArray);
 }
diff --git a/src/factory.h b/src/factory.h
index 86c2604..7d1a17a 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -61,7 +61,8 @@
 
   Handle<ConstantPoolArray> NewConstantPoolArray(
       int number_of_int64_entries,
-      int number_of_ptr_entries,
+      int number_of_code_ptr_entries,
+      int number_of_heap_ptr_entries,
       int number_of_int32_entries);
 
   Handle<SeededNumberDictionary> NewSeededNumberDictionary(
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index e1e8a4b..a882d03 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -241,7 +241,6 @@
 // Flags for Crankshaft.
 DEFINE_bool(crankshaft, true, "use crankshaft")
 DEFINE_string(hydrogen_filter, "*", "optimization filter")
-DEFINE_bool(use_range, true, "use hydrogen range analysis")
 DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
 DEFINE_int(gvn_iterations, 3, "maximum number of GVN fix-point iterations")
 DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
diff --git a/src/heap.cc b/src/heap.cc
index 4e0e8a6..d54a2d6 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -5084,20 +5084,23 @@
 MaybeObject* Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
                                                 Map* map) {
   int int64_entries = src->count_of_int64_entries();
-  int ptr_entries = src->count_of_ptr_entries();
+  int code_ptr_entries = src->count_of_code_ptr_entries();
+  int heap_ptr_entries = src->count_of_heap_ptr_entries();
   int int32_entries = src->count_of_int32_entries();
   Object* obj;
   { MaybeObject* maybe_obj =
-        AllocateConstantPoolArray(int64_entries, ptr_entries, int32_entries);
+        AllocateConstantPoolArray(int64_entries, code_ptr_entries,
+                                  heap_ptr_entries, int32_entries);
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
   HeapObject* dst = HeapObject::cast(obj);
   dst->set_map_no_write_barrier(map);
+  int size = ConstantPoolArray::SizeFor(
+        int64_entries, code_ptr_entries, heap_ptr_entries, int32_entries);
   CopyBlock(
       dst->address() + ConstantPoolArray::kLengthOffset,
       src->address() + ConstantPoolArray::kLengthOffset,
-      ConstantPoolArray::SizeFor(int64_entries, ptr_entries, int32_entries)
-          - ConstantPoolArray::kLengthOffset);
+      size - ConstantPoolArray::kLengthOffset);
   return obj;
 }
 
@@ -5234,12 +5237,14 @@
 
 
 MaybeObject* Heap::AllocateConstantPoolArray(int number_of_int64_entries,
-                                             int number_of_ptr_entries,
+                                             int number_of_code_ptr_entries,
+                                             int number_of_heap_ptr_entries,
                                              int number_of_int32_entries) {
-  ASSERT(number_of_int64_entries > 0 || number_of_ptr_entries > 0 ||
-         number_of_int32_entries > 0);
+  ASSERT(number_of_int64_entries > 0 || number_of_code_ptr_entries > 0 ||
+         number_of_heap_ptr_entries > 0 || number_of_int32_entries > 0);
   int size = ConstantPoolArray::SizeFor(number_of_int64_entries,
-                                        number_of_ptr_entries,
+                                        number_of_code_ptr_entries,
+                                        number_of_heap_ptr_entries,
                                         number_of_int32_entries);
 #ifndef V8_HOST_ARCH_64_BIT
   size += kPointerSize;
@@ -5256,29 +5261,38 @@
   ConstantPoolArray* constant_pool =
       reinterpret_cast<ConstantPoolArray*>(object);
   constant_pool->SetEntryCounts(number_of_int64_entries,
-                                number_of_ptr_entries,
+                                number_of_code_ptr_entries,
+                                number_of_heap_ptr_entries,
                                 number_of_int32_entries);
-  if (number_of_ptr_entries > 0) {
+  if (number_of_code_ptr_entries > 0) {
+    int offset =
+        constant_pool->OffsetOfElementAt(constant_pool->first_code_ptr_index());
     MemsetPointer(
-        HeapObject::RawField(
-            constant_pool,
-            constant_pool->OffsetOfElementAt(constant_pool->first_ptr_index())),
+        reinterpret_cast<Address*>(HeapObject::RawField(constant_pool, offset)),
+        isolate()->builtins()->builtin(Builtins::kIllegal)->entry(),
+        number_of_code_ptr_entries);
+  }
+  if (number_of_heap_ptr_entries > 0) {
+    int offset =
+        constant_pool->OffsetOfElementAt(constant_pool->first_heap_ptr_index());
+    MemsetPointer(
+        HeapObject::RawField(constant_pool, offset),
         undefined_value(),
-        number_of_ptr_entries);
+        number_of_heap_ptr_entries);
   }
   return constant_pool;
 }
 
 
 MaybeObject* Heap::AllocateEmptyConstantPoolArray() {
-  int size = ConstantPoolArray::SizeFor(0, 0, 0);
+  int size = ConstantPoolArray::SizeFor(0, 0, 0, 0);
   Object* result;
   { MaybeObject* maybe_result =
         AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
     if (!maybe_result->ToObject(&result)) return maybe_result;
   }
   HeapObject::cast(result)->set_map_no_write_barrier(constant_pool_array_map());
-  ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0);
+  ConstantPoolArray::cast(result)->SetEntryCounts(0, 0, 0, 0);
   return result;
 }
 
diff --git a/src/heap.h b/src/heap.h
index f99bb51..2292464 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -1012,9 +1012,10 @@
       PretenureFlag pretenure = NOT_TENURED);
 
   MUST_USE_RESULT MaybeObject* AllocateConstantPoolArray(
-      int first_int64_index,
-      int first_ptr_index,
-      int first_int32_index);
+      int number_of_int64_entries,
+      int number_of_code_ptr_entries,
+      int number_of_heap_ptr_entries,
+      int number_of_int32_entries);
 
   // Allocates a fixed double array with uninitialized values. Returns
   // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index fb19209..79c7964 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -1215,18 +1215,52 @@
 
 void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
   value()->PrintNameTo(stream);
-  stream->Add(" == %o", *type_literal_);
+  stream->Add(" == %o", *type_literal_.handle());
   HControlInstruction::PrintDataTo(stream);
 }
 
 
-bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
-  if (value()->representation().IsSpecialization()) {
-    if (compares_number_type()) {
-      *block = FirstSuccessor();
-    } else {
-      *block = SecondSuccessor();
+static String* TypeOfString(HConstant* constant, Isolate* isolate) {
+  Heap* heap = isolate->heap();
+  if (constant->HasNumberValue()) return heap->number_string();
+  if (constant->IsUndetectable()) return heap->undefined_string();
+  if (constant->HasStringValue()) return heap->string_string();
+  switch (constant->GetInstanceType()) {
+    case ODDBALL_TYPE: {
+      Unique<Object> unique = constant->GetUnique();
+      if (unique.IsKnownGlobal(heap->true_value()) ||
+          unique.IsKnownGlobal(heap->false_value())) {
+        return heap->boolean_string();
+      }
+      if (unique.IsKnownGlobal(heap->null_value())) {
+        return FLAG_harmony_typeof ? heap->null_string()
+                                   : heap->object_string();
+      }
+      ASSERT(unique.IsKnownGlobal(heap->undefined_value()));
+      return heap->undefined_string();
     }
+    case SYMBOL_TYPE:
+      return heap->symbol_string();
+    case JS_FUNCTION_TYPE:
+    case JS_FUNCTION_PROXY_TYPE:
+      return heap->function_string();
+    default:
+      return heap->object_string();
+  }
+}
+
+
+bool HTypeofIsAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    HConstant* constant = HConstant::cast(value());
+    String* type_string = TypeOfString(constant, isolate());
+    bool same_type = type_literal_.IsKnownGlobal(type_string);
+    *block = same_type ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  } else if (value()->representation().IsSpecialization()) {
+    bool number_type =
+        type_literal_.IsKnownGlobal(isolate()->heap()->number_string());
+    *block = number_type ? FirstSuccessor() : SecondSuccessor();
     return true;
   }
   *block = NULL;
@@ -2498,13 +2532,16 @@
     has_int32_value_(false),
     has_double_value_(false),
     has_external_reference_value_(false),
-    is_internalized_string_(false),
     is_not_in_new_space_(true),
-    is_cell_(false),
-    boolean_value_(handle->BooleanValue()) {
+    boolean_value_(handle->BooleanValue()),
+    is_undetectable_(false),
+    instance_type_(kUnknownInstanceType) {
   if (handle->IsHeapObject()) {
-    Heap* heap = Handle<HeapObject>::cast(handle)->GetHeap();
+    Handle<HeapObject> heap_obj = Handle<HeapObject>::cast(handle);
+    Heap* heap = heap_obj->GetHeap();
     is_not_in_new_space_ = !heap->InNewSpace(*handle);
+    instance_type_ = heap_obj->map()->instance_type();
+    is_undetectable_ = heap_obj->map()->is_undetectable();
   }
   if (handle->IsNumber()) {
     double n = handle->Number();
@@ -2514,12 +2551,8 @@
     double_value_ = n;
     has_double_value_ = true;
     // TODO(titzer): if this heap number is new space, tenure a new one.
-  } else {
-    is_internalized_string_ = handle->IsInternalizedString();
   }
 
-  is_cell_ = !handle.is_null() &&
-      (handle->IsCell() || handle->IsPropertyCell());
   Initialize(r);
 }
 
@@ -2527,20 +2560,20 @@
 HConstant::HConstant(Unique<Object> unique,
                      Representation r,
                      HType type,
-                     bool is_internalize_string,
                      bool is_not_in_new_space,
-                     bool is_cell,
-                     bool boolean_value)
+                     bool boolean_value,
+                     bool is_undetectable,
+                     InstanceType instance_type)
   : HTemplateInstruction<0>(type),
     object_(unique),
     has_smi_value_(false),
     has_int32_value_(false),
     has_double_value_(false),
     has_external_reference_value_(false),
-    is_internalized_string_(is_internalize_string),
     is_not_in_new_space_(is_not_in_new_space),
-    is_cell_(is_cell),
-    boolean_value_(boolean_value) {
+    boolean_value_(boolean_value),
+    is_undetectable_(is_undetectable),
+    instance_type_(instance_type) {
   ASSERT(!unique.handle().is_null());
   ASSERT(!type.IsTaggedNumber());
   Initialize(r);
@@ -2556,12 +2589,12 @@
     has_int32_value_(true),
     has_double_value_(true),
     has_external_reference_value_(false),
-    is_internalized_string_(false),
     is_not_in_new_space_(is_not_in_new_space),
-    is_cell_(false),
     boolean_value_(integer_value != 0),
+    is_undetectable_(false),
     int32_value_(integer_value),
-    double_value_(FastI2D(integer_value)) {
+    double_value_(FastI2D(integer_value)),
+    instance_type_(kUnknownInstanceType) {
   // It's possible to create a constant with a value in Smi-range but stored
   // in a (pre-existing) HeapNumber. See crbug.com/349878.
   bool could_be_heapobject = r.IsTagged() && !object.handle().is_null();
@@ -2579,12 +2612,12 @@
     has_int32_value_(IsInteger32(double_value)),
     has_double_value_(true),
     has_external_reference_value_(false),
-    is_internalized_string_(false),
     is_not_in_new_space_(is_not_in_new_space),
-    is_cell_(false),
     boolean_value_(double_value != 0 && !std::isnan(double_value)),
+    is_undetectable_(false),
     int32_value_(DoubleToInt32(double_value)),
-    double_value_(double_value) {
+    double_value_(double_value),
+    instance_type_(kUnknownInstanceType) {
   has_smi_value_ = has_int32_value_ && Smi::IsValid(int32_value_);
   // It's possible to create a constant with a value in Smi-range but stored
   // in a (pre-existing) HeapNumber. See crbug.com/349878.
@@ -2602,11 +2635,11 @@
     has_int32_value_(false),
     has_double_value_(false),
     has_external_reference_value_(true),
-    is_internalized_string_(false),
     is_not_in_new_space_(true),
-    is_cell_(false),
     boolean_value_(true),
-    external_reference_value_(reference) {
+    is_undetectable_(false),
+    external_reference_value_(reference),
+    instance_type_(kUnknownInstanceType) {
   Initialize(Representation::External());
 }
 
@@ -2705,10 +2738,10 @@
   return new(zone) HConstant(object_,
                              r,
                              type_,
-                             is_internalized_string_,
                              is_not_in_new_space_,
-                             is_cell_,
-                             boolean_value_);
+                             boolean_value_,
+                             is_undetectable_,
+                             instance_type_);
 }
 
 
@@ -3022,12 +3055,77 @@
 
 
 bool HCompareObjectEqAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
-  if (left()->IsConstant() && right()->IsConstant()) {
-    bool comparison_result =
-        HConstant::cast(left())->Equals(HConstant::cast(right()));
-    *block = comparison_result
-        ? FirstSuccessor()
-        : SecondSuccessor();
+  if (FLAG_fold_constants && left()->IsConstant() && right()->IsConstant()) {
+    *block = HConstant::cast(left())->Equals(HConstant::cast(right()))
+        ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+bool ConstantIsObject(HConstant* constant, Isolate* isolate) {
+  if (constant->HasNumberValue()) return false;
+  if (constant->GetUnique().IsKnownGlobal(isolate->heap()->null_value())) {
+    return true;
+  }
+  if (constant->IsUndetectable()) return false;
+  InstanceType type = constant->GetInstanceType();
+  return (FIRST_NONCALLABLE_SPEC_OBJECT_TYPE <= type) &&
+         (type <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+}
+
+
+bool HIsObjectAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    *block = ConstantIsObject(HConstant::cast(value()), isolate())
+        ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+bool HIsStringAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    *block = HConstant::cast(value())->HasStringValue()
+        ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+bool HIsSmiAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    *block = HConstant::cast(value())->HasSmiValue()
+        ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+bool HIsUndetectableAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    *block = HConstant::cast(value())->IsUndetectable()
+        ? FirstSuccessor() : SecondSuccessor();
+    return true;
+  }
+  *block = NULL;
+  return false;
+}
+
+
+bool HHasInstanceTypeAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    InstanceType type = HConstant::cast(value())->GetInstanceType();
+    *block = (from_ <= type) && (type <= to_)
+        ? FirstSuccessor() : SecondSuccessor();
     return true;
   }
   *block = NULL;
@@ -3042,6 +3140,14 @@
 
 
 bool HCompareMinusZeroAndBranch::KnownSuccessorBlock(HBasicBlock** block) {
+  if (FLAG_fold_constants && value()->IsConstant()) {
+    HConstant* constant = HConstant::cast(value());
+    if (constant->HasDoubleValue()) {
+      *block = IsMinusZero(constant->DoubleValue())
+          ? FirstSuccessor() : SecondSuccessor();
+      return true;
+    }
+  }
   if (value()->representation().IsSmiOrInteger32()) {
     // A Smi or Integer32 cannot contain minus zero.
     *block = SecondSuccessor();
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 36ef717..e8ae22d 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -3451,8 +3451,8 @@
                                           bool is_not_in_new_space,
                                           HInstruction* instruction) {
     return instruction->Prepend(new(zone) HConstant(
-        unique, Representation::Tagged(), HType::Tagged(), false,
-        is_not_in_new_space, false, false));
+        unique, Representation::Tagged(), HType::Tagged(),
+        is_not_in_new_space, false, false, kUnknownInstanceType));
   }
 
   Handle<Object> handle(Isolate* isolate) {
@@ -3487,7 +3487,7 @@
   bool ImmortalImmovable() const;
 
   bool IsCell() const {
-    return is_cell_;
+    return instance_type_ == CELL_TYPE || instance_type_ == PROPERTY_CELL_TYPE;
   }
 
   virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
@@ -3535,14 +3535,14 @@
   bool HasStringValue() const {
     if (has_double_value_ || has_int32_value_) return false;
     ASSERT(!object_.handle().is_null());
-    return type_.IsString();
+    return instance_type_ < FIRST_NONSTRING_TYPE;
   }
   Handle<String> StringValue() const {
     ASSERT(HasStringValue());
     return Handle<String>::cast(object_.handle());
   }
   bool HasInternalizedStringValue() const {
-    return HasStringValue() && is_internalized_string_;
+    return HasStringValue() && StringShape(instance_type_).IsInternalized();
   }
 
   bool HasExternalReferenceValue() const {
@@ -3554,6 +3554,8 @@
 
   bool HasBooleanValue() const { return type_.IsBoolean(); }
   bool BooleanValue() const { return boolean_value_; }
+  bool IsUndetectable() const { return is_undetectable_; }
+  InstanceType GetInstanceType() const { return instance_type_; }
 
   virtual intptr_t Hashcode() V8_OVERRIDE {
     if (has_int32_value_) {
@@ -3630,10 +3632,10 @@
   HConstant(Unique<Object> unique,
             Representation r,
             HType type,
-            bool is_internalized_string,
             bool is_not_in_new_space,
-            bool is_cell,
-            bool boolean_value);
+            bool boolean_value,
+            bool is_undetectable,
+            InstanceType instance_type);
 
   explicit HConstant(ExternalReference reference);
 
@@ -3656,13 +3658,15 @@
   bool has_int32_value_ : 1;
   bool has_double_value_ : 1;
   bool has_external_reference_value_ : 1;
-  bool is_internalized_string_ : 1;  // TODO(yangguo): make this part of HType.
   bool is_not_in_new_space_ : 1;
-  bool is_cell_ : 1;
   bool boolean_value_ : 1;
+  bool is_undetectable_: 1;
   int32_t int32_value_;
   double double_value_;
   ExternalReference external_reference_value_;
+
+  static const InstanceType kUnknownInstanceType = FILLER_TYPE;
+  InstanceType instance_type_;
 };
 
 
@@ -4328,6 +4332,8 @@
     return Representation::Tagged();
   }
 
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
 
  private:
@@ -4348,6 +4354,8 @@
     return Representation::Tagged();
   }
 
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
 
  protected:
@@ -4373,6 +4381,8 @@
     return Representation::Tagged();
   }
 
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
  protected:
   virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
   virtual int RedefinedOperandIndex() { return 0; }
@@ -4395,6 +4405,8 @@
     return Representation::Tagged();
   }
 
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
   DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
 
  private:
@@ -4477,6 +4489,8 @@
     return Representation::Tagged();
   }
 
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+
   DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
 
  private:
@@ -4558,8 +4572,7 @@
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
 
-  Handle<String> type_literal() { return type_literal_; }
-  bool compares_number_type() { return compares_number_type_; }
+  Handle<String> type_literal() { return type_literal_.handle(); }
   virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
@@ -4570,16 +4583,16 @@
 
   virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
 
+  virtual void FinalizeUniqueness() V8_OVERRIDE {
+    type_literal_ = Unique<String>(type_literal_.handle());
+  }
+
  private:
   HTypeofIsAndBranch(HValue* value, Handle<String> type_literal)
       : HUnaryControlInstruction(value, NULL, NULL),
-        type_literal_(type_literal) {
-    Heap* heap = type_literal->GetHeap();
-    compares_number_type_ = type_literal->Equals(heap->number_string());
-  }
+        type_literal_(Unique<String>::CreateUninitialized(type_literal)) { }
 
-  Handle<String> type_literal_;
-  bool compares_number_type_ : 1;
+  Unique<String> type_literal_;
 };
 
 
diff --git a/src/hydrogen-minus-zero.cc b/src/hydrogen-minus-zero.cc
deleted file mode 100644
index b1b1ed5..0000000
--- a/src/hydrogen-minus-zero.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "hydrogen-minus-zero.h"
-
-namespace v8 {
-namespace internal {
-
-void HComputeMinusZeroChecksPhase::Run() {
-  const ZoneList<HBasicBlock*>* blocks(graph()->blocks());
-  for (int i = 0; i < blocks->length(); ++i) {
-    for (HInstructionIterator it(blocks->at(i)); !it.Done(); it.Advance()) {
-      HInstruction* current = it.Current();
-      if (current->IsChange()) {
-        HChange* change = HChange::cast(current);
-        // Propagate flags for negative zero checks upwards from conversions
-        // int32-to-tagged and int32-to-double.
-        Representation from = change->value()->representation();
-        ASSERT(from.Equals(change->from()));
-        if (from.IsSmiOrInteger32()) {
-          ASSERT(change->to().IsTagged() ||
-                 change->to().IsDouble() ||
-                 change->to().IsSmiOrInteger32());
-          PropagateMinusZeroChecks(change->value());
-        }
-      } else if (current->IsCompareMinusZeroAndBranch()) {
-        HCompareMinusZeroAndBranch* check =
-            HCompareMinusZeroAndBranch::cast(current);
-        if (check->value()->representation().IsSmiOrInteger32()) {
-          PropagateMinusZeroChecks(check->value());
-        }
-      }
-    }
-  }
-}
-
-
-void HComputeMinusZeroChecksPhase::PropagateMinusZeroChecks(HValue* value) {
-  ASSERT(worklist_.is_empty());
-  ASSERT(in_worklist_.IsEmpty());
-
-  AddToWorklist(value);
-  while (!worklist_.is_empty()) {
-    value = worklist_.RemoveLast();
-
-    if (value->IsPhi()) {
-      // For phis, we must propagate the check to all of its inputs.
-      HPhi* phi = HPhi::cast(value);
-      for (int i = 0; i < phi->OperandCount(); ++i) {
-        AddToWorklist(phi->OperandAt(i));
-      }
-    } else if (value->IsUnaryMathOperation()) {
-      HUnaryMathOperation* instr = HUnaryMathOperation::cast(value);
-      if (instr->representation().IsSmiOrInteger32() &&
-          !instr->value()->representation().Equals(instr->representation())) {
-        if (instr->value()->range() == NULL ||
-            instr->value()->range()->CanBeMinusZero()) {
-          instr->SetFlag(HValue::kBailoutOnMinusZero);
-        }
-      }
-      if (instr->RequiredInputRepresentation(0).IsSmiOrInteger32() &&
-          instr->representation().Equals(
-              instr->RequiredInputRepresentation(0))) {
-        AddToWorklist(instr->value());
-      }
-    } else if (value->IsChange()) {
-      HChange* instr = HChange::cast(value);
-      if (!instr->from().IsSmiOrInteger32() &&
-          !instr->CanTruncateToInt32() &&
-          (instr->value()->range() == NULL ||
-           instr->value()->range()->CanBeMinusZero())) {
-        instr->SetFlag(HValue::kBailoutOnMinusZero);
-      }
-    } else if (value->IsForceRepresentation()) {
-      HForceRepresentation* instr = HForceRepresentation::cast(value);
-      AddToWorklist(instr->value());
-    } else if (value->IsMod()) {
-      HMod* instr = HMod::cast(value);
-      if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
-        instr->SetFlag(HValue::kBailoutOnMinusZero);
-        AddToWorklist(instr->left());
-      }
-    } else if (value->IsDiv() || value->IsMul()) {
-      HBinaryOperation* instr = HBinaryOperation::cast(value);
-      if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
-        instr->SetFlag(HValue::kBailoutOnMinusZero);
-      }
-      AddToWorklist(instr->right());
-      AddToWorklist(instr->left());
-    } else if (value->IsMathFloorOfDiv()) {
-      HMathFloorOfDiv* instr = HMathFloorOfDiv::cast(value);
-      instr->SetFlag(HValue::kBailoutOnMinusZero);
-    } else if (value->IsAdd() || value->IsSub()) {
-      HBinaryOperation* instr = HBinaryOperation::cast(value);
-      if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
-        // Propagate to the left argument. If the left argument cannot be -0,
-        // then the result of the add/sub operation cannot be either.
-        AddToWorklist(instr->left());
-      }
-    } else if (value->IsMathMinMax()) {
-      HMathMinMax* instr = HMathMinMax::cast(value);
-      AddToWorklist(instr->right());
-      AddToWorklist(instr->left());
-    }
-  }
-
-  in_worklist_.Clear();
-  ASSERT(in_worklist_.IsEmpty());
-  ASSERT(worklist_.is_empty());
-}
-
-} }  // namespace v8::internal
diff --git a/src/hydrogen-minus-zero.h b/src/hydrogen-minus-zero.h
deleted file mode 100644
index 7b74ab9..0000000
--- a/src/hydrogen-minus-zero.h
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_HYDROGEN_MINUS_ZERO_H_
-#define V8_HYDROGEN_MINUS_ZERO_H_
-
-#include "hydrogen.h"
-
-namespace v8 {
-namespace internal {
-
-
-class HComputeMinusZeroChecksPhase : public HPhase {
- public:
-  explicit HComputeMinusZeroChecksPhase(HGraph* graph)
-      : HPhase("H_Compute minus zero checks", graph),
-        in_worklist_(graph->GetMaximumValueID(), zone()),
-        worklist_(32, zone()) {}
-
-  void Run();
-
- private:
-  void AddToWorklist(HValue* value) {
-    if (value->CheckFlag(HValue::kBailoutOnMinusZero)) return;
-    if (in_worklist_.Contains(value->id())) return;
-    in_worklist_.Add(value->id());
-    worklist_.Add(value, zone());
-  }
-  void PropagateMinusZeroChecks(HValue* value);
-
-  BitVector in_worklist_;
-  ZoneList<HValue*> worklist_;
-
-  DISALLOW_COPY_AND_ASSIGN(HComputeMinusZeroChecksPhase);
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_HYDROGEN_MINUS_ZERO_H_
diff --git a/src/hydrogen-range-analysis.cc b/src/hydrogen-range-analysis.cc
index 76fd5f3..9d58fc8 100644
--- a/src/hydrogen-range-analysis.cc
+++ b/src/hydrogen-range-analysis.cc
@@ -78,7 +78,29 @@
 
     // Go through all instructions of the current block.
     for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
-      InferRange(it.Current());
+      HValue* value = it.Current();
+      InferRange(value);
+
+      // Compute the bailout-on-minus-zero flag.
+      if (value->IsChange()) {
+        HChange* instr = HChange::cast(value);
+        // Propagate flags for negative zero checks upwards from conversions
+        // int32-to-tagged and int32-to-double.
+        Representation from = instr->value()->representation();
+        ASSERT(from.Equals(instr->from()));
+        if (from.IsSmiOrInteger32()) {
+          ASSERT(instr->to().IsTagged() ||
+                instr->to().IsDouble() ||
+                instr->to().IsSmiOrInteger32());
+          PropagateMinusZeroChecks(instr->value());
+        }
+      } else if (value->IsCompareMinusZeroAndBranch()) {
+        HCompareMinusZeroAndBranch* instr =
+            HCompareMinusZeroAndBranch::cast(value);
+        if (instr->value()->representation().IsSmiOrInteger32()) {
+          PropagateMinusZeroChecks(instr->value());
+        }
+      }
     }
 
     // Continue analysis in all dominated blocks.
@@ -197,4 +219,79 @@
 }
 
 
+void HRangeAnalysisPhase::PropagateMinusZeroChecks(HValue* value) {
+  ASSERT(worklist_.is_empty());
+  ASSERT(in_worklist_.IsEmpty());
+
+  AddToWorklist(value);
+  while (!worklist_.is_empty()) {
+    value = worklist_.RemoveLast();
+
+    if (value->IsPhi()) {
+      // For phis, we must propagate the check to all of its inputs.
+      HPhi* phi = HPhi::cast(value);
+      for (int i = 0; i < phi->OperandCount(); ++i) {
+        AddToWorklist(phi->OperandAt(i));
+      }
+    } else if (value->IsUnaryMathOperation()) {
+      HUnaryMathOperation* instr = HUnaryMathOperation::cast(value);
+      if (instr->representation().IsSmiOrInteger32() &&
+          !instr->value()->representation().Equals(instr->representation())) {
+        if (instr->value()->range() == NULL ||
+            instr->value()->range()->CanBeMinusZero()) {
+          instr->SetFlag(HValue::kBailoutOnMinusZero);
+        }
+      }
+      if (instr->RequiredInputRepresentation(0).IsSmiOrInteger32() &&
+          instr->representation().Equals(
+              instr->RequiredInputRepresentation(0))) {
+        AddToWorklist(instr->value());
+      }
+    } else if (value->IsChange()) {
+      HChange* instr = HChange::cast(value);
+      if (!instr->from().IsSmiOrInteger32() &&
+          !instr->CanTruncateToInt32() &&
+          (instr->value()->range() == NULL ||
+           instr->value()->range()->CanBeMinusZero())) {
+        instr->SetFlag(HValue::kBailoutOnMinusZero);
+      }
+    } else if (value->IsForceRepresentation()) {
+      HForceRepresentation* instr = HForceRepresentation::cast(value);
+      AddToWorklist(instr->value());
+    } else if (value->IsMod()) {
+      HMod* instr = HMod::cast(value);
+      if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+        instr->SetFlag(HValue::kBailoutOnMinusZero);
+        AddToWorklist(instr->left());
+      }
+    } else if (value->IsDiv() || value->IsMul()) {
+      HBinaryOperation* instr = HBinaryOperation::cast(value);
+      if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+        instr->SetFlag(HValue::kBailoutOnMinusZero);
+      }
+      AddToWorklist(instr->right());
+      AddToWorklist(instr->left());
+    } else if (value->IsMathFloorOfDiv()) {
+      HMathFloorOfDiv* instr = HMathFloorOfDiv::cast(value);
+      instr->SetFlag(HValue::kBailoutOnMinusZero);
+    } else if (value->IsAdd() || value->IsSub()) {
+      HBinaryOperation* instr = HBinaryOperation::cast(value);
+      if (instr->range() == NULL || instr->range()->CanBeMinusZero()) {
+        // Propagate to the left argument. If the left argument cannot be -0,
+        // then the result of the add/sub operation cannot be either.
+        AddToWorklist(instr->left());
+      }
+    } else if (value->IsMathMinMax()) {
+      HMathMinMax* instr = HMathMinMax::cast(value);
+      AddToWorklist(instr->right());
+      AddToWorklist(instr->left());
+    }
+  }
+
+  in_worklist_.Clear();
+  ASSERT(in_worklist_.IsEmpty());
+  ASSERT(worklist_.is_empty());
+}
+
+
 } }  // namespace v8::internal
diff --git a/src/hydrogen-range-analysis.h b/src/hydrogen-range-analysis.h
index a1e9737..e0cc3c5 100644
--- a/src/hydrogen-range-analysis.h
+++ b/src/hydrogen-range-analysis.h
@@ -37,7 +37,9 @@
 class HRangeAnalysisPhase : public HPhase {
  public:
   explicit HRangeAnalysisPhase(HGraph* graph)
-      : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()) { }
+      : HPhase("H_Range analysis", graph), changed_ranges_(16, zone()),
+        in_worklist_(graph->GetMaximumValueID(), zone()),
+        worklist_(32, zone()) {}
 
   void Run();
 
@@ -49,8 +51,19 @@
   void InferRange(HValue* value);
   void RollBackTo(int index);
   void AddRange(HValue* value, Range* range);
+  void AddToWorklist(HValue* value) {
+    if (in_worklist_.Contains(value->id())) return;
+    in_worklist_.Add(value->id());
+    worklist_.Add(value, zone());
+  }
+  void PropagateMinusZeroChecks(HValue* value);
 
   ZoneList<HValue*> changed_ranges_;
+
+  BitVector in_worklist_;
+  ZoneList<HValue*> worklist_;
+
+  DISALLOW_COPY_AND_ASSIGN(HRangeAnalysisPhase);
 };
 
 
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 25fc749..4dd9955 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -48,7 +48,6 @@
 #include "hydrogen-gvn.h"
 #include "hydrogen-mark-deoptimize.h"
 #include "hydrogen-mark-unreachable.h"
-#include "hydrogen-minus-zero.h"
 #include "hydrogen-osr.h"
 #include "hydrogen-range-analysis.h"
 #include "hydrogen-redundant-phi.h"
@@ -708,10 +707,10 @@
         Unique<Object>::CreateImmovable(isolate()->factory()->name##_value()), \
         Representation::Tagged(),                                              \
         htype,                                                                 \
-        false,                                                                 \
         true,                                                                  \
+        boolean_value,                                                         \
         false,                                                                 \
-        boolean_value);                                                        \
+        ODDBALL_TYPE);                                                         \
     constant->InsertAfter(entry_block()->first());                             \
     constant_##name##_.set(constant);                                          \
   }                                                                            \
@@ -4049,10 +4048,9 @@
 
   if (FLAG_check_elimination) Run<HCheckEliminationPhase>();
 
-  if (FLAG_use_range) Run<HRangeAnalysisPhase>();
+  Run<HRangeAnalysisPhase>();
 
   Run<HComputeChangeUndefinedToNaN>();
-  Run<HComputeMinusZeroChecksPhase>();
 
   // Eliminate redundant stack checks on backwards branches.
   Run<HStackCheckEliminationPhase>();
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index ee5d991..0c35197 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -97,6 +97,12 @@
 }
 
 
+Address RelocInfo::constant_pool_entry_address() {
+  UNREACHABLE();
+  return NULL;
+}
+
+
 int RelocInfo::target_address_size() {
   return Assembler::kSpecialTargetSize;
 }
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 48f0f13..acc36f4 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -160,6 +160,11 @@
 }
 
 
+bool RelocInfo::IsInConstantPool() {
+  return false;
+}
+
+
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   // Patch the code at the current address with the supplied instructions.
   for (int i = 0; i < instruction_count; i++) {
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index cbad030..56d850b 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -1401,6 +1401,37 @@
 }
 
 
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  ASSERT(ToRegister(instr->result()).is(eax));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr->environment());
+    return;
+  }
+
+  __ FlooringDiv(dividend, Abs(divisor));
+  __ mov(eax, dividend);
+  __ shr(eax, 31);
+  __ add(edx, eax);
+  __ imul(edx, edx, Abs(divisor));
+  __ mov(eax, dividend);
+  __ sub(eax, edx);
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hmod->left()->CanBeNegative()) {
+    Label remainder_not_zero;
+    __ j(not_zero, &remainder_not_zero, Label::kNear);
+    __ cmp(dividend, Immediate(0));
+    DeoptimizeIf(less, instr->environment());
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
 void LCodeGen::DoModI(LModI* instr) {
   HMod* hmod = instr->hydrogen();
   HValue* left = hmod->left();
@@ -1500,6 +1531,39 @@
 }
 
 
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  ASSERT(ToRegister(instr->result()).is(edx));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr->environment());
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hdiv->left()->RangeCanInclude(0) && divisor < 0) {
+    __ test(dividend, dividend);
+    DeoptimizeIf(zero, instr->environment());
+  }
+
+  __ FlooringDiv(dividend, Abs(divisor));
+  __ mov(eax, dividend);
+  __ shr(eax, 31);
+  __ add(edx, eax);
+  if (divisor < 0) __ neg(edx);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    __ mov(eax, edx);
+    __ imul(eax, eax, divisor);
+    __ sub(eax, dividend);
+    DeoptimizeIf(not_equal, instr->environment());
+  }
+}
+
+
 void LCodeGen::DoDivI(LDivI* instr) {
   Register dividend = ToRegister(instr->left());
   Register divisor = ToRegister(instr->right());
@@ -1599,8 +1663,6 @@
 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  Register scratch = ToRegister(instr->temp());
-  ASSERT(ToRegister(instr->dividend()).is(eax));
   ASSERT(ToRegister(instr->result()).is(edx));
 
   if (divisor == 0) {
@@ -1608,52 +1670,15 @@
     return;
   }
 
-  // Find b which: 2^b < divisor_abs < 2^(b+1).
-  uint32_t divisor_abs = abs(divisor);
-  unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
-  unsigned shift = 32 + b;  // Precision +1bit (effectively).
-  double multiplier_f =
-      static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
-  int64_t multiplier;
-  if (multiplier_f - std::floor(multiplier_f) < 0.5) {
-    multiplier = static_cast<int64_t>(std::floor(multiplier_f));
-  } else {
-    multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
-  }
-  // The multiplier is a uint32.
-  ASSERT(multiplier > 0 &&
-         multiplier < (static_cast<int64_t>(1) << 32));
-  __ mov(scratch, dividend);
-  if (divisor < 0 &&
-      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hdiv->left()->RangeCanInclude(0) && divisor < 0) {
     __ test(dividend, dividend);
     DeoptimizeIf(zero, instr->environment());
   }
-  __ mov(edx, static_cast<int32_t>(multiplier));
-  __ imul(edx);
-  if (static_cast<int32_t>(multiplier) < 0) {
-    __ add(edx, scratch);
-  }
-  Register reg_lo = eax;
-  Register reg_byte_scratch = scratch;
-  if (!reg_byte_scratch.is_byte_register()) {
-    __ xchg(reg_lo, reg_byte_scratch);
-    reg_lo = scratch;
-    reg_byte_scratch = eax;
-  }
-  if (divisor < 0) {
-    __ xor_(reg_byte_scratch, reg_byte_scratch);
-    __ cmp(reg_lo, 0x40000000);
-    __ setcc(above, reg_byte_scratch);
-    __ neg(edx);
-    __ sub(edx, reg_byte_scratch);
-  } else {
-    __ xor_(reg_byte_scratch, reg_byte_scratch);
-    __ cmp(reg_lo, 0xC0000000);
-    __ setcc(above_equal, reg_byte_scratch);
-    __ add(edx, reg_byte_scratch);
-  }
-  __ sar(edx, shift - 32);
+
+  __ FlooringDiv(dividend, divisor);
 }
 
 
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index b513f85..c4ad7e8 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -1340,6 +1340,26 @@
 }
 
 
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(eax);
+  LOperand* temp2 = FixedTemp(edx);
+  LInstruction* result =
+      DefineFixed(
+          new(zone()) LDivByConstI(dividend, divisor, temp1, temp2), edx);
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->RangeCanInclude(0) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32);
+  return can_deopt ? AssignEnvironment(result) : result;
+}
+
+
 LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
   ASSERT(instr->representation().IsSmiOrInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
@@ -1347,14 +1367,21 @@
   LOperand* dividend = UseFixed(instr->left(), eax);
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = FixedTemp(edx);
-  LDivI* result = new(zone()) LDivI(dividend, divisor, temp);
-  return AssignEnvironment(DefineFixed(result, eax));
+  LInstruction* result =
+      DefineFixed(new(zone()) LDivI(dividend, divisor, temp), eax);
+  return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    return instr->RightIsPowerOf2() ? DoDivByPowerOf2I(instr) : DoDivI(instr);
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
   } else {
@@ -1376,13 +1403,23 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
-  LOperand* dividend = UseFixed(instr->left(), eax);
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
-  LOperand* temp = TempRegister();
+  LOperand* temp1 = FixedTemp(eax);
+  LOperand* temp2 = FixedTemp(edx);
   LInstruction* result =
-      DefineFixed(
-          new(zone()) LFlooringDivByConstI(dividend, divisor, temp), edx);
-  bool can_deopt = divisor <= 0;
+      DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+                                                   divisor,
+                                                   temp1,
+                                                   temp2),
+                  edx);
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->RangeCanInclude(0) && divisor < 0);
   return can_deopt ? AssignEnvironment(result) : result;
 }
 
@@ -1413,6 +1450,25 @@
 }
 
 
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  ASSERT(instr->representation().IsSmiOrInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(eax);
+  LOperand* temp2 = FixedTemp(edx);
+  LInstruction* result =
+      DefineFixed(
+          new(zone()) LModByConstI(dividend, divisor, temp1, temp2), eax);
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->CanBeNegative());
+  return can_deopt ? AssignEnvironment(result) : result;
+}
+
+
 LInstruction* LChunkBuilder::DoModI(HMod* instr) {
   ASSERT(instr->representation().IsSmiOrInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
@@ -1435,7 +1491,13 @@
 
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    return instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MOD, instr);
   } else {
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index 03144b2..ac9cffa 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -88,6 +88,7 @@
   V(DebugBreak)                                 \
   V(DeclareGlobals)                             \
   V(Deoptimize)                                 \
+  V(DivByConstI)                                \
   V(DivByPowerOf2I)                             \
   V(DivI)                                       \
   V(DoubleBits)                                 \
@@ -139,6 +140,7 @@
   V(MathPowHalf)                                \
   V(MathRound)                                  \
   V(MathSqrt)                                   \
+  V(ModByConstI)                                \
   V(ModByPowerOf2I)                             \
   V(ModI)                                       \
   V(MulI)                                       \
@@ -657,6 +659,31 @@
 };
 
 
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LModByConstI(LOperand* dividend,
+               int32_t divisor,
+               LOperand* temp1,
+               LOperand* temp2) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
 class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LModI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -692,6 +719,31 @@
 };
 
 
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LDivByConstI(LOperand* dividend,
+               int32_t divisor,
+               LOperand* temp1,
+               LOperand* temp2) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
 class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LDivI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -730,17 +782,22 @@
 };
 
 
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
-  LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+  LFlooringDivByConstI(LOperand* dividend,
+                       int32_t divisor,
+                       LOperand* temp1,
+                       LOperand* temp2) {
     inputs_[0] = dividend;
     divisor_ = divisor;
-    temps_[0] = temp;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
   }
 
   LOperand* dividend() { return inputs_[0]; }
   int32_t divisor() const { return divisor_; }
-  LOperand* temp() { return temps_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
 
   DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
   DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
@@ -2720,8 +2777,10 @@
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
   LInstruction* DoMathClz32(HUnaryMathOperation* instr);
   LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
   LInstruction* DoDivI(HBinaryOperation* instr);
   LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
   LInstruction* DoModI(HMod* instr);
   LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
   LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index cc0f392..2e5b835 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -3603,6 +3603,19 @@
   j(not_equal, &loop_again);
 }
 
+
+void MacroAssembler::FlooringDiv(Register dividend, int32_t divisor) {
+  ASSERT(!dividend.is(eax));
+  ASSERT(!dividend.is(edx));
+  MultiplierAndShift ms(divisor);
+  mov(eax, Immediate(ms.multiplier()));
+  imul(dividend);
+  if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
+  if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
+  if (ms.shift() > 0) sar(edx, ms.shift());
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 6b0573c..dafa65d 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -855,6 +855,10 @@
   // Insert code to verify that the x87 stack has the specified depth (0-7)
   void VerifyX87StackDepth(uint32_t depth);
 
+  // Emit code for a flooring division by a constant. The dividend register is
+  // unchanged, the result is in edx, and eax gets clobbered.
+  void FlooringDiv(Register dividend, int32_t divisor);
+
   // ---------------------------------------------------------------------------
   // StatsCounter support
 
diff --git a/src/ic.cc b/src/ic.cc
index 55f21c7..033246f 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -1072,7 +1072,7 @@
     maybe_object = LoadIC::Load(object, Handle<String>::cast(key));
     if (maybe_object->IsFailure()) return maybe_object;
   } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
-    ASSERT(!object->IsJSGlobalProxy());
+    ASSERT(!object->IsAccessCheckNeeded());
     if (object->IsString() && key->IsNumber()) {
       if (state() == UNINITIALIZED) stub = string_stub();
     } else if (object->IsJSObject()) {
@@ -1111,22 +1111,20 @@
   Handle<JSObject> holder = receiver;
   receiver->Lookup(*name, lookup);
   if (lookup->IsFound()) {
-    if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
-
-    if (lookup->holder() == *receiver) {
-      if (lookup->IsInterceptor() && !HasInterceptorSetter(*receiver)) {
-        receiver->LocalLookupRealNamedProperty(*name, lookup);
-        return lookup->IsFound() &&
-            !lookup->IsReadOnly() &&
-            lookup->CanHoldValue(value) &&
-            lookup->IsCacheable();
-      }
-      return lookup->CanHoldValue(value);
+    if (lookup->IsInterceptor() && !HasInterceptorSetter(lookup->holder())) {
+      receiver->LocalLookupRealNamedProperty(*name, lookup);
+      if (!lookup->IsFound()) return false;
     }
 
+    if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
+    if (lookup->holder() == *receiver) return lookup->CanHoldValue(value);
     if (lookup->IsPropertyCallbacks()) return true;
-    // JSGlobalProxy always goes via the runtime, so it's safe to cache.
-    if (receiver->IsJSGlobalProxy()) return true;
+    // JSGlobalProxy either stores on the global object in the prototype, or
+    // goes into the runtime if access checks are needed, so this is always
+    // safe.
+    if (receiver->IsJSGlobalProxy()) {
+      return lookup->holder() == receiver->GetPrototype();
+    }
     // Currently normal holders in the prototype chain are not supported. They
     // would require a runtime positive lookup and verification that the details
     // have not changed.
@@ -1311,7 +1309,7 @@
                                      Handle<String> name,
                                      Handle<Object> value,
                                      InlineCacheHolderFlag cache_holder) {
-  if (object->IsJSGlobalProxy()) return slow_stub();
+  if (object->IsAccessCheckNeeded()) return slow_stub();
   ASSERT(cache_holder == OWN_MAP);
   // This is currently guaranteed by checks in StoreIC::Store.
   Handle<JSObject> receiver = Handle<JSObject>::cast(object);
@@ -1335,17 +1333,19 @@
     }
     case NORMAL:
       if (kind() == Code::KEYED_STORE_IC) break;
-      if (receiver->IsGlobalObject()) {
+      if (receiver->IsJSGlobalProxy() || receiver->IsGlobalObject()) {
         // The stub generated for the global object picks the value directly
         // from the property cell. So the property must be directly on the
         // global object.
-        Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+        Handle<GlobalObject> global = receiver->IsJSGlobalProxy()
+            ? handle(GlobalObject::cast(receiver->GetPrototype()))
+            : Handle<GlobalObject>::cast(receiver);
         Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate());
         Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
-        StoreGlobalStub stub(union_type->IsConstant());
-
+        StoreGlobalStub stub(
+            union_type->IsConstant(), receiver->IsJSGlobalProxy());
         Handle<Code> code = stub.GetCodeCopyFromTemplate(
-            isolate(), receiver->map(), *cell);
+            isolate(), *global, *cell);
         // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
         HeapObject::UpdateMapCodeCache(receiver, name, code);
         return code;
@@ -1385,7 +1385,7 @@
     }
     case INTERCEPTOR:
       if (kind() == Code::KEYED_STORE_IC) break;
-      ASSERT(HasInterceptorSetter(*receiver));
+      ASSERT(HasInterceptorSetter(*holder));
       return compiler.CompileStoreInterceptor(receiver, name);
     case CONSTANT:
       break;
@@ -1681,7 +1681,7 @@
     }
 
     if (use_ic) {
-      ASSERT(!object->IsJSGlobalProxy());
+      ASSERT(!object->IsAccessCheckNeeded());
 
       if (object->IsJSObject()) {
         Handle<JSObject> receiver = Handle<JSObject>::cast(object);
diff --git a/src/isolate.cc b/src/isolate.cc
index 9dd2c5f..8c1c2c1 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -778,7 +778,7 @@
 
 bool Isolate::MayNamedAccess(JSObject* receiver, Object* key,
                              v8::AccessType type) {
-  ASSERT(receiver->IsAccessCheckNeeded());
+  ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
 
   // The callers of this method are not expecting a GC.
   DisallowHeapAllocation no_gc;
@@ -829,7 +829,7 @@
 bool Isolate::MayIndexedAccess(JSObject* receiver,
                                uint32_t index,
                                v8::AccessType type) {
-  ASSERT(receiver->IsAccessCheckNeeded());
+  ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
   // Check for compatibility between the security tokens in the
   // current lexical context and the accessed object.
   ASSERT(context());
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index d5d871e..8a1476a 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -50,12 +50,9 @@
 class LPlatformChunk;
 class LOperand;
 class LUnallocated;
-class LConstantOperand;
 class LGap;
 class LParallelMove;
 class LPointerMap;
-class LStackSlot;
-class LRegister;
 
 
 // This class represents a single point of a LOperand's lifetime.
diff --git a/src/lithium.cc b/src/lithium.cc
index d3b49b5..9ccdc7a 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -114,33 +114,37 @@
   }
 }
 
-#define DEFINE_OPERAND_CACHE(name, type)                      \
-  L##name* L##name::cache = NULL;                             \
-                                                              \
-  void L##name::SetUpCache() {                                \
-    if (cache) return;                                        \
-    cache = new L##name[kNumCachedOperands];                  \
-    for (int i = 0; i < kNumCachedOperands; i++) {            \
-      cache[i].ConvertTo(type, i);                            \
-    }                                                         \
-  }                                                           \
-                                                              \
-  void L##name::TearDownCache() {                             \
-    delete[] cache;                                           \
-  }
 
-LITHIUM_OPERAND_LIST(DEFINE_OPERAND_CACHE)
-#undef DEFINE_OPERAND_CACHE
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+LSubKindOperand<kOperandKind, kNumCachedOperands>*
+LSubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
+  if (cache) return;
+  cache = new LSubKindOperand[kNumCachedOperands];
+  for (int i = 0; i < kNumCachedOperands; i++) {
+    cache[i].ConvertTo(kOperandKind, i);
+  }
+}
+
+
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
+  delete[] cache;
+}
+
 
 void LOperand::SetUpCaches() {
-#define LITHIUM_OPERAND_SETUP(name, type) L##name::SetUpCache();
+#define LITHIUM_OPERAND_SETUP(name, type, number) L##name::SetUpCache();
   LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_SETUP)
 #undef LITHIUM_OPERAND_SETUP
 }
 
 
 void LOperand::TearDownCaches() {
-#define LITHIUM_OPERAND_TEARDOWN(name, type) L##name::TearDownCache();
+#define LITHIUM_OPERAND_TEARDOWN(name, type, number) L##name::TearDownCache();
   LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_TEARDOWN)
 #undef LITHIUM_OPERAND_TEARDOWN
 }
diff --git a/src/lithium.h b/src/lithium.h
index 439479c..8ae5b87 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -35,12 +35,12 @@
 namespace v8 {
 namespace internal {
 
-#define LITHIUM_OPERAND_LIST(V)         \
-  V(ConstantOperand, CONSTANT_OPERAND)  \
-  V(StackSlot,       STACK_SLOT)        \
-  V(DoubleStackSlot, DOUBLE_STACK_SLOT) \
-  V(Register,        REGISTER)          \
-  V(DoubleRegister,  DOUBLE_REGISTER)
+#define LITHIUM_OPERAND_LIST(V)               \
+  V(ConstantOperand, CONSTANT_OPERAND,  128)  \
+  V(StackSlot,       STACK_SLOT,        128)  \
+  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)  \
+  V(Register,        REGISTER,          16)   \
+  V(DoubleRegister,  DOUBLE_REGISTER,   16)
 
 
 class LOperand : public ZoneObject {
@@ -59,11 +59,11 @@
 
   Kind kind() const { return KindField::decode(value_); }
   int index() const { return static_cast<int>(value_) >> kKindFieldWidth; }
-#define LITHIUM_OPERAND_PREDICATE(name, type) \
+#define LITHIUM_OPERAND_PREDICATE(name, type, number) \
   bool Is##name() const { return kind() == type; }
   LITHIUM_OPERAND_LIST(LITHIUM_OPERAND_PREDICATE)
-  LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED)
-  LITHIUM_OPERAND_PREDICATE(Ignored, INVALID)
+  LITHIUM_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+  LITHIUM_OPERAND_PREDICATE(Ignored, INVALID, 0)
 #undef LITHIUM_OPERAND_PREDICATE
   bool Equals(LOperand* other) const { return value_ == other->value_; }
 
@@ -315,129 +315,35 @@
 };
 
 
-class LConstantOperand V8_FINAL : public LOperand {
+template<LOperand::Kind kOperandKind, int kNumCachedOperands>
+class LSubKindOperand V8_FINAL : public LOperand {
  public:
-  static LConstantOperand* Create(int index, Zone* zone) {
+  static LSubKindOperand* Create(int index, Zone* zone) {
     ASSERT(index >= 0);
     if (index < kNumCachedOperands) return &cache[index];
-    return new(zone) LConstantOperand(index);
+    return new(zone) LSubKindOperand(index);
   }
 
-  static LConstantOperand* cast(LOperand* op) {
-    ASSERT(op->IsConstantOperand());
-    return reinterpret_cast<LConstantOperand*>(op);
+  static LSubKindOperand* cast(LOperand* op) {
+    ASSERT(op->kind() == kOperandKind);
+    return reinterpret_cast<LSubKindOperand*>(op);
   }
 
   static void SetUpCache();
   static void TearDownCache();
 
  private:
-  static const int kNumCachedOperands = 128;
-  static LConstantOperand* cache;
+  static LSubKindOperand* cache;
 
-  LConstantOperand() : LOperand() { }
-  explicit LConstantOperand(int index) : LOperand(CONSTANT_OPERAND, index) { }
+  LSubKindOperand() : LOperand() { }
+  explicit LSubKindOperand(int index) : LOperand(kOperandKind, index) { }
 };
 
 
-class LStackSlot V8_FINAL : public LOperand {
- public:
-  static LStackSlot* Create(int index, Zone* zone) {
-    ASSERT(index >= 0);
-    if (index < kNumCachedOperands) return &cache[index];
-    return new(zone) LStackSlot(index);
-  }
-
-  static LStackSlot* cast(LOperand* op) {
-    ASSERT(op->IsStackSlot());
-    return reinterpret_cast<LStackSlot*>(op);
-  }
-
-  static void SetUpCache();
-  static void TearDownCache();
-
- private:
-  static const int kNumCachedOperands = 128;
-  static LStackSlot* cache;
-
-  LStackSlot() : LOperand() { }
-  explicit LStackSlot(int index) : LOperand(STACK_SLOT, index) { }
-};
-
-
-class LDoubleStackSlot V8_FINAL : public LOperand {
- public:
-  static LDoubleStackSlot* Create(int index, Zone* zone) {
-    ASSERT(index >= 0);
-    if (index < kNumCachedOperands) return &cache[index];
-    return new(zone) LDoubleStackSlot(index);
-  }
-
-  static LDoubleStackSlot* cast(LOperand* op) {
-    ASSERT(op->IsStackSlot());
-    return reinterpret_cast<LDoubleStackSlot*>(op);
-  }
-
-  static void SetUpCache();
-  static void TearDownCache();
-
- private:
-  static const int kNumCachedOperands = 128;
-  static LDoubleStackSlot* cache;
-
-  LDoubleStackSlot() : LOperand() { }
-  explicit LDoubleStackSlot(int index) : LOperand(DOUBLE_STACK_SLOT, index) { }
-};
-
-
-class LRegister V8_FINAL : public LOperand {
- public:
-  static LRegister* Create(int index, Zone* zone) {
-    ASSERT(index >= 0);
-    if (index < kNumCachedOperands) return &cache[index];
-    return new(zone) LRegister(index);
-  }
-
-  static LRegister* cast(LOperand* op) {
-    ASSERT(op->IsRegister());
-    return reinterpret_cast<LRegister*>(op);
-  }
-
-  static void SetUpCache();
-  static void TearDownCache();
-
- private:
-  static const int kNumCachedOperands = 16;
-  static LRegister* cache;
-
-  LRegister() : LOperand() { }
-  explicit LRegister(int index) : LOperand(REGISTER, index) { }
-};
-
-
-class LDoubleRegister V8_FINAL : public LOperand {
- public:
-  static LDoubleRegister* Create(int index, Zone* zone) {
-    ASSERT(index >= 0);
-    if (index < kNumCachedOperands) return &cache[index];
-    return new(zone) LDoubleRegister(index);
-  }
-
-  static LDoubleRegister* cast(LOperand* op) {
-    ASSERT(op->IsDoubleRegister());
-    return reinterpret_cast<LDoubleRegister*>(op);
-  }
-
-  static void SetUpCache();
-  static void TearDownCache();
-
- private:
-  static const int kNumCachedOperands = 16;
-  static LDoubleRegister* cache;
-
-  LDoubleRegister() : LOperand() { }
-  explicit LDoubleRegister(int index) : LOperand(DOUBLE_REGISTER, index) { }
-};
+#define LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number)   \
+typedef LSubKindOperand<LOperand::type, number> L##name;
+LITHIUM_OPERAND_LIST(LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS)
+#undef LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS
 
 
 class LParallelMove V8_FINAL : public ZoneObject {
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
index 7aaad0e..810f25d 100644
--- a/src/mark-compact.cc
+++ b/src/mark-compact.cc
@@ -2076,8 +2076,8 @@
       }
       Object* target = allocation->ToObjectUnchecked();
 
-      MigrateObject(HeapObject::cast(target)->address(),
-                    object->address(),
+      MigrateObject(HeapObject::cast(target),
+                    object,
                     size,
                     NEW_SPACE);
     }
@@ -2819,19 +2819,21 @@
 // pointer iteration.  This is an issue if the store buffer overflows and we
 // have to scan the entire old space, including dead objects, looking for
 // pointers to new space.
-void MarkCompactCollector::MigrateObject(Address dst,
-                                         Address src,
+void MarkCompactCollector::MigrateObject(HeapObject* dst,
+                                         HeapObject* src,
                                          int size,
                                          AllocationSpace dest) {
+  Address dst_addr = dst->address();
+  Address src_addr = src->address();
   HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
   if (heap_profiler->is_tracking_object_moves()) {
-    heap_profiler->ObjectMoveEvent(src, dst, size);
+    heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size);
   }
-  ASSERT(heap()->AllowedToBeMigrated(HeapObject::FromAddress(src), dest));
+  ASSERT(heap()->AllowedToBeMigrated(src, dest));
   ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
   if (dest == OLD_POINTER_SPACE) {
-    Address src_slot = src;
-    Address dst_slot = dst;
+    Address src_slot = src_addr;
+    Address dst_slot = dst_addr;
     ASSERT(IsAligned(size, kPointerSize));
 
     for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
@@ -2852,8 +2854,8 @@
       dst_slot += kPointerSize;
     }
 
-    if (compacting_ && HeapObject::FromAddress(dst)->IsJSFunction()) {
-      Address code_entry_slot = dst + JSFunction::kCodeEntryOffset;
+    if (compacting_ && dst->IsJSFunction()) {
+      Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
       Address code_entry = Memory::Address_at(code_entry_slot);
 
       if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
@@ -2863,21 +2865,36 @@
                            code_entry_slot,
                            SlotsBuffer::IGNORE_OVERFLOW);
       }
+    } else if (compacting_ && dst->IsConstantPoolArray()) {
+      ConstantPoolArray* constant_pool = ConstantPoolArray::cast(dst);
+      for (int i = 0; i < constant_pool->count_of_code_ptr_entries(); i++) {
+        Address code_entry_slot =
+            dst_addr + constant_pool->OffsetOfElementAt(i);
+        Address code_entry = Memory::Address_at(code_entry_slot);
+
+        if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+          SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                             &migration_slots_buffer_,
+                             SlotsBuffer::CODE_ENTRY_SLOT,
+                             code_entry_slot,
+                             SlotsBuffer::IGNORE_OVERFLOW);
+        }
+      }
     }
   } else if (dest == CODE_SPACE) {
-    PROFILE(isolate(), CodeMoveEvent(src, dst));
-    heap()->MoveBlock(dst, src, size);
+    PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
+    heap()->MoveBlock(dst_addr, src_addr, size);
     SlotsBuffer::AddTo(&slots_buffer_allocator_,
                        &migration_slots_buffer_,
                        SlotsBuffer::RELOCATED_CODE_OBJECT,
-                       dst,
+                       dst_addr,
                        SlotsBuffer::IGNORE_OVERFLOW);
-    Code::cast(HeapObject::FromAddress(dst))->Relocate(dst - src);
+    Code::cast(dst)->Relocate(dst_addr - src_addr);
   } else {
     ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
-    heap()->MoveBlock(dst, src, size);
+    heap()->MoveBlock(dst_addr, src_addr, size);
   }
-  Memory::Address_at(src) = dst;
+  Memory::Address_at(src_addr) = dst_addr;
 }
 
 
@@ -3012,8 +3029,8 @@
   MaybeObject* maybe_result = target_space->AllocateRaw(object_size);
   if (maybe_result->ToObject(&result)) {
     HeapObject* target = HeapObject::cast(result);
-    MigrateObject(target->address(),
-                  object->address(),
+    MigrateObject(target,
+                  object,
                   object_size,
                   target_space->identity());
     heap()->mark_compact_collector()->tracer()->
@@ -3091,8 +3108,8 @@
 
       Object* target_object = target->ToObjectUnchecked();
 
-      MigrateObject(HeapObject::cast(target_object)->address(),
-                    object_addr,
+      MigrateObject(HeapObject::cast(target_object),
+                    object,
                     size,
                     space->identity());
       ASSERT(object->map_word().IsForwardingAddress());
@@ -4370,14 +4387,33 @@
 
 void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  RelocInfo::Mode rmode = rinfo->rmode();
   if (target_page->IsEvacuationCandidate() &&
       (rinfo->host() == NULL ||
        !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
-    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                            target_page->slots_buffer_address(),
-                            SlotTypeForRMode(rinfo->rmode()),
-                            rinfo->pc(),
-                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+    bool success;
+    if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
+      // This doesn't need to be typed since it is just a normal heap pointer.
+      Object** target_pointer =
+          reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
+      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                                   target_page->slots_buffer_address(),
+                                   target_pointer,
+                                   SlotsBuffer::FAIL_ON_OVERFLOW);
+    } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
+      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                                   target_page->slots_buffer_address(),
+                                   SlotsBuffer::CODE_ENTRY_SLOT,
+                                   rinfo->constant_pool_entry_address(),
+                                   SlotsBuffer::FAIL_ON_OVERFLOW);
+    } else {
+      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                                  target_page->slots_buffer_address(),
+                                  SlotTypeForRMode(rmode),
+                                  rinfo->pc(),
+                                  SlotsBuffer::FAIL_ON_OVERFLOW);
+    }
+    if (!success) {
       EvictEvacuationCandidate(target_page);
     }
   }
diff --git a/src/mark-compact.h b/src/mark-compact.h
index 6019f6c..40b73fb 100644
--- a/src/mark-compact.h
+++ b/src/mark-compact.h
@@ -696,8 +696,8 @@
                          SlotsBuffer::AdditionMode mode =
                              SlotsBuffer::FAIL_ON_OVERFLOW));
 
-  void MigrateObject(Address dst,
-                     Address src,
+  void MigrateObject(HeapObject* dst,
+                     HeapObject* src,
                      int size,
                      AllocationSpace to_old_space);
 
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index 514b3aa..04c79cb 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -156,6 +156,12 @@
 }
 
 
+Address RelocInfo::constant_pool_entry_address() {
+  UNREACHABLE();
+  return NULL;
+}
+
+
 int RelocInfo::target_address_size() {
   return Assembler::kSpecialTargetSize;
 }
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 9adb900..696acd8 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -213,6 +213,11 @@
 }
 
 
+bool RelocInfo::IsInConstantPool() {
+  return false;
+}
+
+
 // Patch the code at the current address with the supplied instructions.
 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
   Instr* pc = reinterpret_cast<Instr*>(pc_);
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index c7e2a43..b5102a4 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -1132,108 +1132,6 @@
 }
 
 
-void LCodeGen::EmitSignedIntegerDivisionByConstant(
-    Register result,
-    Register dividend,
-    int32_t divisor,
-    Register remainder,
-    Register scratch,
-    LEnvironment* environment) {
-  ASSERT(!AreAliased(dividend, scratch, at, no_reg));
-
-  uint32_t divisor_abs = abs(divisor);
-
-  int32_t power_of_2_factor =
-    CompilerIntrinsics::CountTrailingZeros(divisor_abs);
-
-  switch (divisor_abs) {
-    case 0:
-      DeoptimizeIf(al, environment);
-      return;
-
-    case 1:
-      if (divisor > 0) {
-        __ Move(result, dividend);
-      } else {
-        __ SubuAndCheckForOverflow(result, zero_reg, dividend, scratch);
-        DeoptimizeIf(lt, environment, scratch, Operand(zero_reg));
-      }
-      // Compute the remainder.
-      __ Move(remainder, zero_reg);
-      return;
-
-    default:
-      if (IsPowerOf2(divisor_abs)) {
-        // Branch and condition free code for integer division by a power
-        // of two.
-        int32_t power = WhichPowerOf2(divisor_abs);
-        if (power > 1) {
-          __ sra(scratch, dividend, power - 1);
-        }
-        __ srl(scratch, scratch, 32 - power);
-        __ Addu(scratch, dividend, Operand(scratch));
-        __ sra(result, scratch,  power);
-        // Negate if necessary.
-        // We don't need to check for overflow because the case '-1' is
-        // handled separately.
-        if (divisor < 0) {
-          ASSERT(divisor != -1);
-          __ Subu(result, zero_reg, Operand(result));
-        }
-        // Compute the remainder.
-        if (divisor > 0) {
-          __ sll(scratch, result, power);
-          __ Subu(remainder, dividend, Operand(scratch));
-        } else {
-          __ sll(scratch, result, power);
-          __ Addu(remainder, dividend, Operand(scratch));
-        }
-        return;
-      } else if (LChunkBuilder::HasMagicNumberForDivisor(divisor)) {
-        // Use magic numbers for a few specific divisors.
-        // Details and proofs can be found in:
-        // - Hacker's Delight, Henry S. Warren, Jr.
-        // - The PowerPC Compiler Writer's Guide
-        // and probably many others.
-        //
-        // We handle
-        //   <divisor with magic numbers> * <power of 2>
-        // but not
-        //   <divisor with magic numbers> * <other divisor with magic numbers>
-        DivMagicNumbers magic_numbers =
-          DivMagicNumberFor(divisor_abs >> power_of_2_factor);
-        // Branch and condition free code for integer division by a power
-        // of two.
-        const int32_t M = magic_numbers.M;
-        const int32_t s = magic_numbers.s + power_of_2_factor;
-
-        __ li(scratch, Operand(M));
-        __ mult(dividend, scratch);
-        __ mfhi(scratch);
-        if (M < 0) {
-          __ Addu(scratch, scratch, Operand(dividend));
-        }
-        if (s > 0) {
-          __ sra(scratch, scratch, s);
-          __ mov(scratch, scratch);
-        }
-        __ srl(at, dividend, 31);
-        __ Addu(result, scratch, Operand(at));
-        if (divisor < 0) __ Subu(result, zero_reg, Operand(result));
-        // Compute the remainder.
-        __ li(scratch, Operand(divisor));
-        __ Mul(scratch, result, Operand(scratch));
-        __ Subu(remainder, dividend, Operand(scratch));
-      } else {
-        __ li(scratch, Operand(divisor));
-        __ div(dividend, scratch);
-        __ mfhi(remainder);
-        __ mflo(result);
-      }
-  }
-}
-
-
 void LCodeGen::DoDivI(LDivI* instr) {
   const Register left = ToRegister(instr->left());
   const Register right = ToRegister(instr->right());
@@ -1284,33 +1182,66 @@
 }
 
 
-void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
-  Register left = ToRegister(instr->dividend());
-  Register remainder = ToRegister(instr->temp());
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  ASSERT(dividend.is(ToRegister(instr->result())));
   Register scratch = scratch0();
-  Register result = ToRegister(instr->result());
 
-  ASSERT(instr->divisor()->IsConstantOperand());
-  Label done;
-  int32_t divisor = ToInteger32(LConstantOperand::cast(instr->divisor()));
-  if (divisor < 0) {
-    DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
+  if (divisor == 1) return;
+  uint16_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 1) {
+    __ sra(dividend, dividend, shift);
+    return;
   }
-  EmitSignedIntegerDivisionByConstant(result,
-                                      left,
-                                      divisor,
-                                      remainder,
-                                      scratch,
-                                      instr->environment());
-  // We performed a truncating division. Correct the result if necessary.
-  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
-  __ Xor(scratch , remainder, Operand(divisor));
-  __ Branch(&done, ge, scratch, Operand(zero_reg));
-  __ Subu(result, result, Operand(1));
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+  Label not_kmin_int, done;
+  __ Subu(scratch, zero_reg, dividend);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+  }
+  if (instr->hydrogen()->left()->RangeCanInclude(kMinInt)) {
+    // Note that we could emit branch-free code, but that would need one more
+    // register.
+    __ Branch(&not_kmin_int, ne, dividend, Operand(kMinInt));
+    if (divisor == -1) {
+      DeoptimizeIf(al, instr->environment());
+    } else {
+      __ li(dividend, Operand(kMinInt / divisor));
+      __ Branch(&done);
+    }
+  }
+  __ bind(&not_kmin_int);
+  __ sra(dividend, scratch, shift);
   __ bind(&done);
 }
 
 
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  ASSERT(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr->environment());
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hdiv->left()->RangeCanInclude(0) && divisor < 0) {
+    DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+  }
+
+  __ FlooringDiv(result, dividend, divisor);
+}
+
+
 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
   const Register result = ToRegister(instr->result());
   const Register left = ToRegister(instr->left());
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index d4f4e22..d2cc39b 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -1266,42 +1266,39 @@
 }
 
 
-bool LChunkBuilder::HasMagicNumberForDivisor(int32_t divisor) {
-  uint32_t divisor_abs = abs(divisor);
-  // Dividing by 0 or powers of 2 is easy.
-  if (divisor == 0 || IsPowerOf2(divisor_abs)) return true;
-
-  // We have magic numbers for a few specific divisors.
-  // Details and proofs can be found in:
-  // - Hacker's Delight, Henry S. Warren, Jr.
-  // - The PowerPC Compiler Writer's Guide
-  // and probably many others.
-  //
-  // We handle
-  //   <divisor with magic numbers> * <power of 2>
-  // but not
-  //   <divisor with magic numbers> * <other divisor with magic numbers>
-  int32_t power_of_2_factor =
-    CompilerIntrinsics::CountTrailingZeros(divisor_abs);
-  DivMagicNumbers magic_numbers =
-    DivMagicNumberFor(divisor_abs >> power_of_2_factor);
-  return magic_numbers.M != InvalidDivMagicNumber.M;
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineSameAsFirst(
+          new(zone()) LFlooringDivByPowerOf2I(dividend, divisor));
+  bool can_deopt =
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->left()->RangeCanInclude(kMinInt) && divisor == -1);
+  return can_deopt ? AssignEnvironment(result) : result;
 }
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
-  LOperand* divisor = UseOrConstant(instr->right());
-  LOperand* remainder = TempRegister();
+  int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result =
-      DefineAsRegister(
-          new(zone()) LFlooringDivByConstI(dividend, divisor, remainder));
-  return AssignEnvironment(result);
+      DefineAsRegister(new(zone()) LFlooringDivByConstI(dividend, divisor));
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->RangeCanInclude(0) && divisor < 0);
+  return can_deopt ? AssignEnvironment(result) : result;
 }
 
 
 LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
-  if (instr->right()->IsConstant()) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
     return DoFlooringDivByConstI(instr);
   } else {
     HValue* right = instr->right();
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index 43ce740..45b6cb5 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -94,6 +94,7 @@
   V(Dummy)                                      \
   V(DummyUse)                                   \
   V(FlooringDivByConstI)                        \
+  V(FlooringDivByPowerOf2I)                     \
   V(ForInCacheArray)                            \
   V(ForInPrepareMap)                            \
   V(FunctionLiteral)                            \
@@ -667,20 +668,41 @@
 };
 
 
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
-  LFlooringDivByConstI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
-    inputs_[1] = divisor;
-    temps_[0] = temp;
+    divisor_ = divisor;
   }
 
   LOperand* dividend() { return inputs_[0]; }
-  LOperand* divisor() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
+  int32_t divisor() { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
   DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
 };
 
 
@@ -2649,6 +2671,7 @@
   LInstruction* DoDivI(HBinaryOperation* instr);
   LInstruction* DoModByPowerOf2I(HMod* instr);
   LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
   LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
 
  private:
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 1c22a89..0538cd4 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -5706,6 +5706,28 @@
 }
 
 
+void MacroAssembler::FlooringDiv(Register result,
+                                 Register dividend,
+                                 int32_t divisor) {
+  ASSERT(!dividend.is(result));
+  ASSERT(!dividend.is(at));
+  ASSERT(!result.is(at));
+  MultiplierAndShift ms(divisor);
+  li(at, Operand(ms.multiplier()));
+  Mult(dividend, Operand(at));
+  mfhi(result);
+  if (divisor > 0 && ms.multiplier() < 0) {
+    Addu(result, result, Operand(dividend));
+  }
+  if (divisor < 0 && ms.multiplier() > 0) {
+    Subu(result, result, Operand(dividend));
+  }
+  if (ms.shift() > 0) {
+    sra(result, result, ms.shift());
+  }
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 1b64f25..c22056f 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1304,6 +1304,10 @@
     return code_object_;
   }
 
+  // Emit code for a flooring division by a constant. The dividend register is
+  // unchanged and at gets clobbered. Dividend and result must be different.
+  void FlooringDiv(Register result, Register dividend, int32_t divisor);
+
   // -------------------------------------------------------------------------
   // StatsCounter support.
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 6291cf3..3e2b056 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1242,20 +1242,16 @@
 void StoreStubCompiler::GenerateStoreViaSetter(
     MacroAssembler* masm,
     Handle<HeapType> type,
+    Register receiver,
     Handle<JSFunction> setter) {
   // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : receiver
-  //  -- a2    : name
   //  -- ra    : return address
   // -----------------------------------
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    Register receiver = a1;
-    Register value = a0;
 
     // Save value register, so we can restore it later.
-    __ push(value);
+    __ push(value());
 
     if (!setter.is_null()) {
       // Call the JavaScript setter with receiver and value on the stack.
@@ -1265,7 +1261,7 @@
                FieldMemOperand(
                    receiver, JSGlobalObject::kGlobalReceiverOffset));
       }
-      __ Push(receiver, value);
+      __ Push(receiver, value());
       ParameterCount actual(1);
       ParameterCount expected(setter);
       __ InvokeFunction(setter, expected, actual,
@@ -1293,21 +1289,6 @@
 Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
     Handle<JSObject> object,
     Handle<Name> name) {
-  Label miss;
-
-  // Check that the map of the object hasn't changed.
-  __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
-              DO_SMI_CHECK);
-
-  // Perform global security token check if needed.
-  if (object->IsJSGlobalProxy()) {
-    __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
-  }
-
-  // Stub is never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
   __ Push(receiver(), this->name(), value());
 
   // Do tail-call to the runtime system.
@@ -1315,10 +1296,6 @@
       ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
   __ TailCallExternalReference(store_ic_property, 3, 1);
 
-  // Handle store cache miss.
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
   // Return the generated code.
   return GetCode(kind(), Code::FAST, name);
 }
@@ -1352,16 +1329,21 @@
 }
 
 
+Register StoreStubCompiler::value() {
+  return a0;
+}
+
+
 Register* StoreStubCompiler::registers() {
-  // receiver, name, value, scratch1, scratch2, scratch3.
-  static Register registers[] = { a1, a2, a0, a3, t0, t1 };
+  // receiver, name, scratch1, scratch2, scratch3.
+  static Register registers[] = { a1, a2, a3, t0, t1 };
   return registers;
 }
 
 
 Register* KeyedStoreStubCompiler::registers() {
-  // receiver, name, value, scratch1, scratch2, scratch3.
-  static Register registers[] = { a2, a1, a0, a3, t0, t1 };
+  // receiver, name, scratch1, scratch2, scratch3.
+  static Register registers[] = { a2, a1, a3, t0, t1 };
   return registers;
 }
 
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 31a59cb..0988d9d 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -403,6 +403,13 @@
 
 void ConstantPoolArray::ConstantPoolArrayVerify() {
   CHECK(IsConstantPoolArray());
+  for (int i = 0; i < count_of_code_ptr_entries(); i++) {
+    Address code_entry = get_code_ptr_entry(first_code_ptr_index() + i);
+    VerifyPointer(Code::GetCodeFromTargetAddress(code_entry));
+  }
+  for (int i = 0; i < count_of_heap_ptr_entries(); i++) {
+    VerifyObjectField(OffsetOfElementAt(first_heap_ptr_index() + i));
+  }
 }
 
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 74a252c..ec2ffdb 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -926,7 +926,8 @@
   bool result = IsHeapObject() &&
                 (HeapObject::cast(this)->map()->instance_type() ==
                  JS_GLOBAL_PROXY_TYPE);
-  ASSERT(!result || IsAccessCheckNeeded());
+  ASSERT(!result ||
+         HeapObject::cast(this)->map()->is_access_check_needed());
   return result;
 }
 
@@ -951,8 +952,14 @@
 
 
 bool Object::IsAccessCheckNeeded() {
-  return IsHeapObject()
-    && HeapObject::cast(this)->map()->is_access_check_needed();
+  if (!IsHeapObject()) return false;
+  if (IsJSGlobalProxy()) {
+    JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
+    GlobalObject* global =
+        proxy->GetIsolate()->context()->global_object();
+    return proxy->IsDetachedFrom(global);
+  }
+  return HeapObject::cast(this)->map()->is_access_check_needed();
 }
 
 
@@ -2075,7 +2082,6 @@
 }
 
 
-
 void Object::VerifyApiCallResultType() {
 #if ENABLE_EXTRA_CHECKS
   if (!(IsSmi() ||
@@ -2192,8 +2198,12 @@
 }
 
 
-SMI_ACCESSORS(ConstantPoolArray, first_ptr_index, kFirstPointerIndexOffset)
-SMI_ACCESSORS(ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset)
+SMI_ACCESSORS(
+    ConstantPoolArray, first_code_ptr_index, kFirstCodePointerIndexOffset)
+SMI_ACCESSORS(
+    ConstantPoolArray, first_heap_ptr_index, kFirstHeapPointerIndexOffset)
+SMI_ACCESSORS(
+    ConstantPoolArray, first_int32_index, kFirstInt32IndexOffset)
 
 
 int ConstantPoolArray::first_int64_index() {
@@ -2202,12 +2212,17 @@
 
 
 int ConstantPoolArray::count_of_int64_entries() {
-  return first_ptr_index();
+  return first_code_ptr_index();
 }
 
 
-int ConstantPoolArray::count_of_ptr_entries() {
-  return first_int32_index() - first_ptr_index();
+int ConstantPoolArray::count_of_code_ptr_entries() {
+  return first_heap_ptr_index() - first_code_ptr_index();
+}
+
+
+int ConstantPoolArray::count_of_heap_ptr_entries() {
+  return first_int32_index() - first_heap_ptr_index();
 }
 
 
@@ -2217,32 +2232,44 @@
 
 
 void ConstantPoolArray::SetEntryCounts(int number_of_int64_entries,
-                                       int number_of_ptr_entries,
+                                       int number_of_code_ptr_entries,
+                                       int number_of_heap_ptr_entries,
                                        int number_of_int32_entries) {
-  set_first_ptr_index(number_of_int64_entries);
-  set_first_int32_index(number_of_int64_entries + number_of_ptr_entries);
-  set_length(number_of_int64_entries + number_of_ptr_entries +
-             number_of_int32_entries);
+  int current_index = number_of_int64_entries;
+  set_first_code_ptr_index(current_index);
+  current_index += number_of_code_ptr_entries;
+  set_first_heap_ptr_index(current_index);
+  current_index += number_of_heap_ptr_entries;
+  set_first_int32_index(current_index);
+  current_index += number_of_int32_entries;
+  set_length(current_index);
 }
 
 
 int64_t ConstantPoolArray::get_int64_entry(int index) {
   ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(index >= 0 && index < first_ptr_index());
+  ASSERT(index >= 0 && index < first_code_ptr_index());
   return READ_INT64_FIELD(this, OffsetOfElementAt(index));
 }
 
 double ConstantPoolArray::get_int64_entry_as_double(int index) {
   STATIC_ASSERT(kDoubleSize == kInt64Size);
   ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(index >= 0 && index < first_ptr_index());
+  ASSERT(index >= 0 && index < first_code_ptr_index());
   return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
 }
 
 
-Object* ConstantPoolArray::get_ptr_entry(int index) {
+Address ConstantPoolArray::get_code_ptr_entry(int index) {
   ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(index >= first_ptr_index() && index < first_int32_index());
+  ASSERT(index >= first_code_ptr_index() && index < first_heap_ptr_index());
+  return reinterpret_cast<Address>(READ_FIELD(this, OffsetOfElementAt(index)));
+}
+
+
+Object* ConstantPoolArray::get_heap_ptr_entry(int index) {
+  ASSERT(map() == GetHeap()->constant_pool_array_map());
+  ASSERT(index >= first_heap_ptr_index() && index < first_int32_index());
   return READ_FIELD(this, OffsetOfElementAt(index));
 }
 
@@ -2254,9 +2281,16 @@
 }
 
 
+void ConstantPoolArray::set(int index, Address value) {
+  ASSERT(map() == GetHeap()->constant_pool_array_map());
+  ASSERT(index >= first_code_ptr_index() && index < first_heap_ptr_index());
+  WRITE_FIELD(this, OffsetOfElementAt(index), reinterpret_cast<Object*>(value));
+}
+
+
 void ConstantPoolArray::set(int index, Object* value) {
   ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(index >= first_ptr_index() && index < first_int32_index());
+  ASSERT(index >= first_code_ptr_index() && index < first_int32_index());
   WRITE_FIELD(this, OffsetOfElementAt(index), value);
   WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
 }
@@ -2264,7 +2298,7 @@
 
 void ConstantPoolArray::set(int index, int64_t value) {
   ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(index >= first_int64_index() && index < first_ptr_index());
+  ASSERT(index >= first_int64_index() && index < first_code_ptr_index());
   WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
 }
 
@@ -2272,7 +2306,7 @@
 void ConstantPoolArray::set(int index, double value) {
   STATIC_ASSERT(kDoubleSize == kInt64Size);
   ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(index >= first_int64_index() && index < first_ptr_index());
+  ASSERT(index >= first_int64_index() && index < first_code_ptr_index());
   WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
 }
 
@@ -3840,7 +3874,8 @@
   if (instance_type == CONSTANT_POOL_ARRAY_TYPE) {
     return ConstantPoolArray::SizeFor(
         reinterpret_cast<ConstantPoolArray*>(this)->count_of_int64_entries(),
-        reinterpret_cast<ConstantPoolArray*>(this)->count_of_ptr_entries(),
+        reinterpret_cast<ConstantPoolArray*>(this)->count_of_code_ptr_entries(),
+        reinterpret_cast<ConstantPoolArray*>(this)->count_of_heap_ptr_entries(),
         reinterpret_cast<ConstantPoolArray*>(this)->count_of_int32_entries());
   }
   if (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 6cf1798..15ea33a 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -604,11 +604,14 @@
   HeapObject::PrintHeader(out, "ConstantPoolArray");
   PrintF(out, " - length: %d", length());
   for (int i = 0; i < length(); i++) {
-    if (i < first_ptr_index()) {
+    if (i < first_code_ptr_index()) {
       PrintF(out, "\n  [%d]: double: %g", i, get_int64_entry_as_double(i));
+    } else if (i < first_heap_ptr_index()) {
+      PrintF(out, "\n  [%d]: code target pointer: %p", i,
+             reinterpret_cast<void*>(get_code_ptr_entry(i)));
     } else if (i < first_int32_index()) {
-      PrintF(out, "\n  [%d]: pointer: %p", i,
-             reinterpret_cast<void*>(get_ptr_entry(i)));
+      PrintF(out, "\n  [%d]: heap pointer: %p", i,
+             reinterpret_cast<void*>(get_heap_ptr_entry(i)));
     } else {
       PrintF(out, "\n  [%d]: int32: %d", i, get_int32_entry(i));
     }
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
index c671e63..0b3ec59 100644
--- a/src/objects-visiting-inl.h
+++ b/src/objects-visiting-inl.h
@@ -489,16 +489,16 @@
     Map* map, HeapObject* object) {
   Heap* heap = map->GetHeap();
   ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
-  if (constant_pool->count_of_ptr_entries() > 0) {
-    int first_ptr_offset = constant_pool->OffsetOfElementAt(
-        constant_pool->first_ptr_index());
-    int last_ptr_offset = constant_pool->OffsetOfElementAt(
-        constant_pool->first_ptr_index() +
-        constant_pool->count_of_ptr_entries() - 1);
-    StaticVisitor::VisitPointers(
-        heap,
-        HeapObject::RawField(object, first_ptr_offset),
-        HeapObject::RawField(object, last_ptr_offset));
+  for (int i = 0; i < constant_pool->count_of_code_ptr_entries(); i++) {
+    int index = constant_pool->first_code_ptr_index() + i;
+    Address code_entry =
+        reinterpret_cast<Address>(constant_pool->RawFieldOfElementAt(index));
+    StaticVisitor::VisitCodeEntry(heap, code_entry);
+  }
+  for (int i = 0; i < constant_pool->count_of_heap_ptr_entries(); i++) {
+    int index = constant_pool->first_heap_ptr_index() + i;
+    StaticVisitor::VisitPointer(heap,
+                                constant_pool->RawFieldOfElementAt(index));
   }
 }
 
diff --git a/src/objects.cc b/src/objects.cc
index 56598be..cd86a43 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -1280,14 +1280,13 @@
   // - the space the existing string occupies is too small for a regular
   //   external string.
   // - the existing string is in old pointer space and the backing store of
-  //   the external string is not aligned.  The GC cannot deal with fields
-  //   containing an unaligned address that points to outside of V8's heap.
+  //   the external string is not aligned.  The GC cannot deal with a field
+  //   containing a possibly unaligned address pointing outside of V8's heap.
   // In either case we resort to a short external string instead, omitting
   // the field caching the address of the backing store.  When we encounter
   // short external strings in generated code, we need to bailout to runtime.
   if (size < ExternalString::kSize ||
-      (!IsAligned(reinterpret_cast<intptr_t>(resource->data()), kPointerSize) &&
-       heap->old_pointer_space()->Contains(this))) {
+      heap->old_pointer_space()->Contains(this)) {
     this->set_map_no_write_barrier(
         is_internalized
             ? (is_ascii
@@ -1351,14 +1350,13 @@
   // - the space the existing string occupies is too small for a regular
   //   external string.
   // - the existing string is in old pointer space and the backing store of
-  //   the external string is not aligned.  The GC cannot deal with fields
-  //   containing an unaligned address that points to outside of V8's heap.
+  //   the external string is not aligned.  The GC cannot deal with a field
+  //   containing a possibly unaligned address pointing outside of V8's heap.
   // In either case we resort to a short external string instead, omitting
   // the field caching the address of the backing store.  When we encounter
   // short external strings in generated code, we need to bailout to runtime.
   if (size < ExternalString::kSize ||
-      (!IsAligned(reinterpret_cast<intptr_t>(resource->data()), kPointerSize) &&
-       heap->old_pointer_space()->Contains(this))) {
+      heap->old_pointer_space()->Contains(this)) {
     this->set_map_no_write_barrier(
         is_internalized ? heap->short_external_ascii_internalized_string_map()
                         : heap->short_external_ascii_string_map());
@@ -9454,13 +9452,13 @@
 
 
 void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) {
-  if (count_of_ptr_entries() > 0) {
-    int first_ptr_offset = OffsetOfElementAt(first_ptr_index());
-    int last_ptr_offset =
-        OffsetOfElementAt(first_ptr_index() + count_of_ptr_entries() - 1);
-    v->VisitPointers(
-        HeapObject::RawField(this, first_ptr_offset),
-        HeapObject::RawField(this, last_ptr_offset));
+  for (int i = 0; i < count_of_code_ptr_entries(); i++) {
+    int index = first_code_ptr_index() + i;
+    v->VisitCodeEntry(reinterpret_cast<Address>(RawFieldOfElementAt(index)));
+  }
+  for (int i = 0; i < count_of_heap_ptr_entries(); i++) {
+    int index = first_heap_ptr_index() + i;
+    v->VisitPointer(RawFieldOfElementAt(index));
   }
 }
 
diff --git a/src/objects.h b/src/objects.h
index 089d11d..cd8999c 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2361,10 +2361,6 @@
   // been modified since it was created.  May give false positives.
   bool IsDirty();
 
-  // If the receiver is a JSGlobalProxy this method will return its prototype,
-  // otherwise the result is the receiver itself.
-  inline Object* BypassGlobalProxy();
-
   // Accessors for hidden properties object.
   //
   // Hidden properties are not local properties of the object itself.
@@ -3162,29 +3158,35 @@
 // ConstantPoolArray describes a fixed-sized array containing constant pool
 // entires.
 // The format of the pool is:
-//   [0]: Field holding the first index which is a pointer entry
-//   [1]: Field holding the first index which is a int32 entry
-//   [2] ... [first_ptr_index() - 1]: 64 bit entries
-//   [first_ptr_index()] ... [first_int32_index() - 1]: pointer entries
-//   [first_int32_index()] ... [length - 1]: 32 bit entries
+//   [0]: Field holding the first index which is a raw code target pointer entry
+//   [1]: Field holding the first index which is a heap pointer entry
+//   [2]: Field holding the first index which is an int32 entry
+//   [3]                      ... [first_code_ptr_index() - 1] : 64 bit entries
+//   [first_code_ptr_index()] ... [first_heap_ptr_index() - 1] : code pointers
+//   [first_heap_ptr_index()] ... [first_int32_index() - 1]    : heap pointers
+//   [first_int32_index()]    ... [length - 1]                 : 32 bit entries
 class ConstantPoolArray: public FixedArrayBase {
  public:
   // Getters for the field storing the first index for different type entries.
-  inline int first_ptr_index();
+  inline int first_code_ptr_index();
+  inline int first_heap_ptr_index();
   inline int first_int64_index();
   inline int first_int32_index();
 
   // Getters for counts of different type entries.
-  inline int count_of_ptr_entries();
+  inline int count_of_code_ptr_entries();
+  inline int count_of_heap_ptr_entries();
   inline int count_of_int64_entries();
   inline int count_of_int32_entries();
 
   // Setter and getter for pool elements.
-  inline Object* get_ptr_entry(int index);
+  inline Address get_code_ptr_entry(int index);
+  inline Object* get_heap_ptr_entry(int index);
   inline int64_t get_int64_entry(int index);
   inline int32_t get_int32_entry(int index);
   inline double get_int64_entry_as_double(int index);
 
+  inline void set(int index, Address value);
   inline void set(int index, Object* value);
   inline void set(int index, int64_t value);
   inline void set(int index, double value);
@@ -3192,7 +3194,8 @@
 
   // Set up initial state.
   inline void SetEntryCounts(int number_of_int64_entries,
-                             int number_of_ptr_entries,
+                             int number_of_code_ptr_entries,
+                             int number_of_heap_ptr_entries,
                              int number_of_int32_entries);
 
   // Copy operations
@@ -3200,10 +3203,12 @@
 
   // Garbage collection support.
   inline static int SizeFor(int number_of_int64_entries,
-                            int number_of_ptr_entries,
+                            int number_of_code_ptr_entries,
+                            int number_of_heap_ptr_entries,
                             int number_of_int32_entries) {
     return RoundUp(OffsetAt(number_of_int64_entries,
-                            number_of_ptr_entries,
+                            number_of_code_ptr_entries,
+                            number_of_heap_ptr_entries,
                             number_of_int32_entries),
                    kPointerSize);
   }
@@ -3212,22 +3217,33 @@
   inline int OffsetOfElementAt(int index) {
     ASSERT(index < length());
     if (index >= first_int32_index()) {
-      return OffsetAt(count_of_int64_entries(), count_of_ptr_entries(),
-                      index - first_int32_index());
-    } else if (index >= first_ptr_index()) {
-      return OffsetAt(count_of_int64_entries(), index - first_ptr_index(), 0);
+      return OffsetAt(count_of_int64_entries(), count_of_code_ptr_entries(),
+                      count_of_heap_ptr_entries(), index - first_int32_index());
+    } else if (index >= first_heap_ptr_index()) {
+      return OffsetAt(count_of_int64_entries(), count_of_code_ptr_entries(),
+                      index - first_heap_ptr_index(), 0);
+    } else if (index >= first_code_ptr_index()) {
+      return OffsetAt(count_of_int64_entries(), index - first_code_ptr_index(),
+                      0, 0);
     } else {
-      return OffsetAt(index, 0, 0);
+      return OffsetAt(index, 0, 0, 0);
     }
   }
 
   // Casting.
   static inline ConstantPoolArray* cast(Object* obj);
 
+  // Garbage collection support.
+  Object** RawFieldOfElementAt(int index) {
+    return HeapObject::RawField(this, OffsetOfElementAt(index));
+  }
+
   // Layout description.
-  static const int kFirstPointerIndexOffset = FixedArray::kHeaderSize;
+  static const int kFirstCodePointerIndexOffset = FixedArray::kHeaderSize;
+  static const int kFirstHeapPointerIndexOffset =
+      kFirstCodePointerIndexOffset + kPointerSize;
   static const int kFirstInt32IndexOffset =
-      kFirstPointerIndexOffset + kPointerSize;
+      kFirstHeapPointerIndexOffset + kPointerSize;
   static const int kFirstOffset = kFirstInt32IndexOffset + kPointerSize;
 
   // Dispatched behavior.
@@ -3237,15 +3253,18 @@
   DECLARE_VERIFIER(ConstantPoolArray)
 
  private:
-  inline void set_first_ptr_index(int value);
+  inline void set_first_code_ptr_index(int value);
+  inline void set_first_heap_ptr_index(int value);
   inline void set_first_int32_index(int value);
 
   inline static int OffsetAt(int number_of_int64_entries,
-                             int number_of_ptr_entries,
+                             int number_of_code_ptr_entries,
+                             int number_of_heap_ptr_entries,
                              int number_of_int32_entries) {
     return kFirstOffset
         + (number_of_int64_entries * kInt64Size)
-        + (number_of_ptr_entries * kPointerSize)
+        + (number_of_code_ptr_entries * kPointerSize)
+        + (number_of_heap_ptr_entries * kPointerSize)
         + (number_of_int32_entries * kInt32Size);
   }
 
diff --git a/src/parser.cc b/src/parser.cc
index 68823e6..9e456f0 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -3360,14 +3360,14 @@
     Handle<String> name;
     bool is_strict_reserved_name = false;
     Scanner::Location function_name_location = Scanner::Location::invalid();
+    FunctionLiteral::FunctionType function_type =
+        FunctionLiteral::ANONYMOUS_EXPRESSION;
     if (peek_any_identifier()) {
       name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
                                                  CHECK_OK);
       function_name_location = scanner()->location();
+      function_type = FunctionLiteral::NAMED_EXPRESSION;
     }
-    FunctionLiteral::FunctionType function_type = name.is_null()
-        ? FunctionLiteral::ANONYMOUS_EXPRESSION
-        : FunctionLiteral::NAMED_EXPRESSION;
     result = ParseFunctionLiteral(name,
                                   function_name_location,
                                   is_strict_reserved_name,
@@ -3492,10 +3492,11 @@
 
 Expression* Parser::ParseObjectLiteral(bool* ok) {
   // ObjectLiteral ::
-  //   '{' (
-  //       ((IdentifierName | String | Number) ':' AssignmentExpression)
-  //     | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
-  //    )*[','] '}'
+  // '{' ((
+  //       ((IdentifierName | String | Number) ':' AssignmentExpression) |
+  //       (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
+  //      ) ',')* '}'
+  // (Except that a trailing comma is neither required nor allowed.)
 
   int pos = peek_position();
   ZoneList<ObjectLiteral::Property*>* properties =
@@ -3529,14 +3530,12 @@
           // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
           // We have already read the "get" or "set" keyword.
           Token::Value next = Next();
-          bool is_keyword = Token::IsKeyword(next);
           if (next != i::Token::IDENTIFIER &&
               next != i::Token::FUTURE_RESERVED_WORD &&
               next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
               next != i::Token::NUMBER &&
               next != i::Token::STRING &&
-              !is_keyword) {
-            // Unexpected token.
+              !Token::IsKeyword(next)) {
             ReportUnexpectedToken(next);
             *ok = false;
             return NULL;
@@ -3544,9 +3543,7 @@
           // Validate the property.
           PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
           checker.CheckProperty(next, type, CHECK_OK);
-          Handle<String> name = is_keyword
-              ? isolate_->factory()->InternalizeUtf8String(Token::String(next))
-              : GetSymbol();
+          Handle<String> name = GetSymbol();
           FunctionLiteral* value =
               ParseFunctionLiteral(name,
                                    scanner()->location(),
@@ -3571,8 +3568,8 @@
           }
           continue;  // restart the while
         }
-        // Failed to parse as get/set property, so it's just a property
-        // called "get" or "set".
+        // Failed to parse as get/set property, so it's just a normal property
+        // (which might be called "get" or "set" or something else).
         key = factory()->NewLiteral(id, next_pos);
         break;
       }
@@ -3604,7 +3601,6 @@
           Handle<String> string = GetSymbol();
           key = factory()->NewLiteral(string, next_pos);
         } else {
-          // Unexpected token.
           Token::Value next = Next();
           ReportUnexpectedToken(next);
           *ok = false;
@@ -4994,7 +4990,7 @@
 
 
 uc32 RegExpParser::ParseOctalLiteral() {
-  ASSERT('0' <= current() && current() <= '7');
+  ASSERT(('0' <= current() && current() <= '7') || current() == kEndMarker);
   // For compatibility with some other browsers (not all), we parse
   // up to three octal digits with a value below 256.
   uc32 value = current() - '0';
diff --git a/src/preparser.cc b/src/preparser.cc
index a5de23e..dfda980 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -347,7 +347,7 @@
   //   'function' '*' Identifier '(' FormalParameterListopt ')'
   //      '{' FunctionBody '}'
   Expect(Token::FUNCTION, CHECK_OK);
-
+  int pos = position();
   bool is_generator = allow_generators() && Check(Token::MUL);
   bool is_strict_reserved = false;
   Identifier name = ParseIdentifierOrStrictReservedWord(
@@ -356,6 +356,8 @@
                        scanner()->location(),
                        is_strict_reserved,
                        is_generator,
+                       pos,
+                       FunctionLiteral::DECLARATION,
                        CHECK_OK);
   return Statement::FunctionDeclaration();
 }
@@ -1063,20 +1065,25 @@
   Expression result = Expression::Default();
   if (peek() == Token::FUNCTION) {
     Consume(Token::FUNCTION);
-
+    int function_token_position = position();
     bool is_generator = allow_generators() && Check(Token::MUL);
     Identifier name = Identifier::Default();
     bool is_strict_reserved_name = false;
     Scanner::Location function_name_location = Scanner::Location::invalid();
+    FunctionLiteral::FunctionType function_type =
+        FunctionLiteral::ANONYMOUS_EXPRESSION;
     if (peek_any_identifier()) {
       name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
                                                  CHECK_OK);
       function_name_location = scanner()->location();
+      function_type = FunctionLiteral::NAMED_EXPRESSION;
     }
     result = ParseFunctionLiteral(name,
                                   function_name_location,
                                   is_strict_reserved_name,
                                   is_generator,
+                                  function_token_position,
+                                  function_type,
                                   CHECK_OK);
   } else {
     result = ParsePrimaryExpression(CHECK_OK);
@@ -1124,10 +1131,11 @@
 
 PreParser::Expression PreParser::ParseObjectLiteral(bool* ok) {
   // ObjectLiteral ::
-  //   '{' (
-  //       ((IdentifierName | String | Number) ':' AssignmentExpression)
-  //     | (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
-  //    )*[','] '}'
+  // '{' ((
+  //       ((IdentifierName | String | Number) ':' AssignmentExpression) |
+  //       (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
+  //      ) ',')* '}'
+  // (Except that a trailing comma is neither required nor allowed.)
 
   ObjectLiteralChecker checker(this, scope_->language_mode());
 
@@ -1142,56 +1150,57 @@
         bool is_setter = false;
         ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
         if ((is_getter || is_setter) && peek() != Token::COLON) {
-            Token::Value name = Next();
-            bool is_keyword = Token::IsKeyword(name);
-            if (name != Token::IDENTIFIER &&
-                name != Token::FUTURE_RESERVED_WORD &&
-                name != Token::FUTURE_STRICT_RESERVED_WORD &&
-                name != Token::NUMBER &&
-                name != Token::STRING &&
-                !is_keyword) {
+            Token::Value next = Next();
+            if (next != Token::IDENTIFIER &&
+                next != Token::FUTURE_RESERVED_WORD &&
+                next != Token::FUTURE_STRICT_RESERVED_WORD &&
+                next != Token::NUMBER &&
+                next != Token::STRING &&
+                !Token::IsKeyword(next)) {
+              ReportUnexpectedToken(next);
               *ok = false;
               return Expression::Default();
             }
-            if (!is_keyword) {
-              LogSymbol();
-            }
+            // Validate the property
             PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
-            checker.CheckProperty(name, type, CHECK_OK);
-            ParseFunctionLiteral(Identifier::Default(),
+            checker.CheckProperty(next, type, CHECK_OK);
+            PreParserIdentifier name = GetSymbol(scanner());
+            ParseFunctionLiteral(name,
                                  scanner()->location(),
                                  false,  // reserved words are allowed here
                                  false,  // not a generator
+                                 RelocInfo::kNoPosition,
+                                 FunctionLiteral::ANONYMOUS_EXPRESSION,
                                  CHECK_OK);
             if (peek() != Token::RBRACE) {
               Expect(Token::COMMA, CHECK_OK);
             }
             continue;  // restart the while
         }
-        checker.CheckProperty(next, kValueProperty, CHECK_OK);
         break;
       }
       case Token::STRING:
         Consume(next);
-        checker.CheckProperty(next, kValueProperty, CHECK_OK);
         LogSymbol();
         break;
       case Token::NUMBER:
         Consume(next);
-        checker.CheckProperty(next, kValueProperty, CHECK_OK);
         break;
       default:
         if (Token::IsKeyword(next)) {
           Consume(next);
-          checker.CheckProperty(next, kValueProperty, CHECK_OK);
           LogSymbol();
         } else {
-          // Unexpected token.
+          Token::Value next = Next();
+          ReportUnexpectedToken(next);
           *ok = false;
           return Expression::Default();
         }
     }
 
+    // Validate the property
+    checker.CheckProperty(next, kValueProperty, CHECK_OK);
+
     Expect(Token::COLON, CHECK_OK);
     ParseAssignmentExpression(true, CHECK_OK);
 
@@ -1232,6 +1241,8 @@
     Scanner::Location function_name_location,
     bool name_is_strict_reserved,
     bool is_generator,
+    int function_token_pos,
+    FunctionLiteral::FunctionType function_type,
     bool* ok) {
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
diff --git a/src/preparser.h b/src/preparser.h
index 6d89713..8a5517e 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -903,6 +903,8 @@
       Scanner::Location function_name_location,
       bool name_is_strict_reserved,
       bool is_generator,
+      int function_token_pos,
+      FunctionLiteral::FunctionType function_type,
       bool* ok);
   void ParseLazyFunctionLiteralBody(bool* ok);
 
diff --git a/src/promise.js b/src/promise.js
index 82aa990..2c36d4d 100644
--- a/src/promise.js
+++ b/src/promise.js
@@ -248,13 +248,17 @@
 function PromiseAll(values) {
   var deferred = %_CallFunction(this, PromiseDeferred);
   var resolutions = [];
+  if (!%_IsArray(values)) {
+    deferred.reject(MakeTypeError('invalid_argument'));
+    return deferred.promise;
+  }
   try {
     var count = values.length;
     if (count === 0) {
       deferred.resolve(resolutions);
     } else {
       for (var i = 0; i < values.length; ++i) {
-        this.cast(values[i]).chain(
+        this.cast(values[i]).then(
           function(i, x) {
             resolutions[i] = x;
             if (--count === 0) deferred.resolve(resolutions);
@@ -271,9 +275,13 @@
 
 function PromiseOne(values) {
   var deferred = %_CallFunction(this, PromiseDeferred);
+  if (!%_IsArray(values)) {
+    deferred.reject(MakeTypeError('invalid_argument'));
+    return deferred.promise;
+  }
   try {
     for (var i = 0; i < values.length; ++i) {
-      this.cast(values[i]).chain(
+      this.cast(values[i]).then(
         function(x) { deferred.resolve(x) },
         function(r) { deferred.reject(r) }
       );
diff --git a/src/runtime.cc b/src/runtime.cc
index ffc5699..7e47615 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -14714,7 +14714,7 @@
   ASSERT(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
-  ASSERT(object->IsAccessCheckNeeded());
+  ASSERT(object->map()->is_access_check_needed());
   Handle<Object> key = args.at<Object>(2);
   SaveContext save(isolate);
   isolate->set_context(observer->context());
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
index e89eb1b..a1479b2 100644
--- a/src/store-buffer.cc
+++ b/src/store-buffer.cc
@@ -509,10 +509,12 @@
 // be marked with a free space or filler.  Because the free space and filler
 // maps do not move we can always recognize these even after a compaction.
 // Normal objects like FixedArrays and JSObjects should not contain references
-// to these maps.  The special garbage section (see comment in spaces.h) is
-// skipped since it can contain absolutely anything.  Any objects that are
-// allocated during iteration may or may not be visited by the iteration, but
-// they will not be partially visited.
+// to these maps.  Constant pool array objects may contain references to these
+// maps, however, constant pool arrays cannot contain pointers to new space
+// objects, therefore they are skipped.  The special garbage section (see
+// comment in spaces.h) is skipped since it can contain absolutely anything.
+// Any objects that are allocated during iteration may or may not be visited by
+// the iteration, but they will not be partially visited.
 void StoreBuffer::FindPointersToNewSpaceOnPage(
     PagedSpace* space,
     Page* page,
@@ -526,13 +528,17 @@
 
   Object* free_space_map = heap_->free_space_map();
   Object* two_pointer_filler_map = heap_->two_pointer_filler_map();
+  Object* constant_pool_array_map = heap_->constant_pool_array_map();
 
   while (visitable_end < end_of_page) {
     Object* o = *reinterpret_cast<Object**>(visitable_end);
-    // Skip fillers but not things that look like fillers in the special
-    // garbage section which can contain anything.
+    // Skip fillers or constant pool arrays (which never contain new-space
+    // pointers but can contain pointers which can be confused for fillers)
+    // but not things that look like fillers in the special garbage section
+    // which can contain anything.
     if (o == free_space_map ||
         o == two_pointer_filler_map ||
+        o == constant_pool_array_map ||
         (visitable_end == space->top() && visitable_end != space->limit())) {
       if (visitable_start != visitable_end) {
         // After calling this the special garbage section may have moved.
@@ -549,12 +555,12 @@
       if (visitable_end == space->top() && visitable_end != space->limit()) {
         visitable_start = visitable_end = space->limit();
       } else {
-        // At this point we are either at the start of a filler or we are at
-        // the point where the space->top() used to be before the
-        // visit_pointer_region call above.  Either way we can skip the
-        // object at the current spot:  We don't promise to visit objects
-        // allocated during heap traversal, and if space->top() moved then it
-        // must be because an object was allocated at this point.
+        // At this point we are either at the start of a filler, a
+        // constant pool array, or we are at the point where the space->top()
+        // used to be before the visit_pointer_region call above.  Either way we
+        // can skip the object at the current spot:  We don't promise to visit
+        // objects allocated during heap traversal, and if space->top() moved
+        // then it must be because an object was allocated at this point.
         visitable_start =
             visitable_end + HeapObject::FromAddress(visitable_end)->Size();
         visitable_end = visitable_start;
@@ -562,6 +568,7 @@
     } else {
       ASSERT(o != free_space_map);
       ASSERT(o != two_pointer_filler_map);
+      ASSERT(o != constant_pool_array_map);
       ASSERT(visitable_end < space->top() || visitable_end >= space->limit());
       visitable_end += kPointerSize;
     }
diff --git a/src/utils.cc b/src/utils.cc
index 8462615..6838cb0 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -97,18 +97,4 @@
 }
 
 
-const DivMagicNumbers DivMagicNumberFor(int32_t divisor) {
-  switch (divisor) {
-    case 3:    return DivMagicNumberFor3;
-    case 5:    return DivMagicNumberFor5;
-    case 7:    return DivMagicNumberFor7;
-    case 9:    return DivMagicNumberFor9;
-    case 11:   return DivMagicNumberFor11;
-    case 25:   return DivMagicNumberFor25;
-    case 125:  return DivMagicNumberFor125;
-    case 625:  return DivMagicNumberFor625;
-    default:   return InvalidDivMagicNumber;
-  }
-}
-
 } }  // namespace v8::internal
diff --git a/src/utils.h b/src/utils.h
index a20e209..7538226 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -105,32 +105,6 @@
 }
 
 
-// Magic numbers for integer division.
-// These are kind of 2's complement reciprocal of the divisors.
-// Details and proofs can be found in:
-// - Hacker's Delight, Henry S. Warren, Jr.
-// - The PowerPC Compiler Writer’s Guide
-// and probably many others.
-// See details in the implementation of the algorithm in
-// lithium-codegen-arm.cc : LCodeGen::TryEmitSignedIntegerDivisionByConstant().
-struct DivMagicNumbers {
-  unsigned M;
-  unsigned s;
-};
-
-const DivMagicNumbers InvalidDivMagicNumber= {0, 0};
-const DivMagicNumbers DivMagicNumberFor3   = {0x55555556, 0};
-const DivMagicNumbers DivMagicNumberFor5   = {0x66666667, 1};
-const DivMagicNumbers DivMagicNumberFor7   = {0x92492493, 2};
-const DivMagicNumbers DivMagicNumberFor9   = {0x38e38e39, 1};
-const DivMagicNumbers DivMagicNumberFor11  = {0x2e8ba2e9, 1};
-const DivMagicNumbers DivMagicNumberFor25  = {0x51eb851f, 3};
-const DivMagicNumbers DivMagicNumberFor125 = {0x10624dd3, 3};
-const DivMagicNumbers DivMagicNumberFor625 = {0x68db8bad, 8};
-
-const DivMagicNumbers DivMagicNumberFor(int32_t divisor);
-
-
 // The C++ standard leaves the semantics of '>>' undefined for
 // negative signed operands. Most implementations do the right thing,
 // though.
diff --git a/src/version.cc b/src/version.cc
index ea2e5ec..dd5ec3d 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     25
-#define BUILD_NUMBER      6
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      7
+#define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index 073fcbe..f2ec029 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -267,6 +267,12 @@
 }
 
 
+Address RelocInfo::constant_pool_entry_address() {
+  UNREACHABLE();
+  return NULL;
+}
+
+
 int RelocInfo::target_address_size() {
   if (IsCodedSpecially()) {
     return Assembler::kSpecialTargetSize;
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 63f0533..53d45bb 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -1065,6 +1065,14 @@
 }
 
 
+void Assembler::imull(Register src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(src);
+  emit(0xF7);
+  emit_modrm(0x5, src);
+}
+
+
 void Assembler::imull(Register dst, Register src) {
   EnsureSpace ensure_space(this);
   emit_optional_rex_32(dst, src);
@@ -3204,6 +3212,12 @@
   return (1 << rmode_) & kApplyMask;
 }
 
+
+bool RelocInfo::IsInConstantPool() {
+  return false;
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index b1b50c0..4214249 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -937,6 +937,7 @@
   void imul(Register dst, const Operand& src);           // dst = dst * src.
   void imul(Register dst, Register src, Immediate imm);  // dst = src * imm.
   // Signed 32-bit multiply instructions.
+  void imull(Register src);                              // edx:eax = eax * src.
   void imull(Register dst, Register src);                 // dst = dst * src.
   void imull(Register dst, const Operand& src);           // dst = dst * src.
   void imull(Register dst, Register src, Immediate imm);  // dst = src * imm.
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 4dc9d4a..1b6a97b 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -1016,6 +1016,37 @@
 }
 
 
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  ASSERT(ToRegister(instr->result()).is(rax));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr->environment());
+    return;
+  }
+
+  __ FlooringDiv(dividend, Abs(divisor));
+  __ movl(rax, dividend);
+  __ shrl(rax, Immediate(31));
+  __ addl(rdx, rax);
+  __ imull(rdx, rdx, Immediate(Abs(divisor)));
+  __ movl(rax, dividend);
+  __ subl(rax, rdx);
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hmod->left()->CanBeNegative()) {
+    Label remainder_not_zero;
+    __ j(not_zero, &remainder_not_zero, Label::kNear);
+    __ cmpl(dividend, Immediate(0));
+    DeoptimizeIf(less, instr->environment());
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
 void LCodeGen::DoModI(LModI* instr) {
   if (instr->hydrogen()->RightIsPowerOf2()) {
     return DoModByPowerOf2I(reinterpret_cast<LModByPowerOf2I*>(instr));
@@ -1119,42 +1150,22 @@
 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  Register temp = ToRegister(instr->temp());
-  Register result = ToRegister(instr->result());
+  ASSERT(ToRegister(instr->result()).is(rdx));
 
   if (divisor == 0) {
     DeoptimizeIf(no_condition, instr->environment());
     return;
   }
 
-  // Find b which: 2^b < divisor_abs < 2^(b+1).
-  uint32_t divisor_abs = abs(divisor);
-  unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
-  unsigned shift = 32 + b;  // Precision +1bit (effectively).
-  double multiplier_f =
-      static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
-  int64_t multiplier;
-  if (multiplier_f - std::floor(multiplier_f) < 0.5) {
-    multiplier = static_cast<int64_t>(std::floor(multiplier_f));
-  } else {
-    multiplier = static_cast<int64_t>(std::floor(multiplier_f)) + 1;
-  }
-  // The multiplier is a uint32.
-  ASSERT(multiplier > 0 &&
-         multiplier < (static_cast<int64_t>(1) << 32));
-  // The multiply is int64, so sign-extend to r64.
-  __ movsxlq(temp, dividend);
-  if (divisor < 0 &&
-      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ neg(temp);
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hdiv->left()->RangeCanInclude(0) && divisor < 0) {
+    __ testl(dividend, dividend);
     DeoptimizeIf(zero, instr->environment());
   }
-  __ Set(result, multiplier);
-  // Result just fit in r64, because it's int32 * uint32.
-  __ imul(result, temp);
 
-  __ addq(result, Immediate(1 << 30));
-  __ sar(result, Immediate(shift));
+  __ FlooringDiv(dividend, divisor);
 }
 
 
@@ -1198,6 +1209,39 @@
 }
 
 
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  ASSERT(ToRegister(instr->result()).is(rdx));
+
+  if (divisor == 0) {
+    DeoptimizeIf(no_condition, instr->environment());
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) &&
+      hdiv->left()->RangeCanInclude(0) && divisor < 0) {
+    __ testl(dividend, dividend);
+    DeoptimizeIf(zero, instr->environment());
+  }
+
+  __ FlooringDiv(dividend, Abs(divisor));
+  __ movl(rax, dividend);
+  __ shrl(rax, Immediate(31));
+  __ addl(rdx, rax);
+  if (divisor < 0) __ neg(rdx);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    __ movl(rax, rdx);
+    __ imull(rax, rax, Immediate(divisor));
+    __ subl(rax, dividend);
+    DeoptimizeIf(not_equal, instr->environment());
+  }
+}
+
+
 void LCodeGen::DoDivI(LDivI* instr) {
   Register dividend = ToRegister(instr->left());
   Register divisor = ToRegister(instr->right());
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 27001ab..74390e6 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -1261,6 +1261,26 @@
 }
 
 
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(rax);
+  LOperand* temp2 = FixedTemp(rdx);
+  LInstruction* result =
+      DefineFixed(
+          new(zone()) LDivByConstI(dividend, divisor, temp1, temp2), rdx);
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->RangeCanInclude(0) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32);
+  return can_deopt ? AssignEnvironment(result) : result;
+}
+
+
 LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
   ASSERT(instr->representation().IsSmiOrInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
@@ -1276,7 +1296,13 @@
 
 LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    return instr->RightIsPowerOf2() ? DoDivByPowerOf2I(instr) : DoDivI(instr);
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::DIV, instr);
   } else {
@@ -1298,13 +1324,23 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
-  LOperand* dividend = UseRegisterAtStart(instr->left());
+  ASSERT(instr->representation().IsInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
-  LOperand* temp = TempRegister();
+  LOperand* temp1 = FixedTemp(rax);
+  LOperand* temp2 = FixedTemp(rdx);
   LInstruction* result =
-      DefineAsRegister(
-          new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
-  bool can_deopt = divisor <= 0;
+      DefineFixed(new(zone()) LFlooringDivByConstI(dividend,
+                                                   divisor,
+                                                   temp1,
+                                                   temp2),
+                  rdx);
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->RangeCanInclude(0) && divisor < 0);
   return can_deopt ? AssignEnvironment(result) : result;
 }
 
@@ -1335,6 +1371,25 @@
 }
 
 
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  ASSERT(instr->representation().IsSmiOrInteger32());
+  ASSERT(instr->left()->representation().Equals(instr->representation()));
+  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp1 = FixedTemp(rax);
+  LOperand* temp2 = FixedTemp(rdx);
+  LInstruction* result =
+      DefineFixed(
+          new(zone()) LModByConstI(dividend, divisor, temp1, temp2), rax);
+  bool can_deopt =
+      divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) &&
+       instr->left()->CanBeNegative());
+  return can_deopt ? AssignEnvironment(result) : result;
+}
+
+
 LInstruction* LChunkBuilder::DoModI(HMod* instr) {
   ASSERT(instr->representation().IsSmiOrInteger32());
   ASSERT(instr->left()->representation().Equals(instr->representation()));
@@ -1357,7 +1412,13 @@
 
 LInstruction* LChunkBuilder::DoMod(HMod* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    return  instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::MOD, instr);
   } else {
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index c6a6850..4df350d 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -86,6 +86,7 @@
   V(DebugBreak)                                 \
   V(DeclareGlobals)                             \
   V(Deoptimize)                                 \
+  V(DivByConstI)                                \
   V(DivByPowerOf2I)                             \
   V(DivI)                                       \
   V(DoubleBits)                                 \
@@ -137,6 +138,7 @@
   V(MathPowHalf)                                \
   V(MathRound)                                  \
   V(MathSqrt)                                   \
+  V(ModByConstI)                                \
   V(ModByPowerOf2I)                             \
   V(ModI)                                       \
   V(MulI)                                       \
@@ -638,6 +640,31 @@
 };
 
 
+class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LModByConstI(LOperand* dividend,
+               int32_t divisor,
+               LOperand* temp1,
+               LOperand* temp2) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
 class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LModI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -673,6 +700,31 @@
 };
 
 
+class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LDivByConstI(LOperand* dividend,
+               int32_t divisor,
+               LOperand* temp1,
+               LOperand* temp2) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
 class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LDivI(LOperand* left, LOperand* right, LOperand* temp) {
@@ -711,17 +763,22 @@
 };
 
 
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
-  LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+  LFlooringDivByConstI(LOperand* dividend,
+                       int32_t divisor,
+                       LOperand* temp1,
+                       LOperand* temp2) {
     inputs_[0] = dividend;
     divisor_ = divisor;
-    temps_[0] = temp;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
   }
 
   LOperand* dividend() { return inputs_[0]; }
   int32_t divisor() const { return divisor_; }
-  LOperand* temp() { return temps_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
 
   DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
   DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
@@ -2634,8 +2691,10 @@
   LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
   LInstruction* DoMathClz32(HUnaryMathOperation* instr);
   LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
   LInstruction* DoDivI(HBinaryOperation* instr);
   LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
   LInstruction* DoModI(HMod* instr);
   LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
   LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 95f1724..3191199 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -4979,6 +4979,18 @@
 }
 
 
+void MacroAssembler::FlooringDiv(Register dividend, int32_t divisor) {
+  ASSERT(!dividend.is(rax));
+  ASSERT(!dividend.is(rdx));
+  MultiplierAndShift ms(divisor);
+  movl(rax, Immediate(ms.multiplier()));
+  imull(dividend);
+  if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
+  if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
+  if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
+}
+
+
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 63cf9c5..c72f0db 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1363,6 +1363,10 @@
                                   Register filler);
 
 
+  // Emit code for a flooring division by a constant. The dividend register is
+  // unchanged, the result is in rdx, and rax gets clobbered.
+  void FlooringDiv(Register dividend, int32_t divisor);
+
   // ---------------------------------------------------------------------------
   // StatsCounter support