Merge V8 5.3.332.45.  DO NOT MERGE

Test: Manual

FPIIM-449

Change-Id: Id3254828b068abdea3cb10442e0172a8c9a98e03
(cherry picked from commit 13e2dadd00298019ed862f2b2fc5068bba730bcf)
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 1ccc3a6..9633a63 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -57,7 +57,7 @@
     answer |= 1u << ARMv8;
     // ARMv8 always features VFP and NEON.
     answer |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
-    answer |= 1u << SUDIV | 1u << MLS;
+    answer |= 1u << SUDIV;
   }
 #endif  // CAN_USE_ARMV8_INSTRUCTIONS
 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
@@ -93,7 +93,7 @@
     supported_ |= 1u << ARMv8;
     // ARMv8 always features VFP and NEON.
     supported_ |= 1u << ARMv7 | 1u << VFP3 | 1u << NEON | 1u << VFP32DREGS;
-    supported_ |= 1u << SUDIV | 1u << MLS;
+    supported_ |= 1u << SUDIV;
     if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
   }
   if (FLAG_enable_armv7) {
@@ -104,7 +104,6 @@
     if (FLAG_enable_movw_movt) supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
     if (FLAG_enable_32dregs) supported_ |= 1u << VFP32DREGS;
   }
-  if (FLAG_enable_mls) supported_ |= 1u << MLS;
   if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
 
 #else  // __arm__
@@ -119,7 +118,6 @@
 
   if (FLAG_enable_neon && cpu.has_neon()) supported_ |= 1u << NEON;
   if (FLAG_enable_sudiv && cpu.has_idiva()) supported_ |= 1u << SUDIV;
-  if (FLAG_enable_mls && cpu.has_thumb2()) supported_ |= 1u << MLS;
 
   if (cpu.architecture() >= 7) {
     if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
@@ -203,12 +201,11 @@
 
 void CpuFeatures::PrintFeatures() {
   printf(
-      "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d MLS=%d"
+      "ARMv8=%d ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d "
       "UNALIGNED_ACCESSES=%d MOVW_MOVT_IMMEDIATE_LOADS=%d",
       CpuFeatures::IsSupported(ARMv8), CpuFeatures::IsSupported(ARMv7),
       CpuFeatures::IsSupported(VFP3), CpuFeatures::IsSupported(VFP32DREGS),
       CpuFeatures::IsSupported(NEON), CpuFeatures::IsSupported(SUDIV),
-      CpuFeatures::IsSupported(MLS),
       CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
       CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
 #ifdef __arm__
@@ -252,31 +249,20 @@
   return reinterpret_cast<uint32_t>(Assembler::target_address_at(pc_, host_));
 }
 
-void RelocInfo::update_wasm_memory_reference(
-    Address old_base, Address new_base, uint32_t old_size, uint32_t new_size,
-    ICacheFlushMode icache_flush_mode) {
-  DCHECK(IsWasmMemoryReference(rmode_) || IsWasmMemorySizeReference(rmode_));
-  if (IsWasmMemoryReference(rmode_)) {
-    Address updated_memory_reference;
-    DCHECK(old_base <= wasm_memory_reference() &&
-           wasm_memory_reference() < old_base + old_size);
-    updated_memory_reference = new_base + (wasm_memory_reference() - old_base);
-    DCHECK(new_base <= updated_memory_reference &&
-           updated_memory_reference < new_base + new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, updated_memory_reference, icache_flush_mode);
-  } else if (IsWasmMemorySizeReference(rmode_)) {
-    uint32_t updated_size_reference;
-    DCHECK(wasm_memory_size_reference() <= old_size);
-    updated_size_reference =
-        new_size + (wasm_memory_size_reference() - old_size);
-    DCHECK(updated_size_reference <= new_size);
-    Assembler::set_target_address_at(
-        isolate_, pc_, host_, reinterpret_cast<Address>(updated_size_reference),
-        icache_flush_mode);
-  } else {
-    UNREACHABLE();
-  }
+Address RelocInfo::wasm_global_reference() {
+  DCHECK(IsWasmGlobalReference(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
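+// The unchecked updaters below replace the checked
+// update_wasm_memory_reference removed above: they patch the target without
+// range DCHECKs and leave validation to the caller.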
+void RelocInfo::unchecked_update_wasm_memory_reference(
+    Address address, ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
+}
+
+void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
+                                                  ICacheFlushMode flush_mode) {
+  Assembler::set_target_address_at(isolate_, pc_, host_,
+                                   reinterpret_cast<Address>(size), flush_mode);
 }
 
 // -----------------------------------------------------------------------------
@@ -486,17 +472,16 @@
     al | B26 | NegOffset | Register::kCode_fp * B16;
 const Instr kLdrStrInstrTypeMask = 0xffff0000;
 
-
 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
     : AssemblerBase(isolate, buffer, buffer_size),
       recorded_ast_id_(TypeFeedbackId::None()),
-      pending_32_bit_constants_(&pending_32_bit_constants_buffer_[0]),
-      pending_64_bit_constants_(&pending_64_bit_constants_buffer_[0]),
+      pending_32_bit_constants_(),
+      pending_64_bit_constants_(),
       constant_pool_builder_(kLdrMaxReachBits, kVldrMaxReachBits),
       positions_recorder_(this) {
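+  // Reserve the capacity the old inline buffers provided, so small
+  // functions do not reallocate immediately.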
+  pending_32_bit_constants_.reserve(kMinNumPendingConstants);
+  pending_64_bit_constants_.reserve(kMinNumPendingConstants);
   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
-  num_pending_32_bit_constants_ = 0;
-  num_pending_64_bit_constants_ = 0;
   next_buffer_check_ = 0;
   const_pool_blocked_nesting_ = 0;
   no_const_pool_before_ = 0;
@@ -509,12 +494,6 @@
 
 Assembler::~Assembler() {
   DCHECK(const_pool_blocked_nesting_ == 0);
-  if (pending_32_bit_constants_ != &pending_32_bit_constants_buffer_[0]) {
-    delete[] pending_32_bit_constants_;
-  }
-  if (pending_64_bit_constants_ != &pending_64_bit_constants_buffer_[0]) {
-    delete[] pending_64_bit_constants_;
-  }
 }
 
 
@@ -527,8 +506,8 @@
     constant_pool_offset = EmitEmbeddedConstantPool();
   } else {
     CheckConstPool(true, false);
-    DCHECK(num_pending_32_bit_constants_ == 0);
-    DCHECK(num_pending_64_bit_constants_ == 0);
+    DCHECK(pending_32_bit_constants_.empty());
+    DCHECK(pending_64_bit_constants_.empty());
   }
   // Set up code descriptor.
   desc->buffer = buffer_;
@@ -538,6 +517,8 @@
   desc->constant_pool_size =
       (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
   desc->origin = this;
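+  // No unwinding info is emitted for ARM code objects.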
+  desc->unwinding_info_size = 0;
+  desc->unwinding_info = nullptr;
 }
 
 
@@ -851,6 +832,19 @@
     // Load the position of the label relative to the generated code object
     // pointer in a register.
 
+    // The existing code must be a single 24-bit label chain link, followed by
+    // nops encoding the destination register. See mov_label_offset.
+
+    // Extract the destination register from the first nop instructions.
+    Register dst =
+        Register::from_code(Instruction::RmValue(instr_at(pos + kInstrSize)));
+    // In addition to the 24-bit label chain link, we expect to find one nop for
+    // ARMv7 and above, or two nops for ARMv6. See mov_label_offset.
+    DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
+    if (!CpuFeatures::IsSupported(ARMv7)) {
+      DCHECK(IsNop(instr_at(pos + 2 * kInstrSize), dst.code()));
+    }
+
     // Here are the instructions we need to emit:
     //   For ARMv7: target24 => target16_1:target16_0
     //      movw dst, #target16_0
@@ -860,10 +854,6 @@
     //      orr dst, dst, #target8_1 << 8
     //      orr dst, dst, #target8_2 << 16
 
-    // We extract the destination register from the emitted nop instruction.
-    Register dst = Register::from_code(
-        Instruction::RmValue(instr_at(pos + kInstrSize)));
-    DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
     uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
     DCHECK(is_uint24(target24));
     if (is_uint8(target24)) {
@@ -1390,7 +1380,6 @@
 
 
 void Assembler::bl(int branch_offset, Condition cond) {
-  positions_recorder()->WriteRecordedPositions();
   DCHECK((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
   CHECK(is_int24(imm24));
@@ -1399,7 +1388,6 @@
 
 
 void Assembler::blx(int branch_offset) {  // v5 and above
-  positions_recorder()->WriteRecordedPositions();
   DCHECK((branch_offset & 1) == 0);
   int h = ((branch_offset & 2) >> 1)*B24;
   int imm24 = branch_offset >> 2;
@@ -1409,14 +1397,12 @@
 
 
 void Assembler::blx(Register target, Condition cond) {  // v5 and above
-  positions_recorder()->WriteRecordedPositions();
   DCHECK(!target.is(pc));
   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
 }
 
 
 void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
-  positions_recorder()->WriteRecordedPositions();
   DCHECK(!target.is(pc));  // use of pc is actually allowed, but discouraged
   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
 }
@@ -1524,9 +1510,6 @@
 
 
 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
-  if (dst.is(pc)) {
-    positions_recorder()->WriteRecordedPositions();
-  }
   // Don't allow nop instructions in the form mov rn, rn to be generated using
   // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
   // or MarkCode(int/NopMarkerTypes) pseudo instructions.
@@ -1609,7 +1592,7 @@
 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
                     Condition cond) {
   DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
-  DCHECK(IsEnabled(MLS));
+  DCHECK(IsEnabled(ARMv7));
   emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
        src2.code()*B8 | B7 | B4 | src1.code());
 }
@@ -2015,9 +1998,6 @@
 
 // Load/Store instructions.
 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
-  if (dst.is(pc)) {
-    positions_recorder()->WriteRecordedPositions();
-  }
   addrmod2(cond | B26 | L, dst, src);
 }
 
@@ -2076,6 +2056,53 @@
   addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
 }
 
+// Load/Store exclusive instructions.
+void Assembler::ldrex(Register dst, Register src, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.75.
+  // cond(31-28) | 00011001(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+  emit(cond | B24 | B23 | B20 | src.code() * B16 | dst.code() * B12 | 0xf9f);
+}
+
+void Assembler::strex(Register src1, Register src2, Register dst,
+                      Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.212.
+  // cond(31-28) | 00011000(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
+  // Rt(3-0)
+  emit(cond | B24 | B23 | dst.code() * B16 | src1.code() * B12 | 0xf9 * B4 |
+       src2.code());
+}
+
+void Assembler::ldrexb(Register dst, Register src, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.76.
+  // cond(31-28) | 00011101(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+  emit(cond | B24 | B23 | B22 | B20 | src.code() * B16 | dst.code() * B12 |
+       0xf9f);
+}
+
+void Assembler::strexb(Register src1, Register src2, Register dst,
+                       Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.213.
+  // cond(31-28) | 00011100(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
+  // Rt(3-0)
+  emit(cond | B24 | B23 | B22 | dst.code() * B16 | src1.code() * B12 |
+       0xf9 * B4 | src2.code());
+}
+
+void Assembler::ldrexh(Register dst, Register src, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.78.
+  // cond(31-28) | 00011111(27-20) | Rn(19-16) | Rt(15-12) | 111110011111(11-0)
+  emit(cond | B24 | B23 | B22 | B21 | B20 | src.code() * B16 |
+       dst.code() * B12 | 0xf9f);
+}
+
+void Assembler::strexh(Register src1, Register src2, Register dst,
+                       Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.215.
+  // cond(31-28) | 00011110(27-20) | Rn(19-16) | Rd(15-12) | 11111001(11-4) |
+  // Rt(3-0)
+  emit(cond | B24 | B23 | B22 | B21 | dst.code() * B16 | src1.code() * B12 |
+       0xf9 * B4 | src2.code());
+}
 
 // Preload instructions.
 void Assembler::pld(const MemOperand& address) {
@@ -3827,8 +3854,8 @@
 void Assembler::db(uint8_t data) {
   // db is used to write raw data. The constant pool should be emitted or
   // blocked before using db.
-  DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
-  DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
+  DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
+  DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
   CheckBuffer();
   *reinterpret_cast<uint8_t*>(pc_) = data;
   pc_ += sizeof(uint8_t);
@@ -3838,8 +3865,8 @@
 void Assembler::dd(uint32_t data) {
   // dd is used to write raw data. The constant pool should be emitted or
   // blocked before using dd.
-  DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
-  DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
+  DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
+  DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
@@ -3849,8 +3876,8 @@
 void Assembler::dq(uint64_t value) {
   // dq is used to write raw data. The constant pool should be emitted or
   // blocked before using dq.
-  DCHECK(is_const_pool_blocked() || (num_pending_32_bit_constants_ == 0));
-  DCHECK(is_const_pool_blocked() || (num_pending_64_bit_constants_ == 0));
+  DCHECK(is_const_pool_blocked() || pending_32_bit_constants_.empty());
+  DCHECK(is_const_pool_blocked() || pending_64_bit_constants_.empty());
   CheckBuffer();
   *reinterpret_cast<uint64_t*>(pc_) = value;
   pc_ += sizeof(uint64_t);
@@ -3893,21 +3920,12 @@
   if (FLAG_enable_embedded_constant_pool) {
     return constant_pool_builder_.AddEntry(position, value, sharing_ok);
   } else {
-    DCHECK(num_pending_32_bit_constants_ < kMaxNumPending32Constants);
-    if (num_pending_32_bit_constants_ == 0) {
+    DCHECK(pending_32_bit_constants_.size() < kMaxNumPending32Constants);
+    if (pending_32_bit_constants_.empty()) {
       first_const_pool_32_use_ = position;
-    } else if (num_pending_32_bit_constants_ == kMinNumPendingConstants &&
-               pending_32_bit_constants_ ==
-                   &pending_32_bit_constants_buffer_[0]) {
-      // Inline buffer is full, switch to dynamically allocated buffer.
-      pending_32_bit_constants_ =
-          new ConstantPoolEntry[kMaxNumPending32Constants];
-      std::copy(&pending_32_bit_constants_buffer_[0],
-                &pending_32_bit_constants_buffer_[kMinNumPendingConstants],
-                &pending_32_bit_constants_[0]);
     }
     ConstantPoolEntry entry(position, value, sharing_ok);
-    pending_32_bit_constants_[num_pending_32_bit_constants_++] = entry;
+    pending_32_bit_constants_.push_back(entry);
 
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
@@ -3922,21 +3940,12 @@
   if (FLAG_enable_embedded_constant_pool) {
     return constant_pool_builder_.AddEntry(position, value);
   } else {
-    DCHECK(num_pending_64_bit_constants_ < kMaxNumPending64Constants);
-    if (num_pending_64_bit_constants_ == 0) {
+    DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
+    if (pending_64_bit_constants_.empty()) {
       first_const_pool_64_use_ = position;
-    } else if (num_pending_64_bit_constants_ == kMinNumPendingConstants &&
-               pending_64_bit_constants_ ==
-                   &pending_64_bit_constants_buffer_[0]) {
-      // Inline buffer is full, switch to dynamically allocated buffer.
-      pending_64_bit_constants_ =
-          new ConstantPoolEntry[kMaxNumPending64Constants];
-      std::copy(&pending_64_bit_constants_buffer_[0],
-                &pending_64_bit_constants_buffer_[kMinNumPendingConstants],
-                &pending_64_bit_constants_[0]);
     }
     ConstantPoolEntry entry(position, value);
-    pending_64_bit_constants_[num_pending_64_bit_constants_++] = entry;
+    pending_64_bit_constants_.push_back(entry);
 
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
@@ -3949,8 +3958,8 @@
 void Assembler::BlockConstPoolFor(int instructions) {
   if (FLAG_enable_embedded_constant_pool) {
     // Should be a no-op if using an embedded constant pool.
-    DCHECK(num_pending_32_bit_constants_ == 0);
-    DCHECK(num_pending_64_bit_constants_ == 0);
+    DCHECK(pending_32_bit_constants_.empty());
+    DCHECK(pending_64_bit_constants_.empty());
     return;
   }
 
@@ -3959,11 +3968,11 @@
     // Max pool start (if we need a jump and an alignment).
 #ifdef DEBUG
     int start = pc_limit + kInstrSize + 2 * kPointerSize;
-    DCHECK((num_pending_32_bit_constants_ == 0) ||
+    DCHECK(pending_32_bit_constants_.empty() ||
            (start - first_const_pool_32_use_ +
-                num_pending_64_bit_constants_ * kDoubleSize <
+                pending_64_bit_constants_.size() * kDoubleSize <
             kMaxDistToIntPool));
-    DCHECK((num_pending_64_bit_constants_ == 0) ||
+    DCHECK(pending_64_bit_constants_.empty() ||
            (start - first_const_pool_64_use_ < kMaxDistToFPPool));
 #endif
     no_const_pool_before_ = pc_limit;
@@ -3978,8 +3987,8 @@
 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   if (FLAG_enable_embedded_constant_pool) {
     // Should be a no-op if using an embedded constant pool.
-    DCHECK(num_pending_32_bit_constants_ == 0);
-    DCHECK(num_pending_64_bit_constants_ == 0);
+    DCHECK(pending_32_bit_constants_.empty());
+    DCHECK(pending_64_bit_constants_.empty());
     return;
   }
 
@@ -3993,8 +4002,7 @@
   }
 
   // There is nothing to do if there are no pending constant pool entries.
-  if ((num_pending_32_bit_constants_ == 0) &&
-      (num_pending_64_bit_constants_ == 0)) {
+  if (pending_32_bit_constants_.empty() && pending_64_bit_constants_.empty()) {
     // Calculate the offset of the next check.
     next_buffer_check_ = pc_offset() + kCheckPoolInterval;
     return;
@@ -4006,9 +4014,9 @@
   int jump_instr = require_jump ? kInstrSize : 0;
   int size_up_to_marker = jump_instr + kInstrSize;
   int estimated_size_after_marker =
-      num_pending_32_bit_constants_ * kPointerSize;
-  bool has_int_values = (num_pending_32_bit_constants_ > 0);
-  bool has_fp_values = (num_pending_64_bit_constants_ > 0);
+      pending_32_bit_constants_.size() * kPointerSize;
+  bool has_int_values = !pending_32_bit_constants_.empty();
+  bool has_fp_values = !pending_64_bit_constants_.empty();
   bool require_64_bit_align = false;
   if (has_fp_values) {
     require_64_bit_align =
@@ -4017,7 +4025,8 @@
     if (require_64_bit_align) {
       estimated_size_after_marker += kInstrSize;
     }
-    estimated_size_after_marker += num_pending_64_bit_constants_ * kDoubleSize;
+    estimated_size_after_marker +=
+        pending_64_bit_constants_.size() * kDoubleSize;
   }
   int estimated_size = size_up_to_marker + estimated_size_after_marker;
 
@@ -4036,7 +4045,7 @@
       // The 64-bit constants are always emitted before the 32-bit constants, so
       // we can ignore the effect of the 32-bit constants on estimated_size.
       int dist64 = pc_offset() + estimated_size -
-                   num_pending_32_bit_constants_ * kPointerSize -
+                   pending_32_bit_constants_.size() * kPointerSize -
                    first_const_pool_64_use_;
       if ((dist64 >= kMaxDistToFPPool - kCheckPoolInterval) ||
           (!require_jump && (dist64 >= kMaxDistToFPPool / 2))) {
@@ -4055,7 +4064,7 @@
 
   // Deduplicate constants.
   int size_after_marker = estimated_size_after_marker;
-  for (int i = 0; i < num_pending_64_bit_constants_; i++) {
+  for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
     ConstantPoolEntry& entry = pending_64_bit_constants_[i];
     DCHECK(!entry.is_merged());
-    for (int j = 0; j < i; j++) {
+    for (size_t j = 0; j < i; j++) {
@@ -4068,7 +4077,7 @@
     }
   }
 
-  for (int i = 0; i < num_pending_32_bit_constants_; i++) {
+  for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
     ConstantPoolEntry& entry = pending_32_bit_constants_[i];
     DCHECK(!entry.is_merged());
     if (!entry.sharing_ok()) continue;
@@ -4113,7 +4122,7 @@
 
     // Emit 64-bit constant pool entries first: their range is smaller than
     // 32-bit entries.
-    for (int i = 0; i < num_pending_64_bit_constants_; i++) {
+    for (size_t i = 0; i < pending_64_bit_constants_.size(); i++) {
       ConstantPoolEntry& entry = pending_64_bit_constants_[i];
 
       Instr instr = instr_at(entry.position());
@@ -4142,7 +4151,7 @@
     }
 
     // Emit 32-bit constant pool entries.
-    for (int i = 0; i < num_pending_32_bit_constants_; i++) {
+    for (size_t i = 0; i < pending_32_bit_constants_.size(); i++) {
       ConstantPoolEntry& entry = pending_32_bit_constants_[i];
       Instr instr = instr_at(entry.position());
 
@@ -4176,8 +4185,8 @@
       }
     }
 
-    num_pending_32_bit_constants_ = 0;
-    num_pending_64_bit_constants_ = 0;
+    pending_32_bit_constants_.clear();
+    pending_64_bit_constants_.clear();
     first_const_pool_32_use_ = -1;
     first_const_pool_64_use_ = -1;
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 26e062b..461d5b0 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -118,8 +118,6 @@
     Register r = {code};
     return r;
   }
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kNumRegisters; }
   bool is(Register reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -147,9 +145,22 @@
 #undef DECLARE_REGISTER
 const Register no_reg = {Register::kCode_no_reg};
 
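+// Double and single precision VFP registers alias on ARM (each d<n> overlaps
+// s<2n> and s<2n+1>), so FP register aliasing is not "simple".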
+static const bool kSimpleFPAliasing = false;
+
 // Single word VFP register.
 struct SwVfpRegister {
+  enum Code {
+#define REGISTER_CODE(R) kCode_##R,
+    FLOAT_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+        kAfterLast,
+    kCode_no_reg = -1
+  };
+
+  static const int kMaxNumRegisters = Code::kAfterLast;
+
   static const int kSizeInBytes = 4;
+
   bool is_valid() const { return 0 <= reg_code && reg_code < 32; }
   bool is(SwVfpRegister reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -195,8 +206,6 @@
   //  d15: scratch register.
   static const int kSizeInBytes = 8;
 
-  const char* ToString();
-  bool IsAllocatable() const;
   bool is_valid() const { return 0 <= reg_code && reg_code < kMaxNumRegisters; }
   bool is(DwVfpRegister reg) const { return reg_code == reg.reg_code; }
   int code() const {
@@ -986,6 +995,14 @@
             Register src2,
             const MemOperand& dst, Condition cond = al);
 
+  // Load/Store exclusive instructions
+  void ldrex(Register dst, Register src, Condition cond = al);
+  void strex(Register src1, Register src2, Register dst, Condition cond = al);
+  void ldrexb(Register dst, Register src, Condition cond = al);
+  void strexb(Register src1, Register src2, Register dst, Condition cond = al);
+  void ldrexh(Register dst, Register src, Condition cond = al);
+  void strexh(Register src1, Register src2, Register dst, Condition cond = al);
+
   // Preload instructions
   void pld(const MemOperand& address);
 
@@ -1312,6 +1329,10 @@
     vstm(db_w, sp, src, src, cond);
   }
 
+  void vpush(SwVfpRegister src, Condition cond = al) {
+    vstm(db_w, sp, src, src, cond);
+  }
+
   void vpop(DwVfpRegister dst, Condition cond = al) {
     vldm(ia_w, sp, dst, dst, cond);
   }
@@ -1545,10 +1566,10 @@
       // Max pool start (if we need a jump and an alignment).
       int start = pc_offset() + kInstrSize + 2 * kPointerSize;
       // Check the constant pool hasn't been blocked for too long.
-      DCHECK((num_pending_32_bit_constants_ == 0) ||
-             (start + num_pending_64_bit_constants_ * kDoubleSize <
+      DCHECK(pending_32_bit_constants_.empty() ||
+             (start + pending_64_bit_constants_.size() * kDoubleSize <
               (first_const_pool_32_use_ + kMaxDistToIntPool)));
-      DCHECK((num_pending_64_bit_constants_ == 0) ||
+      DCHECK(pending_64_bit_constants_.empty() ||
              (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
 #endif
       // Two cases:
@@ -1615,14 +1636,8 @@
   // pending relocation entry per instruction.
 
   // The buffers of pending constant pool entries.
-  ConstantPoolEntry pending_32_bit_constants_buffer_[kMinNumPendingConstants];
-  ConstantPoolEntry pending_64_bit_constants_buffer_[kMinNumPendingConstants];
-  ConstantPoolEntry* pending_32_bit_constants_;
-  ConstantPoolEntry* pending_64_bit_constants_;
-  // Number of pending constant pool entries in the 32 bits buffer.
-  int num_pending_32_bit_constants_;
-  // Number of pending constant pool entries in the 64 bits buffer.
-  int num_pending_64_bit_constants_;
+  std::vector<ConstantPoolEntry> pending_32_bit_constants_;
+  std::vector<ConstantPoolEntry> pending_64_bit_constants_;
 
   ConstantPoolBuilder constant_pool_builder_;
 
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 031b483..365bc1e 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -16,10 +16,7 @@
 
 #define __ ACCESS_MASM(masm)
 
-
-void Builtins::Generate_Adaptor(MacroAssembler* masm,
-                                CFunctionId id,
-                                BuiltinExtraArguments extra_args) {
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
   // ----------- S t a t e -------------
   //  -- r0                 : number of arguments excluding receiver
   //  -- r1                 : target
@@ -38,23 +35,8 @@
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
   // Insert extra arguments.
-  int num_extra_args = 0;
-  switch (extra_args) {
-    case BuiltinExtraArguments::kTarget:
-      __ Push(r1);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kNewTarget:
-      __ Push(r3);
-      ++num_extra_args;
-      break;
-    case BuiltinExtraArguments::kTargetAndNewTarget:
-      __ Push(r1, r3);
-      num_extra_args += 2;
-      break;
-    case BuiltinExtraArguments::kNone:
-      break;
-  }
+  const int num_extra_args = 2;
+  __ Push(r1, r3);
 
   // JumpToExternalReference expects r0 to contain the number of arguments
   // including the receiver and the extra arguments.
@@ -140,6 +122,8 @@
 void Builtins::Generate_MathMaxMin(MacroAssembler* masm, MathMaxMinKind kind) {
   // ----------- S t a t e -------------
   //  -- r0                 : number of arguments
+  //  -- r1                 : function
+  //  -- cp                 : context
   //  -- lr                 : return address
   //  -- sp[(argc - n) * 8] : arg[n] (zero-based)
   //  -- sp[(argc + 1) * 8] : receiver
@@ -152,9 +136,9 @@
   DoubleRegister const reg = (kind == MathMaxMinKind::kMin) ? d2 : d1;
 
   // Load the accumulator with the default return value (either -Infinity or
-  // +Infinity), with the tagged value in r1 and the double value in d1.
-  __ LoadRoot(r1, root_index);
-  __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+  // +Infinity), with the tagged value in r5 and the double value in d1.
+  __ LoadRoot(r5, root_index);
+  __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
 
   // Remember how many slots to drop (including the receiver).
   __ add(r4, r0, Operand(1));
@@ -170,33 +154,36 @@
     __ ldr(r2, MemOperand(sp, r0, LSL, kPointerSizeLog2));
 
     // Load the double value of the parameter into d2, maybe converting the
-    // parameter to a number first using the ToNumberStub if necessary.
+    // parameter to a number first using the ToNumber builtin if necessary.
     Label convert, convert_smi, convert_number, done_convert;
     __ bind(&convert);
     __ JumpIfSmi(r2, &convert_smi);
     __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
     __ JumpIfRoot(r3, Heap::kHeapNumberMapRootIndex, &convert_number);
     {
-      // Parameter is not a Number, use the ToNumberStub to convert it.
-      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      // Parameter is not a Number, use the ToNumber builtin to convert it.
+      DCHECK(!FLAG_enable_embedded_constant_pool);
+      FrameScope scope(masm, StackFrame::MANUAL);
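+      // Lay out a standard JS frame by hand: cp lands in the context slot
+      // and r1 (the function) in the function slot, so both survive the
+      // call below.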
+      __ Push(lr, fp, cp, r1);
+      __ add(fp, sp, Operand(2 * kPointerSize));
       __ SmiTag(r0);
       __ SmiTag(r4);
-      __ Push(r0, r1, r4);
+      __ Push(r0, r4, r5);
       __ mov(r0, r2);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ mov(r2, r0);
-      __ Pop(r0, r1, r4);
+      __ Pop(r0, r4, r5);
       {
         // Restore the double accumulator value (d1).
         Label done_restore;
-        __ SmiToDouble(d1, r1);
-        __ JumpIfSmi(r1, &done_restore);
-        __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+        __ SmiToDouble(d1, r5);
+        __ JumpIfSmi(r5, &done_restore);
+        __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
         __ bind(&done_restore);
       }
       __ SmiUntag(r4);
       __ SmiUntag(r0);
+      __ Pop(lr, fp, cp, r1);
     }
     __ b(&convert);
     __ bind(&convert_number);
@@ -222,18 +209,18 @@
     // Result is on the right hand side.
     __ bind(&compare_swap);
     __ vmov(d1, d2);
-    __ mov(r1, r2);
+    __ mov(r5, r2);
     __ b(&loop);
 
     // At least one side is NaN, which means that the result will be NaN too.
     __ bind(&compare_nan);
-    __ LoadRoot(r1, Heap::kNanValueRootIndex);
-    __ vldr(d1, FieldMemOperand(r1, HeapNumber::kValueOffset));
+    __ LoadRoot(r5, Heap::kNanValueRootIndex);
+    __ vldr(d1, FieldMemOperand(r5, HeapNumber::kValueOffset));
     __ b(&loop);
   }
 
   __ bind(&done_loop);
-  __ mov(r0, r1);
+  __ mov(r0, r5);
   __ Drop(r4);
   __ Ret();
 }
@@ -259,8 +246,7 @@
   }
 
   // 2a. Convert the first argument to a number.
-  ToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
+  __ Jump(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
 
   // 2b. No arguments, return +0.
   __ bind(&no_arguments);
@@ -308,8 +294,7 @@
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
       __ Push(r1, r3);
       __ Move(r0, r2);
-      ToNumberStub stub(masm->isolate());
-      __ CallStub(&stub);
+      __ Call(masm->isolate()->builtins()->ToNumber(), RelocInfo::CODE_TARGET);
       __ Move(r2, r0);
       __ Pop(r1, r3);
     }
@@ -708,8 +693,8 @@
   __ AssertGeneratorObject(r1);
 
   // Store input value into generator object.
-  __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOffset));
-  __ RecordWriteField(r1, JSGeneratorObject::kInputOffset, r0, r3,
+  __ str(r0, FieldMemOperand(r1, JSGeneratorObject::kInputOrDebugPosOffset));
+  __ RecordWriteField(r1, JSGeneratorObject::kInputOrDebugPosOffset, r0, r3,
                       kLRHasNotBeenSaved, kDontSaveFPRegs);
 
   // Store resume mode into generator object.
@@ -720,21 +705,24 @@
   __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
 
   // Flood function if we are stepping.
-  Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(masm->isolate());
-  __ mov(ip, Operand(step_in_enabled));
-  __ ldrb(ip, MemOperand(ip));
-  __ cmp(ip, Operand(0));
-  __ b(eq, &skip_flooding);
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ Push(r1, r2, r4);
-    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
-    __ Pop(r1, r2);
-    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
-  }
-  __ bind(&skip_flooding);
+  Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+  Label stepping_prepared;
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(masm->isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
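+  // The last step action is stored as a signed byte; because
+  // StepFrame > StepIn, one >= comparison covers both stepping modes.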
+  __ mov(ip, Operand(last_step_action));
+  __ ldrsb(ip, MemOperand(ip));
+  __ cmp(ip, Operand(StepIn));
+  __ b(ge, &prepare_step_in_if_stepping);
+
+  // Flood function if we need to continue stepping in the suspended generator.
+  ExternalReference debug_suspended_generator =
+      ExternalReference::debug_suspended_generator_address(masm->isolate());
+  __ mov(ip, Operand(debug_suspended_generator));
+  __ ldr(ip, MemOperand(ip));
+  __ cmp(ip, Operand(r1));
+  __ b(eq, &prepare_step_in_suspended_generator);
+  __ bind(&stepping_prepared);
 
   // Push receiver.
   __ ldr(ip, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
@@ -830,6 +818,26 @@
     __ Move(r0, r1);  // Continuation expects generator object in r0.
     __ Jump(r3);
   }
+
+  __ bind(&prepare_step_in_if_stepping);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r2, r4);
+    __ CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
+    __ Pop(r1, r2);
+    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
+
+  __ bind(&prepare_step_in_suspended_generator);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r1, r2);
+    __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+    __ Pop(r1, r2);
+    __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+  }
+  __ b(&stepping_prepared);
 }
 
 void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
@@ -959,6 +967,22 @@
   Generate_JSEntryTrampolineHelper(masm, true);
 }
 
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch) {
+  Register args_count = scratch;
+
+  // Get the arguments + receiver count.
+  __ ldr(args_count,
+         MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+  __ ldr(args_count,
+         FieldMemOperand(args_count, BytecodeArray::kParameterSizeOffset));
+
+  // Leave the frame (also dropping the register file).
+  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
+
+  // Drop receiver + arguments.
+  __ add(sp, sp, args_count, LeaveCC);
+}
+
 // Generate code for entering a JS function with the interpreter.
 // On entry to the function the receiver and arguments have been pushed on the
 // stack left to right.  The actual argument count matches the formal parameter
@@ -1062,15 +1086,7 @@
   masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
 
   // The return value is in r0.
-
-  // Get the arguments + reciever count.
-  __ ldr(r2, MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ ldr(r2, FieldMemOperand(r2, BytecodeArray::kParameterSizeOffset));
-
-  // Leave the frame (also dropping the register file).
-  __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-
-  __ add(sp, sp, r2, LeaveCC);
+  LeaveInterpreterFrame(masm, r2);
   __ Jump(lr);
 
   // If the bytecode array is no longer present, then the underlying function
@@ -1086,6 +1102,31 @@
   __ Jump(r4);
 }
 
+void Builtins::Generate_InterpreterMarkBaselineOnReturn(MacroAssembler* masm) {
+  // Save the function and context for call to CompileBaseline.
+  __ ldr(r1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+  __ ldr(kContextRegister,
+         MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  // Leave the frame before recompiling for baseline so that we don't count as
+  // an activation on the stack.
+  LeaveInterpreterFrame(masm, r2);
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    // Push return value.
+    __ push(r0);
+
+    // Push function as argument and compile for baseline.
+    __ push(r1);
+    __ CallRuntime(Runtime::kCompileBaseline);
+
+    // Restore return value.
+    __ pop(r0);
+  }
+  __ Jump(lr);
+}
+
 static void Generate_InterpreterPushArgs(MacroAssembler* masm, Register index,
                                          Register limit, Register scratch) {
   Label loop_header, loop_check;
@@ -1242,13 +1283,29 @@
   const int bailout_id = BailoutId::None().ToInt();
   __ cmp(temp, Operand(Smi::FromInt(bailout_id)));
   __ b(ne, &loop_bottom);
+
   // Literals available?
+  Label got_literals, maybe_cleared_weakcell;
   __ ldr(temp, FieldMemOperand(array_pointer,
                                SharedFunctionInfo::kOffsetToPreviousLiterals));
+  // temp contains either a WeakCell pointing to the literals array or the
+  // literals array directly.
+  STATIC_ASSERT(WeakCell::kValueOffset == FixedArray::kLengthOffset);
+  __ ldr(r4, FieldMemOperand(temp, WeakCell::kValueOffset));
+  __ JumpIfSmi(r4, &maybe_cleared_weakcell);
+  // r4 is a pointer, therefore temp is a WeakCell pointing to a literals array.
   __ ldr(temp, FieldMemOperand(temp, WeakCell::kValueOffset));
-  __ JumpIfSmi(temp, &gotta_call_runtime);
+  __ jmp(&got_literals);
+
+  // r4 is a smi. If it's 0, then we are looking at a cleared WeakCell
+  // around the literals array, and we should visit the runtime. If it's > 0,
+  // then temp already contains the literals array.
+  __ bind(&maybe_cleared_weakcell);
+  __ cmp(r4, Operand(Smi::FromInt(0)));
+  __ b(eq, &gotta_call_runtime);
 
   // Save the literals in the closure.
+  __ bind(&got_literals);
   __ ldr(r4, MemOperand(sp, 0));
   __ str(temp, FieldMemOperand(r4, JSFunction::kLiteralsOffset));
   __ push(index);
@@ -1659,6 +1716,9 @@
 void Builtins::Generate_DatePrototype_GetField(MacroAssembler* masm,
                                                int field_index) {
   // ----------- S t a t e -------------
+  //  -- r0    : number of arguments
+  //  -- r1    : function
+  //  -- cp    : context
   //  -- lr    : return address
   //  -- sp[0] : receiver
   // -----------------------------------
@@ -1668,7 +1728,7 @@
   {
     __ Pop(r0);
     __ JumpIfSmi(r0, &receiver_not_date);
-    __ CompareObjectType(r0, r1, r2, JS_DATE_TYPE);
+    __ CompareObjectType(r0, r2, r3, JS_DATE_TYPE);
     __ b(ne, &receiver_not_date);
   }
 
@@ -1698,7 +1758,14 @@
 
   // 3. Raise a TypeError if the receiver is not a date.
   __ bind(&receiver_not_date);
-  __ TailCallRuntime(Runtime::kThrowNotDateError);
+  {
+    FrameScope scope(masm, StackFrame::MANUAL);
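+    // Build the frame by hand (receiver, lr, fp, then cp, the function and
+    // a zero Smi) so the runtime call below sees a well-formed caller frame.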
+    __ Push(r0, lr, fp);
+    __ Move(fp, sp);
+    __ Push(cp, r1);
+    __ Push(Smi::FromInt(0));
+    __ CallRuntime(Runtime::kThrowNotDateError);
+  }
 }
 
 // static
@@ -2618,6 +2685,73 @@
   __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
 }
 
+// static
+void Builtins::Generate_StringToNumber(MacroAssembler* masm) {
+  // The StringToNumber stub takes one argument in r0.
+  __ AssertString(r0);
+
+  // Check if string has a cached array index.
+  Label runtime;
+  __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
+  __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
+  __ b(ne, &runtime);
+  __ IndexFromHash(r2, r0);
+  __ Ret();
+
+  __ bind(&runtime);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(r0);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kStringToNumber);
+  }
+  __ Ret();
+}
+
+void Builtins::Generate_ToNumber(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in r0.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ tst(r0, Operand(kSmiTagMask));
+  __ Ret(eq);
+
+  __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
+  // r0: receiver
+  // r1: receiver instance type
+  __ Ret(eq);
+
+  __ Jump(masm->isolate()->builtins()->NonNumberToNumber(),
+          RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_NonNumberToNumber(MacroAssembler* masm) {
+  // The NonNumberToNumber stub takes one argument in r0.
+  __ AssertNotNumber(r0);
+
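+  // Strings (instance type below FIRST_NONSTRING_TYPE) are dispatched to the
+  // StringToNumber builtin via the lo condition below.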
+  __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
+  // r0: receiver
+  // r1: receiver instance type
+  __ Jump(masm->isolate()->builtins()->StringToNumber(), RelocInfo::CODE_TARGET,
+          lo);
+
+  Label not_oddball;
+  __ cmp(r1, Operand(ODDBALL_TYPE));
+  __ b(ne, &not_oddball);
+  __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+  {
+    FrameScope frame(masm, StackFrame::INTERNAL);
+    // Push argument.
+    __ Push(r0);
+    // We cannot use a tail call here because this builtin can also be called
+    // from wasm.
+    __ CallRuntime(Runtime::kToNumber);
+  }
+  __ Ret();
+}
+
 void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   // ----------- S t a t e -------------
   //  -- r0 : actual number of arguments
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 0224f9d..0ef31d7 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -22,70 +22,28 @@
 namespace v8 {
 namespace internal {
 
+#define __ ACCESS_MASM(masm)
 
-static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
+void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
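+  // r0: argc, r1: constructor, r2: allocation site (register roles assumed
+  // from the stub's call interface descriptor). The constructor is written
+  // over the receiver slot before pushing the two extra arguments, so
+  // %NewArray receives argc + 3 operands.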
+  __ lsl(r5, r0, Operand(kPointerSizeLog2));
+  __ str(r1, MemOperand(sp, r5));
+  __ Push(r1);
+  __ Push(r2);
+  __ add(r0, r0, Operand(3));
+  __ TailCallRuntime(Runtime::kNewArray);
 }
 
-
-static void InitializeInternalArrayConstructorDescriptor(
-    Isolate* isolate, CodeStubDescriptor* descriptor,
-    int constant_stack_parameter_count) {
-  Address deopt_handler = Runtime::FunctionForId(
-      Runtime::kInternalArrayConstructor)->entry;
-
-  if (constant_stack_parameter_count == 0) {
-    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  } else {
-    descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
-                           JS_FUNCTION_STUB_MODE);
-  }
-}
-
-
-void ArraySingleArgumentConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
-}
-
-
-void ArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
 void FastArrayPushStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   Address deopt_handler = Runtime::FunctionForId(Runtime::kArrayPush)->entry;
   descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+void FastFunctionBindStub::InitializeDescriptor(
     CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+  Address deopt_handler = Runtime::FunctionForId(Runtime::kFunctionBind)->entry;
+  descriptor->Initialize(r0, deopt_handler, -1, JS_FUNCTION_STUB_MODE);
 }
 
-
-void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
-    CodeStubDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
-}
-
-
-#define __ ACCESS_MASM(masm)
-
 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
                                           Condition cond);
 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
@@ -942,7 +900,7 @@
   CEntryStub::GenerateAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
-  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CommonArrayConstructorStub::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
   CreateWeakCellStub::GenerateAheadOfTime(isolate);
   BinaryOpICStub::GenerateAheadOfTime(isolate);
@@ -1354,7 +1312,6 @@
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
                                           &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX,
                                           RECEIVER_IS_STRING);
   char_at_generator.GenerateFast(masm);
   __ Ret();
@@ -1798,6 +1755,7 @@
   // r2 : feedback vector
   // r3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
+  Label done_initialize_count, done_increment_count;
 
   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
@@ -1817,7 +1775,7 @@
   Register weak_value = r9;
   __ ldr(weak_value, FieldMemOperand(r5, WeakCell::kValueOffset));
   __ cmp(r1, weak_value);
-  __ b(eq, &done);
+  __ b(eq, &done_increment_count);
   __ CompareRoot(r5, Heap::kmegamorphic_symbolRootIndex);
   __ b(eq, &done);
   __ ldr(feedback_map, FieldMemOperand(r5, HeapObject::kMapOffset));
@@ -1840,7 +1798,7 @@
   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r5);
   __ cmp(r1, r5);
   __ b(ne, &megamorphic);
-  __ jmp(&done);
+  __ jmp(&done_increment_count);
 
   __ bind(&miss);
 
@@ -1869,11 +1827,28 @@
   // slot.
   CreateAllocationSiteStub create_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &create_stub);
-  __ b(&done);
+  __ b(&done_initialize_count);
 
   __ bind(&not_array_function);
   CreateWeakCellStub weak_cell_stub(masm->isolate());
   CallStubInRecordCallTarget(masm, &weak_cell_stub);
+
+  __ bind(&done_initialize_count);
+  // Initialize the call counter.
+  __ Move(r5, Operand(Smi::FromInt(1)));
+  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+  __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
+  __ b(&done);
+
+  __ bind(&done_increment_count);
+
+  // Increment the call count for monomorphic function calls.
+  __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3));
+  __ add(r5, r5, Operand(FixedArray::kHeaderSize + kPointerSize));
+  __ ldr(r4, FieldMemOperand(r5, 0));
+  __ add(r4, r4, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(r5, 0));
+
   __ bind(&done);
 }
 
@@ -1935,7 +1910,7 @@
   __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
   __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
   __ ldr(r3, FieldMemOperand(r2, 0));
-  __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ add(r3, r3, Operand(Smi::FromInt(1)));
   __ str(r3, FieldMemOperand(r2, 0));
 
   __ mov(r2, r4);
@@ -1983,7 +1958,7 @@
   __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3));
   __ add(r2, r2, Operand(FixedArray::kHeaderSize + kPointerSize));
   __ ldr(r3, FieldMemOperand(r2, 0));
-  __ add(r3, r3, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ add(r3, r3, Operand(Smi::FromInt(1)));
   __ str(r3, FieldMemOperand(r2, 0));
 
   __ bind(&call_function);
@@ -2054,7 +2029,7 @@
   __ b(ne, &miss);
 
   // Initialize the call counter.
-  __ Move(r5, Operand(Smi::FromInt(CallICNexus::kCallCountIncrement)));
+  __ Move(r5, Operand(Smi::FromInt(1)));
   __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
   __ str(r5, FieldMemOperand(r4, FixedArray::kHeaderSize + kPointerSize));
 
@@ -2152,13 +2127,7 @@
     // index_ is consumed by runtime conversion function.
     __ Push(object_, index_);
   }
-  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
-    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero);
-  } else {
-    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
-    // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kNumberToSmi);
-  }
+  __ CallRuntime(Runtime::kNumberToSmi);
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
   __ Move(index_, r0);
@@ -2488,67 +2457,13 @@
   // r3: from index (untagged)
   __ SmiTag(r3, r3);
   StringCharAtGenerator generator(r0, r3, r2, r0, &runtime, &runtime, &runtime,
-                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+                                  RECEIVER_IS_STRING);
   generator.GenerateFast(masm);
   __ Drop(3);
   __ Ret();
   generator.SkipSlow(masm, &runtime);
 }
 
-
-void ToNumberStub::Generate(MacroAssembler* masm) {
-  // The ToNumber stub takes one argument in r0.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ tst(r0, Operand(kSmiTagMask));
-  __ Ret(eq);
-
-  __ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
-  // r0: receiver
-  // r1: receiver instance type
-  __ Ret(eq);
-
-  NonNumberToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub);
-}
-
-void NonNumberToNumberStub::Generate(MacroAssembler* masm) {
-  // The NonNumberToNumber stub takes one argument in r0.
-  __ AssertNotNumber(r0);
-
-  __ CompareObjectType(r0, r1, r1, FIRST_NONSTRING_TYPE);
-  // r0: receiver
-  // r1: receiver instance type
-  StringToNumberStub stub(masm->isolate());
-  __ TailCallStub(&stub, lo);
-
-  Label not_oddball;
-  __ cmp(r1, Operand(ODDBALL_TYPE));
-  __ b(ne, &not_oddball);
-  __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
-  __ Ret();
-  __ bind(&not_oddball);
-
-  __ Push(r0);  // Push argument.
-  __ TailCallRuntime(Runtime::kToNumber);
-}
-
-void StringToNumberStub::Generate(MacroAssembler* masm) {
-  // The StringToNumber stub takes one argument in r0.
-  __ AssertString(r0);
-
-  // Check if string has a cached array index.
-  Label runtime;
-  __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
-  __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
-  __ b(ne, &runtime);
-  __ IndexFromHash(r2, r0);
-  __ Ret();
-
-  __ bind(&runtime);
-  __ Push(r0);  // Push argument.
-  __ TailCallRuntime(Runtime::kStringToNumber);
-}
-
 void ToStringStub::Generate(MacroAssembler* masm) {
   // The ToString stub takes one argument in r0.
   Label is_number;
@@ -2714,7 +2629,7 @@
   // Load r2 with the allocation site.  We stick an undefined dummy value here
   // and replace it with the real allocation site later when we instantiate this
   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
-  __ Move(r2, handle(isolate()->heap()->undefined_value()));
+  __ Move(r2, isolate()->factory()->undefined_value());
 
   // Make sure that we actually patched the allocation site.
   if (FLAG_debug_code) {
@@ -3559,14 +3474,14 @@
 
 void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  LoadICStub stub(isolate(), state());
+  LoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
 
 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister());
-  KeyedLoadICStub stub(isolate(), state());
+  KeyedLoadICStub stub(isolate());
   stub.GenerateForTrampoline(masm);
 }
 
@@ -4196,19 +4111,13 @@
   }
 }
 
-
-void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+void CommonArrayConstructorStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
       isolate);
   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
       isolate);
-  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
-      isolate);
-}
-
-
-void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
-    Isolate* isolate) {
+  ArrayNArgumentsConstructorStub stub(isolate);
+  stub.GetCode();
   ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
   for (int i = 0; i < 2; i++) {
     // For internal arrays we only need a few things
@@ -4216,8 +4125,6 @@
     stubh1.GetCode();
     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
     stubh2.GetCode();
-    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
-    stubh3.GetCode();
   }
 }
 
@@ -4237,13 +4144,15 @@
     CreateArrayDispatchOneArgument(masm, mode);
 
     __ bind(&not_one_case);
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
   } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
   } else if (argument_count() == MORE_THAN_ONE) {
-    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+    ArrayNArgumentsConstructorStub stub(masm->isolate());
+    __ TailCallStub(&stub);
   } else {
     UNREACHABLE();
   }
@@ -4325,7 +4234,7 @@
   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
   __ TailCallStub(&stub0, lo);
 
-  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  ArrayNArgumentsConstructorStub stubN(isolate());
   __ TailCallStub(&stubN, hi);
 
   if (IsFastPackedElementsKind(kind)) {
@@ -4547,10 +4456,10 @@
   // specified by the function's internal formal parameter count.
   Label rest_parameters;
   __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
-  __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r1,
-         FieldMemOperand(r1, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ sub(r0, r0, r1, SetCC);
+  __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+  __ ldr(r3,
+         FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ sub(r0, r0, r3, SetCC);
   __ b(gt, &rest_parameters);
 
   // Return an empty rest parameter array.
@@ -4597,15 +4506,16 @@
     // ----------- S t a t e -------------
     //  -- cp : context
     //  -- r0 : number of rest parameters (tagged)
+    //  -- r1 : function
     //  -- r2 : pointer to first rest parameters
     //  -- lr : return address
     // -----------------------------------
 
     // Allocate space for the rest parameter array plus the backing store.
     Label allocate, done_allocate;
-    __ mov(r1, Operand(JSArray::kSize + FixedArray::kHeaderSize));
-    __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
-    __ Allocate(r1, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
+    __ mov(r6, Operand(JSArray::kSize + FixedArray::kHeaderSize));
+    __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
+    __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
     __ bind(&done_allocate);
 
     // Setup the elements array in r3.
@@ -4637,17 +4547,25 @@
     __ mov(r0, r4);
     __ Ret();
 
-    // Fall back to %AllocateInNewSpace.
+    // Fall back to %AllocateInNewSpace (if not too big).
+    Label too_big_for_new_space;
     __ bind(&allocate);
+    __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
+    __ b(gt, &too_big_for_new_space);
     {
       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-      __ SmiTag(r1);
-      __ Push(r0, r2, r1);
+      __ SmiTag(r6);
+      __ Push(r0, r2, r6);
       __ CallRuntime(Runtime::kAllocateInNewSpace);
       __ mov(r3, r0);
       __ Pop(r0, r2);
     }
     __ jmp(&done_allocate);
+
+    // Fall back to %NewRestParameter.
+    __ bind(&too_big_for_new_space);
+    __ push(r1);
+    __ TailCallRuntime(Runtime::kNewRestParameter);
   }
 }
 
@@ -4906,9 +4824,9 @@
   __ cmp(ip, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   __ b(eq, &arguments_adaptor);
   {
-    __ ldr(r1, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
     __ ldr(r0, FieldMemOperand(
-                   r1, SharedFunctionInfo::kFormalParameterCountOffset));
+                   r4, SharedFunctionInfo::kFormalParameterCountOffset));
     __ add(r2, r2, Operand(r0, LSL, kPointerSizeLog2 - 1));
     __ add(r2, r2,
            Operand(StandardFrameConstants::kCallerSPOffset - 1 * kPointerSize));
@@ -4926,15 +4844,16 @@
   // ----------- S t a t e -------------
   //  -- cp : context
   //  -- r0 : number of rest parameters (tagged)
+  //  -- r1 : function
   //  -- r2 : pointer to first rest parameters
   //  -- lr : return address
   // -----------------------------------
 
   // Allocate space for the strict arguments object plus the backing store.
   Label allocate, done_allocate;
-  __ mov(r1, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
-  __ add(r1, r1, Operand(r0, LSL, kPointerSizeLog2 - 1));
-  __ Allocate(r1, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
+  __ mov(r6, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize));
+  __ add(r6, r6, Operand(r0, LSL, kPointerSizeLog2 - 1));
+  __ Allocate(r6, r3, r4, r5, &allocate, NO_ALLOCATION_FLAGS);
   __ bind(&done_allocate);
 
   // Setup the elements array in r3.
@@ -4966,44 +4885,25 @@
   __ mov(r0, r4);
   __ Ret();
 
-  // Fall back to %AllocateInNewSpace.
+  // Fall back to %AllocateInNewSpace (if not too big).
+  Label too_big_for_new_space;
   __ bind(&allocate);
+  __ cmp(r6, Operand(Page::kMaxRegularHeapObjectSize));
+  __ b(gt, &too_big_for_new_space);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ SmiTag(r1);
-    __ Push(r0, r2, r1);
+    __ SmiTag(r6);
+    __ Push(r0, r2, r6);
     __ CallRuntime(Runtime::kAllocateInNewSpace);
     __ mov(r3, r0);
     __ Pop(r0, r2);
   }
   __ b(&done_allocate);
-}
 
-
-void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) {
-  Register context = cp;
-  Register result = r0;
-  Register slot = r2;
-
-  // Go up the context chain to the script context.
-  for (int i = 0; i < depth(); ++i) {
-    __ ldr(result, ContextMemOperand(context, Context::PREVIOUS_INDEX));
-    context = result;
-  }
-
-  // Load the PropertyCell value at the specified slot.
-  __ add(result, context, Operand(slot, LSL, kPointerSizeLog2));
-  __ ldr(result, ContextMemOperand(result));
-  __ ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
-
-  // If the result is not the_hole, return. Otherwise, handle in the runtime.
-  __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-  __ Ret(ne);
-
-  // Fallback to runtime.
-  __ SmiTag(slot);
-  __ push(slot);
-  __ TailCallRuntime(Runtime::kLoadGlobalViaContext);
+  // Fall back to %NewStrictArguments.
+  __ bind(&too_big_for_new_space);
+  __ push(r1);
+  __ TailCallRuntime(Runtime::kNewStrictArguments);
 }
 
 
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 4014aba..a7b38ff 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -16,68 +16,6 @@
 
 #define __ masm.
 
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_arm_machine_code = nullptr;
-double fast_exp_simulator(double x, Isolate* isolate) {
-  return Simulator::current(isolate)
-      ->CallFPReturnsDouble(fast_exp_arm_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
-  size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
-  if (buffer == nullptr) return nullptr;
-  ExternalReference::InitializeMathExpData();
-
-  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
-                      CodeObjectRequired::kNo);
-
-  {
-    DwVfpRegister input = d0;
-    DwVfpRegister result = d1;
-    DwVfpRegister double_scratch1 = d2;
-    DwVfpRegister double_scratch2 = d3;
-    Register temp1 = r4;
-    Register temp2 = r5;
-    Register temp3 = r6;
-
-    if (masm.use_eabi_hardfloat()) {
-      // Input value is in d0 anyway, nothing to do.
-    } else {
-      __ vmov(input, r0, r1);
-    }
-    __ Push(temp3, temp2, temp1);
-    MathExpGenerator::EmitMathExp(
-        &masm, input, result, double_scratch1, double_scratch2,
-        temp1, temp2, temp3);
-    __ Pop(temp3, temp2, temp1);
-    if (masm.use_eabi_hardfloat()) {
-      __ vmov(d0, result);
-    } else {
-      __ vmov(r0, r1, result);
-    }
-    __ Ret();
-  }
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  DCHECK(!RelocInfo::RequiresRelocation(desc));
-
-  Assembler::FlushICache(isolate, buffer, actual_size);
-  base::OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
-  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
-#else
-  fast_exp_arm_machine_code = buffer;
-  return &fast_exp_simulator;
-#endif
-}
-
 #if defined(V8_HOST_ARCH_ARM)
 MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                 MemCopyUint8Function stub) {
@@ -794,94 +732,6 @@
   __ bind(&done);
 }
 
-
-static MemOperand ExpConstant(int index, Register base) {
-  return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
-                                   DwVfpRegister input,
-                                   DwVfpRegister result,
-                                   DwVfpRegister double_scratch1,
-                                   DwVfpRegister double_scratch2,
-                                   Register temp1,
-                                   Register temp2,
-                                   Register temp3) {
-  DCHECK(!input.is(result));
-  DCHECK(!input.is(double_scratch1));
-  DCHECK(!input.is(double_scratch2));
-  DCHECK(!result.is(double_scratch1));
-  DCHECK(!result.is(double_scratch2));
-  DCHECK(!double_scratch1.is(double_scratch2));
-  DCHECK(!temp1.is(temp2));
-  DCHECK(!temp1.is(temp3));
-  DCHECK(!temp2.is(temp3));
-  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
-  DCHECK(!masm->serializer_enabled());  // External references not serializable.
-
-  Label zero, infinity, done;
-
-  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
-  __ vldr(double_scratch1, ExpConstant(0, temp3));
-  __ VFPCompareAndSetFlags(double_scratch1, input);
-  __ b(ge, &zero);
-
-  __ vldr(double_scratch2, ExpConstant(1, temp3));
-  __ VFPCompareAndSetFlags(input, double_scratch2);
-  __ b(ge, &infinity);
-
-  __ vldr(double_scratch1, ExpConstant(3, temp3));
-  __ vldr(result, ExpConstant(4, temp3));
-  __ vmul(double_scratch1, double_scratch1, input);
-  __ vadd(double_scratch1, double_scratch1, result);
-  __ VmovLow(temp2, double_scratch1);
-  __ vsub(double_scratch1, double_scratch1, result);
-  __ vldr(result, ExpConstant(6, temp3));
-  __ vldr(double_scratch2, ExpConstant(5, temp3));
-  __ vmul(double_scratch1, double_scratch1, double_scratch2);
-  __ vsub(double_scratch1, double_scratch1, input);
-  __ vsub(result, result, double_scratch1);
-  __ vmul(double_scratch2, double_scratch1, double_scratch1);
-  __ vmul(result, result, double_scratch2);
-  __ vldr(double_scratch2, ExpConstant(7, temp3));
-  __ vmul(result, result, double_scratch2);
-  __ vsub(result, result, double_scratch1);
-  // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
-  DCHECK(*reinterpret_cast<double*>
-         (ExternalReference::math_exp_constants(8).address()) == 1);
-  __ vmov(double_scratch2, 1);
-  __ vadd(result, result, double_scratch2);
-  __ mov(temp1, Operand(temp2, LSR, 11));
-  __ Ubfx(temp2, temp2, 0, 11);
-  __ add(temp1, temp1, Operand(0x3ff));
-
-  // Must not call ExpConstant() after overwriting temp3!
-  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
-  __ add(temp3, temp3, Operand(temp2, LSL, 3));
-  __ ldm(ia, temp3, temp2.bit() | temp3.bit());
-  // The first word is loaded is the lower number register.
-  if (temp2.code() < temp3.code()) {
-    __ orr(temp1, temp3, Operand(temp1, LSL, 20));
-    __ vmov(double_scratch1, temp2, temp1);
-  } else {
-    __ orr(temp1, temp2, Operand(temp1, LSL, 20));
-    __ vmov(double_scratch1, temp3, temp1);
-  }
-  __ vmul(result, result, double_scratch1);
-  __ b(&done);
-
-  __ bind(&zero);
-  __ vmov(result, kDoubleRegZero);
-  __ b(&done);
-
-  __ bind(&infinity);
-  __ vldr(result, ExpConstant(2, temp3));
-
-  __ bind(&done);
-}
-
 #undef __
 
 #ifdef DEBUG
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 880825a..0086739 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -28,22 +28,6 @@
 };
 
 
-class MathExpGenerator : public AllStatic {
- public:
-  // Register input isn't modified. All other registers are clobbered.
-  static void EmitMathExp(MacroAssembler* masm,
-                          DwVfpRegister input,
-                          DwVfpRegister result,
-                          DwVfpRegister double_scratch1,
-                          DwVfpRegister double_scratch2,
-                          Register temp1,
-                          Register temp2,
-                          Register temp3);
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
 }  // namespace internal
 }  // namespace v8
 
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 2785b75..c569e66 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -66,15 +66,12 @@
     Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
     // We need calls to have a predictable size in the unoptimized code, but
     // this is optimized code, so we don't have to have a predictable size.
-    int call_size_in_bytes =
-        MacroAssembler::CallSizeNotPredictableCodeSize(isolate,
-                                                       deopt_entry,
-                                                       RelocInfo::NONE32);
+    int call_size_in_bytes = MacroAssembler::CallDeoptimizerSize();
     int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
     DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
     DCHECK(call_size_in_bytes <= patch_size());
     CodePatcher patcher(isolate, call_address, call_size_in_words);
-    patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
+    patcher.masm()->CallDeoptimizer(deopt_entry);
     DCHECK(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
     DCHECK(call_address + patch_size() <= code->instruction_end());
@@ -189,8 +186,7 @@
   // Copy VFP registers to
   // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
   int double_regs_offset = FrameDescription::double_registers_offset();
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
     int code = config->GetAllocatableDoubleCode(i);
     int dst_offset = code * kDoubleSize + double_regs_offset;
@@ -307,15 +303,50 @@
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
   // Create a sequence of deoptimization entries.
   // Note that registers are still live when jumping to an entry.
-  Label done;
-  for (int i = 0; i < count(); i++) {
-    int start = masm()->pc_offset();
-    USE(start);
-    __ mov(ip, Operand(i));
-    __ b(&done);
-    DCHECK(masm()->pc_offset() - start == table_entry_size_);
+
+  // We need to be able to generate immediates up to kMaxNumberOfEntries. On
+  // ARMv7, we can use movw (with a maximum immediate of 0xffff). On ARMv6, we
+  // need two instructions.
+  STATIC_ASSERT((kMaxNumberOfEntries - 1) <= 0xffff);
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatureScope scope(masm(), ARMv7);
+    Label done;
+    for (int i = 0; i < count(); i++) {
+      int start = masm()->pc_offset();
+      USE(start);
+      __ movw(ip, i);
+      __ b(&done);
+      DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
+    }
+    __ bind(&done);
+  } else {
+    // We want to keep table_entry_size_ == 8 (since this is the common case),
+    // but we need two instructions to load most immediates over 0xff. To handle
+    // this, we set the low byte in the main table, and then set the high byte
+    // in a separate table if necessary.
+    Label high_fixes[256];
+    int high_fix_max = (count() - 1) >> 8;
+    DCHECK_GT(arraysize(high_fixes), high_fix_max);
+    for (int i = 0; i < count(); i++) {
+      int start = masm()->pc_offset();
+      USE(start);
+      __ mov(ip, Operand(i & 0xff));  // Set the low byte.
+      __ b(&high_fixes[i >> 8]);      // Jump to the secondary table.
+      DCHECK_EQ(table_entry_size_, masm()->pc_offset() - start);
+    }
+    // Generate the secondary table, to set the high byte.
+    for (int high = 1; high <= high_fix_max; high++) {
+      __ bind(&high_fixes[high]);
+      __ orr(ip, ip, Operand(high << 8));
+      // If this isn't the last entry, emit a branch to the end of the table.
+      // The last entry can just fall through.
+      if (high < high_fix_max) __ b(&high_fixes[0]);
+    }
+    // Bind high_fixes[0] last, for indices like 0x00**. This case requires no
+    // fix-up, so for (common) small tables we can jump here, then just fall
+    // through with no additional branch.
+    __ bind(&high_fixes[0]);
   }
-  __ bind(&done);
   __ push(ip);
 }
 
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 20a898e..1bb33fa 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -40,6 +40,7 @@
 namespace v8 {
 namespace internal {
 
+const auto GetRegConfig = RegisterConfiguration::Crankshaft;
 
 //------------------------------------------------------------------------------
 
@@ -755,7 +756,45 @@
           Format(instr, "'um'al'cond's 'rd, 'rn, 'rm, 'rs");
         }
       } else {
-        Unknown(instr);  // not used by V8
+        if (instr->Bits(24, 23) == 3) {
+          if (instr->Bit(20) == 1) {
+            // ldrex
+            switch (instr->Bits(22, 21)) {
+              case 0:
+                Format(instr, "ldrex'cond 'rt, ['rn]");
+                break;
+              case 2:
+                Format(instr, "ldrexb'cond 'rt, ['rn]");
+                break;
+              case 3:
+                Format(instr, "ldrexh'cond 'rt, ['rn]");
+                break;
+              default:
+                UNREACHABLE();
+                break;
+            }
+          } else {
+            // strex
+            // The instruction is documented as strex rd, rt, [rn], but the
+            // "rt" register is encoded in the rm bits.
+            switch (instr->Bits(22, 21)) {
+              case 0:
+                Format(instr, "strex'cond 'rd, 'rm, ['rn]");
+                break;
+              case 2:
+                Format(instr, "strexb'cond 'rd, 'rm, ['rn]");
+                break;
+              case 3:
+                Format(instr, "strexh'cond 'rd, 'rm, ['rn]");
+                break;
+              default:
+                UNREACHABLE();
+                break;
+            }
+          }
+        } else {
+          Unknown(instr);  // not used by V8
+        }
       }
     } else if ((instr->Bit(20) == 0) && ((instr->Bits(7, 4) & 0xd) == 0xd)) {
       // ldrd, strd
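
[Annotation, not part of the patch] A standalone sketch of the field extraction the new disassembler branch performs. In the real decoder this branch is only reached after the surrounding dispatch has already narrowed to this instruction group, so the sketch checks just the fields the hunk reads: Bits(24,23) == 3 selects the synchronization primitives, Bit(20) picks load (ldrex*) vs store (strex*), and Bits(22,21) picks the access size.

    #include <cstdint>
    #include <cstdio>

    static uint32_t Bits(uint32_t instr, int hi, int lo) {
      return (instr >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    static const char* DecodeSync(uint32_t instr) {
      if (Bits(instr, 24, 23) != 3) return "not a sync primitive";
      bool is_load = Bits(instr, 20, 20) == 1;
      switch (Bits(instr, 22, 21)) {
        case 0:  return is_load ? "ldrex"  : "strex";
        case 2:  return is_load ? "ldrexb" : "strexb";
        case 3:  return is_load ? "ldrexh" : "strexh";
        default: return "unhandled";  // case 1 is UNREACHABLE in the hunk
      }
    }

    int main() {
      printf("%s\n", DecodeSync(0xe1903f9fu));  // A32 encoding of ldrex r3, [r0]
      return 0;
    }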
@@ -2010,7 +2049,7 @@
 
 
 const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  v8::internal::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
   return tmp_buffer_.start();
 }
 
@@ -2021,7 +2060,7 @@
 
 
 const char* NameConverter::NameOfCPURegister(int reg) const {
-  return v8::internal::Register::from_code(reg).ToString();
+  return v8::internal::GetRegConfig()->GetGeneralRegisterName(reg);
 }
 
 
@@ -2073,9 +2112,8 @@
     buffer[0] = '\0';
     byte* prev_pc = pc;
     pc += d.InstructionDecode(buffer, pc);
-    v8::internal::PrintF(
-        f, "%p    %08x      %s\n",
-        prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+    v8::internal::PrintF(f, "%p    %08x      %s\n", static_cast<void*>(prev_pc),
+                         *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
   }
 }
 
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
index 4e8c95c..fa0c040 100644
--- a/src/arm/interface-descriptors-arm.cc
+++ b/src/arm/interface-descriptors-arm.cc
@@ -13,6 +13,14 @@
 
 const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
 
+void CallInterfaceDescriptor::DefaultInitializePlatformSpecific(
+    CallInterfaceDescriptorData* data, int register_parameter_count) {
+  const Register default_stub_registers[] = {r0, r1, r2, r3, r4};
+  CHECK_LE(static_cast<size_t>(register_parameter_count),
+           arraysize(default_stub_registers));
+  data->InitializePlatformSpecific(register_parameter_count,
+                                   default_stub_registers);
+}
 
 const Register LoadDescriptor::ReceiverRegister() { return r1; }
 const Register LoadDescriptor::NameRegister() { return r2; }
@@ -41,9 +49,6 @@
 const Register StoreTransitionDescriptor::MapRegister() { return r3; }
 
 
-const Register LoadGlobalViaContextDescriptor::SlotRegister() { return r2; }
-
-
 const Register StoreGlobalViaContextDescriptor::SlotRegister() { return r2; }
 const Register StoreGlobalViaContextDescriptor::ValueRegister() { return r0; }
 
@@ -65,8 +70,6 @@
 const Register GrowArrayElementsDescriptor::ObjectRegister() { return r0; }
 const Register GrowArrayElementsDescriptor::KeyRegister() { return r3; }
 
-const Register HasPropertyDescriptor::ObjectRegister() { return r0; }
-const Register HasPropertyDescriptor::KeyRegister() { return r3; }
 
 void FastNewClosureDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
@@ -255,18 +258,17 @@
   data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
+void ArraySingleArgumentConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // register state
   // r0 -- number of arguments
   // r1 -- function
   // r2 -- allocation site with elements kind
-  Register registers[] = {r1, r2};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
+  Register registers[] = {r1, r2, r0};
+  data->InitializePlatformSpecific(arraysize(registers), registers, NULL);
 }
 
-
-void ArrayConstructorDescriptor::InitializePlatformSpecific(
+void ArrayNArgumentsConstructorDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (constructor pointer, and single argument)
   Register registers[] = {r1, r2, r0};
@@ -274,24 +276,7 @@
 }
 
 
-void InternalArrayConstructorConstantArgCountDescriptor::
-    InitializePlatformSpecific(CallInterfaceDescriptorData* data) {
-  // register state
-  // r0 -- number of arguments
-  // r1 -- constructor function
-  Register registers[] = {r1};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-
-void InternalArrayConstructorDescriptor::InitializePlatformSpecific(
-    CallInterfaceDescriptorData* data) {
-  // stack param count needs (constructor pointer, and single argument)
-  Register registers[] = {r1, r0};
-  data->InitializePlatformSpecific(arraysize(registers), registers);
-}
-
-void FastArrayPushDescriptor::InitializePlatformSpecific(
+void VarArgFunctionDescriptor::InitializePlatformSpecific(
     CallInterfaceDescriptorData* data) {
   // stack param count needs (arg count)
   Register registers[] = {r0};
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index d723251..4feadb7 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -89,17 +89,6 @@
 }
 
 
-int MacroAssembler::CallSizeNotPredictableCodeSize(Isolate* isolate,
-                                                   Address target,
-                                                   RelocInfo::Mode rmode,
-                                                   Condition cond) {
-  Instr mov_instr = cond | MOV | LeaveCC;
-  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
-  return kInstrSize +
-         mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
-}
-
-
 void MacroAssembler::Call(Address target,
                           RelocInfo::Mode rmode,
                           Condition cond,
@@ -131,12 +120,6 @@
   //  blx   ip
   //                      @ return address
 
-  // Statement positions are expected to be recorded when the target
-  // address is loaded. The mov method will automatically record
-  // positions when pc is the target, since this is not the case here
-  // we have to do it explicitly.
-  positions_recorder()->WriteRecordedPositions();
-
   mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
   blx(ip, cond);
 
@@ -173,6 +156,40 @@
   Call(reinterpret_cast<Address>(code.location()), rmode, cond, mode);
 }
 
+void MacroAssembler::CallDeoptimizer(Address target) {
+  BlockConstPoolScope block_const_pool(this);
+
+  uintptr_t target_raw = reinterpret_cast<uintptr_t>(target);
+
+  // We use blx, like a call, but it does not return here. The link register is
+  // used by the deoptimizer to work out what called it.
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatureScope scope(this, ARMv7);
+    movw(ip, target_raw & 0xffff);
+    movt(ip, (target_raw >> 16) & 0xffff);
+    blx(ip);
+  } else {
+    // We need to load a literal, but we can't use the usual constant pool
+    // because we call this from a patcher, and cannot afford the guard
+    // instruction and other administrative overhead.
+    ldr(ip, MemOperand(pc, (2 * kInstrSize) - kPcLoadDelta));
+    blx(ip);
+    dd(target_raw);
+  }
+}
+
+int MacroAssembler::CallDeoptimizerSize() {
+  // ARMv7+:
+  //    movw    ip, ...
+  //    movt    ip, ...
+  //    blx     ip              @ This never returns.
+  //
+  // ARMv6:
+  //    ldr     ip, =address
+  //    blx     ip              @ This never returns.
+  //    .word   address
+  return 3 * kInstrSize;
+}
 
 void MacroAssembler::Ret(Condition cond) {
   bx(lr, cond);
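
[Annotation, not part of the patch] A standalone check of the literal-load arithmetic in the ARMv6 path of CallDeoptimizer above: in A32, reading pc yields the address of the current instruction plus 8 (kPcLoadDelta), so an offset of 2 * kInstrSize - kPcLoadDelta lands exactly on the .word emitted after the blx, and both paths come out to the 3 * kInstrSize reported by CallDeoptimizerSize().

    #include <cassert>

    constexpr int kInstrSize = 4;
    constexpr int kPcLoadDelta = 8;  // A32: pc reads as current instruction + 8

    int main() {
      const int ldr_addr  = 0;                      // ldr ip, [pc, #offset]
      const int blx_addr  = ldr_addr + kInstrSize;  // blx ip
      const int word_addr = blx_addr + kInstrSize;  // .word target
      const int offset = 2 * kInstrSize - kPcLoadDelta;
      assert(ldr_addr + kPcLoadDelta + offset == word_addr);
      // Total sequence size matches CallDeoptimizerSize().
      assert(word_addr + kInstrSize - ldr_addr == 3 * kInstrSize);
      return 0;
    }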
@@ -245,6 +262,11 @@
   }
 }
 
+void MacroAssembler::Move(SwVfpRegister dst, SwVfpRegister src) {
+  if (!dst.is(src)) {
+    vmov(dst, src);
+  }
+}
 
 void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
   if (!dst.is(src)) {
@@ -252,11 +274,10 @@
   }
 }
 
-
 void MacroAssembler::Mls(Register dst, Register src1, Register src2,
                          Register srcA, Condition cond) {
-  if (CpuFeatures::IsSupported(MLS)) {
-    CpuFeatureScope scope(this, MLS);
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    CpuFeatureScope scope(this, ARMv7);
     mls(dst, src1, src2, srcA, cond);
   } else {
     DCHECK(!srcA.is(ip));
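
[Annotation, not part of the patch] The non-ARMv7 branch (cut off by the hunk boundary above) emits a multiply into the ip scratch register followed by a subtract, which is why srcA must not alias ip. As arithmetic it reduces to the following sketch, with illustrative names:

    #include <cassert>
    #include <cstdint>

    // mls dst, src1, src2, srcA computes dst = srcA - src1 * src2 (mod 2^32);
    // the fallback gets the same result with mul ip, src1, src2 followed by
    // sub dst, srcA, ip.
    uint32_t MlsFallback(uint32_t src1, uint32_t src2, uint32_t srcA) {
      uint32_t ip = src1 * src2;  // mul ip, src1, src2
      return srcA - ip;           // sub dst, srcA, ip
    }

    int main() {
      assert(MlsFallback(3, 4, 20) == 8);
      assert(MlsFallback(0xffffffffu, 2, 0) == 2);  // wraparound matches mls
      return 0;
    }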
@@ -841,8 +862,7 @@
   // Number of d-regs not known at snapshot time.
   DCHECK(!serializer_enabled());
   // General purpose registers are pushed last on the stack.
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   int doubles_size = config->num_allocatable_double_registers() * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
   return MemOperand(sp, doubles_size + register_offset);
@@ -1237,9 +1257,8 @@
 
 void MacroAssembler::EmitLoadTypeFeedbackVector(Register vector) {
   ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
-  ldr(vector, FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
-  ldr(vector,
-      FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+  ldr(vector, FieldMemOperand(vector, JSFunction::kLiteralsOffset));
+  ldr(vector, FieldMemOperand(vector, LiteralsArray::kFeedbackVectorOffset));
 }
 
 
@@ -1553,12 +1572,13 @@
                                              const ParameterCount& expected,
                                              const ParameterCount& actual) {
   Label skip_flooding;
-  ExternalReference step_in_enabled =
-      ExternalReference::debug_step_in_enabled_address(isolate());
-  mov(r4, Operand(step_in_enabled));
-  ldrb(r4, MemOperand(r4));
-  cmp(r4, Operand(0));
-  b(eq, &skip_flooding);
+  ExternalReference last_step_action =
+      ExternalReference::debug_last_step_action_address(isolate());
+  STATIC_ASSERT(StepFrame > StepIn);
+  mov(r4, Operand(last_step_action));
+  ldrsb(r4, MemOperand(r4));
+  cmp(r4, Operand(StepIn));
+  b(lt, &skip_flooding);
   {
     FrameScope frame(this,
                      has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
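
[Annotation, not part of the patch] A sketch of the check the hunk above switches to: the last-step-action byte is loaded sign-extended (ldrsb), so a "no stepping" sentinel below zero compares less than StepIn, and the STATIC_ASSERT guarantees that a single >= StepIn comparison also covers StepFrame. The enum values below are illustrative placeholders, not V8's definitions.

    #include <cassert>
    #include <cstdint>

    // Illustrative values only; what matters is the ordering asserted by the
    // STATIC_ASSERT in the hunk.
    enum StepAction : int8_t {
      StepNone = -1, StepOut = 0, StepNext = 1, StepIn = 2, StepFrame = 3
    };
    static_assert(StepFrame > StepIn, "one comparison must cover both");

    bool ShouldFlood(int8_t last_step_action) {
      // The generated code branches to skip_flooding on lt; this is the
      // inverted condition.
      return last_step_action >= StepIn;
    }

    int main() {
      assert(!ShouldFlood(StepNone));
      assert(!ShouldFlood(StepNext));
      assert(ShouldFlood(StepIn));
      assert(ShouldFlood(StepFrame));
      return 0;
    }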
@@ -3870,8 +3890,7 @@
   if (reg5.is_valid()) regs |= reg5.bit();
   if (reg6.is_valid()) regs |= reg6.bit();
 
-  const RegisterConfiguration* config =
-      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  const RegisterConfiguration* config = RegisterConfiguration::Crankshaft();
   for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
     int code = config->GetAllocatableGeneralCode(i);
     Register candidate = Register::from_code(code);
@@ -3969,6 +3988,10 @@
     Assembler::FlushICache(masm_.isolate(), address_, size_);
   }
 
+  // Check that we don't have any pending constant pools.
+  DCHECK(masm_.pending_32_bit_constants_.empty());
+  DCHECK(masm_.pending_64_bit_constants_.empty());
+
   // Check that the code was patched as expected.
   DCHECK(masm_.pc_ == address_ + size_);
   DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 8fa197c..16dcd47 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -101,10 +101,6 @@
   int CallStubSize(CodeStub* stub,
                    TypeFeedbackId ast_id = TypeFeedbackId::None(),
                    Condition cond = al);
-  static int CallSizeNotPredictableCodeSize(Isolate* isolate,
-                                            Address target,
-                                            RelocInfo::Mode rmode,
-                                            Condition cond = al);
 
   // Jump, Call, and Ret pseudo instructions implementing inter-working.
   void Jump(Register target, Condition cond = al);
@@ -114,17 +110,19 @@
   void Call(Address target, RelocInfo::Mode rmode,
             Condition cond = al,
             TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
+  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+            TypeFeedbackId ast_id = TypeFeedbackId::None(), Condition cond = al,
+            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
   int CallSize(Handle<Code> code,
                RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
                TypeFeedbackId ast_id = TypeFeedbackId::None(),
                Condition cond = al);
-  void Call(Handle<Code> code,
-            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
-            TypeFeedbackId ast_id = TypeFeedbackId::None(),
-            Condition cond = al,
-            TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
   void Ret(Condition cond = al);
 
+  // Used for patching in calls to the deoptimizer.
+  void CallDeoptimizer(Address target);
+  static int CallDeoptimizerSize();
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the sp register.
   void Drop(int count, Condition cond = al);
@@ -172,6 +170,7 @@
       mov(dst, src, sbit, cond);
     }
   }
+  void Move(SwVfpRegister dst, SwVfpRegister src);
   void Move(DwVfpRegister dst, DwVfpRegister src);
 
   void Load(Register dst, const MemOperand& src, Representation r);
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 1a870c5..afe31db 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -299,8 +299,11 @@
           if (strcmp(arg1, "all") == 0) {
             for (int i = 0; i < kNumRegisters; i++) {
               value = GetRegisterValue(i);
-              PrintF("%3s: 0x%08x %10d", Register::from_code(i).ToString(),
-                     value, value);
+              PrintF(
+                  "%3s: 0x%08x %10d",
+                  RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
+                      i),
+                  value, value);
               if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
                   i < 8 &&
                   (i % 2) == 0) {
@@ -633,9 +636,7 @@
   last_debugger_input_ = input;
 }
 
-
-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
-                            void* start_addr,
+void Simulator::FlushICache(base::HashMap* i_cache, void* start_addr,
                             size_t size) {
   intptr_t start = reinterpret_cast<intptr_t>(start_addr);
   int intra_line = (start & CachePage::kLineMask);
@@ -656,10 +657,8 @@
   }
 }
 
-
-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
-  v8::internal::HashMap::Entry* entry =
-      i_cache->LookupOrInsert(page, ICacheHash(page));
+CachePage* Simulator::GetCachePage(base::HashMap* i_cache, void* page) {
+  base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
   if (entry->value == NULL) {
     CachePage* new_page = new CachePage();
     entry->value = new_page;
@@ -669,9 +668,7 @@
 
 
 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
-                             intptr_t start,
-                             int size) {
+void Simulator::FlushOnePage(base::HashMap* i_cache, intptr_t start, int size) {
   DCHECK(size <= CachePage::kPageSize);
   DCHECK(AllOnOnePage(start, size - 1));
   DCHECK((start & CachePage::kLineMask) == 0);
@@ -683,9 +680,7 @@
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }
 
-
-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
-                            Instruction* instr) {
+void Simulator::CheckICache(base::HashMap* i_cache, Instruction* instr) {
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
@@ -718,7 +713,7 @@
 Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
   i_cache_ = isolate_->simulator_i_cache();
   if (i_cache_ == NULL) {
-    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+    i_cache_ = new base::HashMap(&ICacheMatch);
     isolate_->set_simulator_i_cache(i_cache_);
   }
   Initialize(isolate);
@@ -850,10 +845,10 @@
 
 
 // static
-void Simulator::TearDown(HashMap* i_cache, Redirection* first) {
+void Simulator::TearDown(base::HashMap* i_cache, Redirection* first) {
   Redirection::DeleteChain(first);
   if (i_cache != nullptr) {
-    for (HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
+    for (base::HashMap::Entry* entry = i_cache->Start(); entry != nullptr;
          entry = i_cache->Next(entry)) {
       delete static_cast<CachePage*>(entry->value);
     }
@@ -1808,15 +1803,17 @@
           case ExternalReference::BUILTIN_FP_FP_CALL:
           case ExternalReference::BUILTIN_COMPARE_CALL:
             PrintF("Call to host function at %p with args %f, %f",
-                   FUNCTION_ADDR(generic_target), dval0, dval1);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+                   dval1);
             break;
           case ExternalReference::BUILTIN_FP_CALL:
             PrintF("Call to host function at %p with arg %f",
-                FUNCTION_ADDR(generic_target), dval0);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0);
             break;
           case ExternalReference::BUILTIN_FP_INT_CALL:
             PrintF("Call to host function at %p with args %f, %d",
-                   FUNCTION_ADDR(generic_target), dval0, ival);
+                   static_cast<void*>(FUNCTION_ADDR(generic_target)), dval0,
+                   ival);
             break;
           default:
             UNREACHABLE();
@@ -1942,7 +1939,8 @@
           PrintF(
               "Call to host triple returning runtime function %p "
               "args %08x, %08x, %08x, %08x, %08x",
-              FUNCTION_ADDR(target), arg1, arg2, arg3, arg4, arg5);
+              static_cast<void*>(FUNCTION_ADDR(target)), arg1, arg2, arg3, arg4,
+              arg5);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08x\n", get_register(sp));
           }
@@ -1953,7 +1951,8 @@
         // pass it to the target function.
         ObjectTriple result = target(arg1, arg2, arg3, arg4, arg5);
         if (::v8::internal::FLAG_trace_sim) {
-          PrintF("Returned { %p, %p, %p }\n", result.x, result.y, result.z);
+          PrintF("Returned { %p, %p, %p }\n", static_cast<void*>(result.x),
+                 static_cast<void*>(result.y), static_cast<void*>(result.z));
         }
         // Return is passed back in address pointed to by hidden first argument.
         ObjectTriple* sim_result = reinterpret_cast<ObjectTriple*>(arg0);
@@ -1969,13 +1968,8 @@
           PrintF(
               "Call to host function at %p "
               "args %08x, %08x, %08x, %08x, %08x, %08x",
-              FUNCTION_ADDR(target),
-              arg0,
-              arg1,
-              arg2,
-              arg3,
-              arg4,
-              arg5);
+              static_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2, arg3,
+              arg4, arg5);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08x\n", get_register(sp));
           }
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index b3c8eb4..71b8e40 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -68,7 +68,7 @@
 
 #include "src/arm/constants-arm.h"
 #include "src/assembler.h"
-#include "src/hashmap.h"
+#include "src/base/hashmap.h"
 
 namespace v8 {
 namespace internal {
@@ -200,7 +200,7 @@
   // Call on program start.
   static void Initialize(Isolate* isolate);
 
-  static void TearDown(HashMap* i_cache, Redirection* first);
+  static void TearDown(base::HashMap* i_cache, Redirection* first);
 
   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -222,8 +222,7 @@
   char* last_debugger_input() { return last_debugger_input_; }
 
   // ICache checking.
-  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
-                          size_t size);
+  static void FlushICache(base::HashMap* i_cache, void* start, size_t size);
 
   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_lr, end_sim_pc).
@@ -342,10 +341,9 @@
   void InstructionDecode(Instruction* instr);
 
   // ICache.
-  static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
-                           int size);
-  static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+  static void CheckICache(base::HashMap* i_cache, Instruction* instr);
+  static void FlushOnePage(base::HashMap* i_cache, intptr_t start, int size);
+  static CachePage* GetCachePage(base::HashMap* i_cache, void* page);
 
   // Runtime call support.
   static void* RedirectExternalReference(
@@ -405,7 +403,7 @@
   char* last_debugger_input_;
 
   // Icache simulation
-  v8::internal::HashMap* i_cache_;
+  base::HashMap* i_cache_;
 
   // Registered breakpoints.
   Instruction* break_pc_;