Merge V8 5.2.361.47 DO NOT MERGE

https://chromium.googlesource.com/v8/v8/+/5.2.361.47

FPIIM-449

Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 7cbbd3a..3dbfd6b 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -1192,14 +1192,199 @@
 // ------------Pseudo-instructions-------------
 
 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
-  lwr(rd, rs);
-  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
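+  // r6 supports unaligned word loads in hardware; earlier ISAs assemble the
+  // word from its two aligned fragments with an lwr/lwl pair, going through
+  // at when rd aliases the base register or the offset must be materialized.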
+  if (IsMipsArchVariant(kMips32r6)) {
+    lw(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset() + kMipsLwrOffset) &&
+        is_int16(rs.offset() + kMipsLwlOffset)) {
+      if (!rd.is(rs.rm())) {
+        lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+      } else {
+        lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
+        lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
+        mov(rd, at);
+      }
+    } else {  // The offset does not fit in int16; compute the address in at.
+      LoadRegPlusOffsetToAt(rs);
+      lwr(rd, MemOperand(at, kMipsLwrOffset));
+      lwl(rd, MemOperand(at, kMipsLwlOffset));
+    }
+  }
 }
 
 
 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
-  swr(rd, rs);
-  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
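+  // Mirror of Ulw: r6 stores unaligned words directly, while earlier ISAs
+  // use an swr/swl pair. Stores do not clobber rd, so no aliasing copy is
+  // needed.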
+  if (IsMipsArchVariant(kMips32r6)) {
+    sw(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset() + kMipsSwrOffset) &&
+        is_int16(rs.offset() + kMipsSwlOffset)) {
+      swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
+      swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
+    } else {
+      LoadRegPlusOffsetToAt(rs);
+      swr(rd, MemOperand(at, kMipsSwrOffset));
+      swl(rd, MemOperand(at, kMipsSwlOffset));
+    }
+  }
+}
+
+void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    lh(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lb(rd, rs);
+#endif
+    } else {  // The offset does not fit in int16; compute the address in at.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lb(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lb(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
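+    // rd now holds the sign-extended high byte and at the zero-extended low
+    // byte; merge them into the final halfword.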
+    sll(rd, rd, 8);
+    or_(rd, rd, at);
+  }
+}
+
+void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    lhu(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(at, rs);
+      lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
+      lbu(rd, rs);
+#endif
+    } else {  // The offset does not fit in int16; compute the address in at.
+      LoadRegPlusOffsetToAt(rs);
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+      lbu(rd, MemOperand(at, 1));
+      lbu(at, MemOperand(at, 0));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+      lbu(rd, MemOperand(at, 0));
+      lbu(at, MemOperand(at, 1));
+#endif
+    }
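+    // Both bytes were loaded with lbu, so the merged result is zero-extended.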
+    sll(rd, rd, 8);
+    or_(rd, rd, at);
+  }
+}
+
+void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
+  DCHECK(!rd.is(at));
+  DCHECK(!rs.rm().is(at));
+  DCHECK(!rs.rm().is(scratch));
+  DCHECK(!scratch.is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    sh(rd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
+    MemOperand source = rs;
+    // If the offset does not fit in int16, compute the full address in at
+    // and use a zero offset.
+    if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
+      LoadRegPlusOffsetToAt(rs);
+      source = MemOperand(at, 0);
+    }
+
+    if (!scratch.is(rd)) {
+      mov(scratch, rd);
+    }
+
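+    // Store the low byte first, then shift and store the high byte; the two
+    // byte addresses depend on the target endianness.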
+#if defined(V8_TARGET_LITTLE_ENDIAN)
+    sb(scratch, source);
+    srl(scratch, scratch, 8);
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+#elif defined(V8_TARGET_BIG_ENDIAN)
+    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
+    srl(scratch, scratch, 8);
+    sb(scratch, source);
+#endif
+  }
+}
+
+void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (IsMipsArchVariant(kMips32r6)) {
+    lwc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
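+    // Pre-r6 has no unaligned FPU load: load through a GPR with Ulw and
+    // move the bits into fd with mtc1.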
+    Ulw(scratch, rs);
+    mtc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  if (IsMipsArchVariant(kMips32r6)) {
+    swc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
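+    // Move the bits out of fd with mfc1 and store them with Usw.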
+    mfc1(scratch, fd);
+    Usw(scratch, rs);
+  }
+}
+
+void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    ldc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
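+    // A double spans two words; load each (possibly unaligned) word with Ulw
+    // and move it into the corresponding half of fd.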
+    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
+    mtc1(scratch, fd);
+    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
+    Mthc1(scratch, fd);
+  }
+}
+
+void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
+                           Register scratch) {
+  DCHECK(!scratch.is(at));
+  if (IsMipsArchVariant(kMips32r6)) {
+    sdc1(fd, rs);
+  } else {
+    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
+           IsMipsArchVariant(kLoongson));
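+    // Store the two halves of fd separately through scratch, mirroring Uldc1.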
+    mfc1(scratch, fd);
+    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
+    Mfhc1(scratch, fd);
+    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
+  }
 }
 
 
@@ -1354,6 +1539,252 @@
   addiu(sp, sp, stack_offset);
 }
 
+void MacroAssembler::AddPair(Register dst_low, Register dst_high,
+                             Register left_low, Register left_high,
+                             Register right_low, Register right_high) {
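+  // Add the 64-bit values in {left_high, left_low} and {right_high,
+  // right_low}, leaving the result in {dst_high, dst_low}. Clobbers s3 and
+  // s4 as scratch.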
+  Label no_overflow;
+  Register kScratchReg = s3;
+  Register kScratchReg2 = s4;
+  // Add low and high words separately.
+  Addu(dst_low, left_low, right_low);
+  Addu(dst_high, left_high, right_high);
+  // Check for a carry out of the low-word addition (unsigned overflow).
+  Sltu(kScratchReg, dst_low, left_low);
+  Sltu(kScratchReg2, dst_low, right_low);
+  Or(kScratchReg, kScratchReg2, kScratchReg);
+  Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
+  // Carry: increment the high word.
+  Addu(dst_high, dst_high, 0x1);
+  bind(&no_overflow);
+}
+
+void MacroAssembler::SubPair(Register dst_low, Register dst_high,
+                             Register left_low, Register left_high,
+                             Register right_low, Register right_high) {
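+  // Subtract the 64-bit value in {right_high, right_low} from the one in
+  // {left_high, left_low}. Clobbers s3 as scratch.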
+  Label no_overflow;
+  Register kScratchReg = s3;
+  // Subtract low and high words separately.
+  Subu(dst_low, left_low, right_low);
+  Subu(dst_high, left_high, right_high);
+  // Check for a borrow out of the low-word subtraction (unsigned underflow).
+  Sltu(kScratchReg, left_low, right_low);
+  Branch(&no_overflow, eq, kScratchReg, Operand(zero_reg));
+  // Borrow: decrement the high word.
+  Subu(dst_high, dst_high, 0x1);
+  bind(&no_overflow);
+}
+
+void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             Register shift) {
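+  // Shift the pair {src_high, src_low} left by the amount in |shift|, taken
+  // mod 64. |shift| is reduced in place, and s3 is used as scratch.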
+  Label less_than_32;
+  Label zero_shift;
+  Label word_shift;
+  Label done;
+  Register kScratchReg = s3;
+  And(shift, shift, 0x3F);
+  li(kScratchReg, 0x20);
+  Branch(&less_than_32, lt, shift, Operand(kScratchReg));
+
+  Branch(&word_shift, eq, shift, Operand(kScratchReg));
+  // Shift more than 32
+  Subu(kScratchReg, shift, kScratchReg);
+  mov(dst_low, zero_reg);
+  sllv(dst_high, src_low, kScratchReg);
+  Branch(&done);
+  // Word shift
+  bind(&word_shift);
+  mov(dst_low, zero_reg);
+  mov(dst_high, src_low);
+  Branch(&done);
+
+  bind(&less_than_32);
+  // Check if zero shift
+  Branch(&zero_shift, eq, shift, Operand(zero_reg));
+  // Shift less than 32
+  Subu(kScratchReg, kScratchReg, shift);
+  sllv(dst_high, src_high, shift);
+  sllv(dst_low, src_low, shift);
+  srlv(kScratchReg, src_low, kScratchReg);
+  Or(dst_high, dst_high, kScratchReg);
+  Branch(&done);
+  // Zero shift
+  bind(&zero_shift);
+  mov(dst_low, src_low);
+  mov(dst_high, src_high);
+  bind(&done);
+}
+
+void MacroAssembler::ShlPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             uint32_t shift) {
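+  // Immediate-count variant: the case analysis is resolved at code-emission
+  // time, so no branches are emitted.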
+  Register kScratchReg = s3;
+  shift = shift & 0x3F;
+  if (shift < 32) {
+    if (shift == 0) {
+      mov(dst_low, src_low);
+      mov(dst_high, src_high);
+    } else {
+      sll(dst_high, src_high, shift);
+      sll(dst_low, src_low, shift);
+      shift = 32 - shift;
+      srl(kScratchReg, src_low, shift);
+      Or(dst_high, dst_high, kScratchReg);
+    }
+  } else {
+    if (shift == 32) {
+      mov(dst_low, zero_reg);
+      mov(dst_high, src_low);
+    } else {
+      shift = shift - 32;
+      mov(dst_low, zero_reg);
+      sll(dst_high, src_low, shift);
+    }
+  }
+}
+
+void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             Register shift) {
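+  // Logical right shift of the pair by the amount in |shift|, taken mod 64;
+  // the mirror image of the variable ShlPair. Clobbers |shift| and s3.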
+  Label less_than_32;
+  Label zero_shift;
+  Label word_shift;
+  Label done;
+  Register kScratchReg = s3;
+  And(shift, shift, 0x3F);
+  li(kScratchReg, 0x20);
+  Branch(&less_than_32, lt, shift, Operand(kScratchReg));
+
+  Branch(&word_shift, eq, shift, Operand(kScratchReg));
+  // Shift more than 32
+  Subu(kScratchReg, shift, kScratchReg);
+  mov(dst_high, zero_reg);
+  srlv(dst_low, src_high, kScratchReg);
+  Branch(&done);
+  // Word shift
+  bind(&word_shift);
+  mov(dst_high, zero_reg);
+  mov(dst_low, src_high);
+  Branch(&done);
+
+  bind(&less_than_32);
+  // Check if zero shift
+  Branch(&zero_shift, eq, shift, Operand(zero_reg));
+  // Shift less than 32
+  Subu(kScratchReg, kScratchReg, shift);
+  srlv(dst_high, src_high, shift);
+  srlv(dst_low, src_low, shift);
+  sllv(kScratchReg, src_high, kScratchReg);
+  Or(dst_low, dst_low, kScratchReg);
+  Branch(&done);
+  // Zero shift
+  bind(&zero_shift);
+  mov(dst_low, src_low);
+  mov(dst_high, src_high);
+  bind(&done);
+}
+
+void MacroAssembler::ShrPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             uint32_t shift) {
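+  // Branch-free immediate-count logical right shift.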
+  Register kScratchReg = s3;
+  shift = shift & 0x3F;
+  if (shift < 32) {
+    if (shift == 0) {
+      mov(dst_low, src_low);
+      mov(dst_high, src_high);
+    } else {
+      srl(dst_high, src_high, shift);
+      srl(dst_low, src_low, shift);
+      shift = 32 - shift;
+      sll(kScratchReg, src_high, shift);
+      Or(dst_low, dst_low, kScratchReg);
+    }
+  } else {
+    if (shift == 32) {
+      mov(dst_high, zero_reg);
+      mov(dst_low, src_high);
+    } else {
+      shift = shift - 32;
+      mov(dst_high, zero_reg);
+      srl(dst_low, src_high, shift);
+    }
+  }
+}
+
+void MacroAssembler::SarPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             Register shift) {
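+  // Arithmetic right shift of the pair by the amount in |shift|, taken mod
+  // 64. For shifts of 32 or more, the high word becomes copies of the sign
+  // bit (a shift right by 31). Clobbers |shift|, s3 and s4.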
+  Label less_than_32;
+  Label zero_shift;
+  Label word_shift;
+  Label done;
+  Register kScratchReg = s3;
+  Register kScratchReg2 = s4;
+  And(shift, shift, 0x3F);
+  li(kScratchReg, 0x20);
+  Branch(&less_than_32, lt, shift, Operand(kScratchReg));
+
+  Branch(&word_shift, eq, shift, Operand(kScratchReg));
+
+  // Shift more than 32
+  li(kScratchReg2, 0x1F);
+  Subu(kScratchReg, shift, kScratchReg);
+  srav(dst_high, src_high, kScratchReg2);
+  srav(dst_low, src_high, kScratchReg);
+  Branch(&done);
+  // Word shift
+  bind(&word_shift);
+  li(kScratchReg2, 0x1F);
+  srav(dst_high, src_high, kScratchReg2);
+  mov(dst_low, src_high);
+  Branch(&done);
+
+  bind(&less_than_32);
+  // Check if zero shift
+  Branch(&zero_shift, eq, shift, Operand(zero_reg));
+
+  // Shift less than 32
+  Subu(kScratchReg, kScratchReg, shift);
+  srav(dst_high, src_high, shift);
+  srlv(dst_low, src_low, shift);
+  sllv(kScratchReg, src_high, kScratchReg);
+  Or(dst_low, dst_low, kScratchReg);
+  Branch(&done);
+  // Zero shift
+  bind(&zero_shift);
+  mov(dst_low, src_low);
+  mov(dst_high, src_high);
+  bind(&done);
+}
+
+void MacroAssembler::SarPair(Register dst_low, Register dst_high,
+                             Register src_low, Register src_high,
+                             uint32_t shift) {
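+  // Branch-free immediate-count arithmetic right shift.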
+  Register kScratchReg = s3;
+  shift = shift & 0x3F;
+  if (shift < 32) {
+    if (shift == 0) {
+      mov(dst_low, src_low);
+      mov(dst_high, src_high);
+    } else {
+      sra(dst_high, src_high, shift);
+      srl(dst_low, src_low, shift);
+      shift = 32 - shift;
+      sll(kScratchReg, src_high, shift);
+      Or(dst_low, dst_low, kScratchReg);
+    }
+  } else {
+    if (shift == 32) {
+      sra(dst_high, src_high, 31);
+      mov(dst_low, src_high);
+    } else {
+      shift = shift - 32;
+      sra(dst_high, src_high, 31);
+      sra(dst_low, src_high, shift);
+    }
+  }
+}
 
 void MacroAssembler::Ext(Register rt,
                          Register rs,
@@ -3654,6 +4085,7 @@
                               Label* gc_required,
                               AllocationFlags flags) {
   DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -3726,18 +4158,21 @@
   // to calculate the new top.
   Addu(result_end, result, Operand(object_size));
   Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
-  sw(result_end, MemOperand(top_address));
 
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    Addu(result, result, Operand(kHeapObjectTag));
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    sw(result_end, MemOperand(top_address));
   }
+
+  // Tag object.
+  Addu(result, result, Operand(kHeapObjectTag));
 }
 
 
 void MacroAssembler::Allocate(Register object_size, Register result,
                               Register result_end, Register scratch,
                               Label* gc_required, AllocationFlags flags) {
+  DCHECK((flags & ALLOCATION_FOLDED) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -3810,6 +4245,7 @@
   } else {
     Addu(result_end, result, Operand(object_size));
   }
+
   Branch(gc_required, Ugreater, result_end, Operand(alloc_limit));
 
   // Update allocation top. result temporarily holds the new top.
@@ -3817,14 +4253,104 @@
     And(alloc_limit, result_end, Operand(kObjectAlignmentMask));
     Check(eq, kUnalignedAllocationInNewSpace, alloc_limit, Operand(zero_reg));
   }
-  sw(result_end, MemOperand(top_address));
 
-  // Tag object if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    Addu(result, result, Operand(kHeapObjectTag));
+  if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) {
+    // The top pointer is not updated for allocation folding dominators.
+    sw(result_end, MemOperand(top_address));
   }
+
+  // Tag object.
+  Addu(result, result, Operand(kHeapObjectTag));
 }
 
+void MacroAssembler::FastAllocate(int object_size, Register result,
+                                  Register scratch1, Register scratch2,
+                                  AllocationFlags flags) {
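+  // FastAllocate bumps the allocation top without the limit check and GC
+  // fallback of Allocate; it is intended for allocations whose space was
+  // already reserved by an ALLOCATION_FOLDING_DOMINATOR.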
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(!AreAliased(result, scratch1, scratch2, t9, at));
+
+  // Convert the object size to bytes if it was given in words.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
+  }
+  DCHECK_EQ(0, object_size & kObjectAlignmentMask);
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  // Set up allocation top address and allocation limit registers.
+  Register top_address = scratch1;
+  // t9 must not alias any operand (see the AreAliased DCHECK above).
+  Register result_end = scratch2;
+  li(top_address, Operand(allocation_top));
+  lw(result, MemOperand(top_address));
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // safe in new-space because the limit of the heap is aligned there.
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    And(result_end, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    Branch(&aligned, eq, result_end, Operand(zero_reg));
+    li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    sw(result_end, MemOperand(result));
+    Addu(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+  }
+
+  Addu(result_end, result, Operand(object_size));
+
+  // The folding dominator did not update the top pointer, so write the new
+  // top back here for each folded allocation.
+  sw(result_end, MemOperand(top_address));
+
+  Addu(result, result, Operand(kHeapObjectTag));
+}
+
+void MacroAssembler::FastAllocate(Register object_size, Register result,
+                                  Register result_end, Register scratch,
+                                  AllocationFlags flags) {
+  // |object_size| and |result_end| may overlap if the DOUBLE_ALIGNMENT flag
+  // is not specified. Other registers must not overlap.
+  DCHECK(!AreAliased(object_size, result, scratch, t9, at));
+  DCHECK(!AreAliased(result_end, result, scratch, t9, at));
+  DCHECK((flags & DOUBLE_ALIGNMENT) == 0 || !object_size.is(result_end));
+
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+
+  // Set up allocation top address and allocation limit registers.
+  Register top_address = scratch;
+  // t9 must not alias any operand (see the AreAliased DCHECKs above).
+  li(top_address, Operand(allocation_top));
+  lw(result, MemOperand(top_address));
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // safe in new-space because the limit of the heap is aligned there.
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
+    And(result_end, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    Branch(&aligned, eq, result_end, Operand(zero_reg));
+    li(result_end, Operand(isolate()->factory()->one_pointer_filler_map()));
+    sw(result_end, MemOperand(result));
+    Addu(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+  }
+
+  // Calculate the new top. The object size may be in words, so a shift is
+  // required to get the number of bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    Lsa(result_end, result, object_size, kPointerSizeLog2);
+  } else {
+    Addu(result_end, result, Operand(object_size));
+  }
+
+  // The folding dominator did not update the top pointer, so write the new
+  // top back here for each folded allocation.
+  sw(result_end, MemOperand(top_address));
+
+  Addu(result, result, Operand(kHeapObjectTag));
+}
 
 void MacroAssembler::AllocateTwoByteString(Register result,
                                            Register length,
@@ -3841,12 +4367,8 @@
   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
 
   // Allocate two-byte string in new space.
-  Allocate(scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result,
@@ -3869,12 +4391,8 @@
   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
 
   // Allocate one-byte string in new space.
-  Allocate(scratch1,
-           result,
-           scratch2,
-           scratch3,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(scratch1, result, scratch2, scratch3, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Set the map, length and hash field.
   InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
@@ -3888,7 +4406,7 @@
                                                Register scratch2,
                                                Label* gc_required) {
   Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
   InitializeNewString(result,
                       length,
                       Heap::kConsStringMapRootIndex,
@@ -3901,12 +4419,8 @@
                                                Register scratch1,
                                                Register scratch2,
                                                Label* gc_required) {
-  Allocate(ConsString::kSize,
-           result,
-           scratch1,
-           scratch2,
-           gc_required,
-           TAG_OBJECT);
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -3919,7 +4433,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result,
                       length,
@@ -3935,7 +4449,7 @@
                                                  Register scratch2,
                                                  Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
-           TAG_OBJECT);
+           NO_ALLOCATION_FLAGS);
 
   InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
                       scratch1, scratch2);
@@ -3961,12 +4475,11 @@
                                         Register scratch2,
                                         Register heap_number_map,
                                         Label* need_gc,
-                                        TaggingMode tagging_mode,
                                         MutableMode mode) {
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
-           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+           NO_ALLOCATION_FLAGS);
 
   Heap::RootListIndex map_index = mode == MUTABLE
       ? Heap::kMutableHeapNumberMapRootIndex
@@ -3974,11 +4487,7 @@
   AssertIsRoot(heap_number_map, map_index);
 
   // Store heap number map in the allocated object.
-  if (tagging_mode == TAG_RESULT) {
-    sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
-  } else {
-    sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
-  }
+  sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
 }
 
 
@@ -4002,7 +4511,8 @@
   DCHECK(!result.is(value));
 
   // Allocate JSValue in new space.
-  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, TAG_OBJECT);
+  Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required,
+           NO_ALLOCATION_FLAGS);
 
   // Initialize the JSValue.
   LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2);
@@ -5615,6 +6125,16 @@
   }
 }
 
+void MacroAssembler::AssertGeneratorObject(Register object) {
+  if (emit_debug_code()) {
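+    // Verify that |object| is not a Smi and has instance type
+    // JS_GENERATOR_OBJECT_TYPE; t8 is used as scratch.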
+    STATIC_ASSERT(kSmiTag == 0);
+    SmiTst(object, t8);
+    Check(ne, kOperandIsASmiAndNotAGeneratorObject, t8, Operand(zero_reg));
+    GetObjectType(object, t8, t8);
+    Check(eq, kOperandIsNotAGeneratorObject, t8,
+          Operand(JS_GENERATOR_OBJECT_TYPE));
+  }
+}
 
 void MacroAssembler::AssertReceiver(Register object) {
   if (emit_debug_code()) {
@@ -6089,7 +6609,7 @@
                                                      Label* no_memento_found) {
   Label map_check;
   Label top_check;
-  ExternalReference new_space_allocation_top =
+  ExternalReference new_space_allocation_top_adr =
       ExternalReference::new_space_allocation_top_address(isolate());
   const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag;
   const int kMementoEndOffset = kMementoMapOffset + AllocationMemento::kSize;
@@ -6099,7 +6619,9 @@
   // If the object is in new space, we need to check whether it is on the same
   // page as the current top.
   Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
-  Xor(scratch_reg, scratch_reg, Operand(new_space_allocation_top));
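+  // XOR with the current top value: the page bits of (memento_end ^ top)
+  // are zero iff both addresses lie on the same page.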
+  li(at, Operand(new_space_allocation_top_adr));
+  lw(at, MemOperand(at));
+  Xor(scratch_reg, scratch_reg, Operand(at));
   And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask));
   Branch(&top_check, eq, scratch_reg, Operand(zero_reg));
   // The object is on a different page than allocation top. Bail out if the
@@ -6115,7 +6637,7 @@
   // we are below top.
   bind(&top_check);
   Addu(scratch_reg, receiver_reg, Operand(kMementoEndOffset));
-  li(at, Operand(new_space_allocation_top));
+  li(at, Operand(new_space_allocation_top_adr));
   lw(at, MemOperand(at));
   Branch(no_memento_found, gt, scratch_reg, Operand(at));
   // Memento map check.