Upgrade to V8 3.3

Merge V8 at 3.3.10.39

Simple merge; only the makefiles required updates.
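
Much of the upstream change in the diff below is a mechanical migration from
the old NearLabel type to a plain Label plus an explicit Label::Distance
hint. A rough sketch of the pattern, for reference only (the message string
is a placeholder; the calls are the same macro-assembler helpers that appear
in the diff):

    // Before: the label type itself encodes the short jump distance.
    NearLabel ok;
    j(zero, &ok);
    Abort("some message");
    bind(&ok);

    // After: a plain Label, with the distance passed as a hint to j().
    Label ok;
    j(zero, &ok, Label::kNear);
    Abort("some message");
    bind(&ok);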

Bug: 5688872
Change-Id: I14703f418235f5ce6013b9b3e2e502407a9f6dfd
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 7f027f7..2d28579 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -201,8 +201,8 @@
                                        Register scratch) {
   if (emit_debug_code()) {
     // Check that the object is not in new space.
-    NearLabel not_in_new_space;
-    InNewSpace(object, scratch, not_equal, &not_in_new_space);
+    Label not_in_new_space;
+    InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear);
     Abort("new-space object passed to RecordWriteHelper");
     bind(&not_in_new_space);
   }
@@ -221,6 +221,42 @@
 }
 
 
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch,
+                                Label::Distance near_jump) {
+  if (Serializer::enabled()) {
+    // Can't do arithmetic on external references if it might get serialized.
+    // The mask isn't really an address.  We load it as an external reference in
+    // case the size of the new space is different between the snapshot maker
+    // and the running system.
+    if (scratch.is(object)) {
+      movq(kScratchRegister, ExternalReference::new_space_mask(isolate()));
+      and_(scratch, kScratchRegister);
+    } else {
+      movq(scratch, ExternalReference::new_space_mask(isolate()));
+      and_(scratch, object);
+    }
+    movq(kScratchRegister, ExternalReference::new_space_start(isolate()));
+    cmpq(scratch, kScratchRegister);
+    j(cc, branch, near_jump);
+  } else {
+    ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
+    intptr_t new_space_start =
+        reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
+    movq(kScratchRegister, -new_space_start, RelocInfo::NONE);
+    if (scratch.is(object)) {
+      addq(scratch, kScratchRegister);
+    } else {
+      lea(scratch, Operand(object, kScratchRegister, times_1, 0));
+    }
+    and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask())));
+    j(cc, branch, near_jump);
+  }
+}
+
+
 void MacroAssembler::RecordWrite(Register object,
                                  int offset,
                                  Register value,
@@ -287,8 +323,8 @@
   Label done;
 
   if (emit_debug_code()) {
-    NearLabel okay;
-    JumpIfNotSmi(object, &okay);
+    Label okay;
+    JumpIfNotSmi(object, &okay, Label::kNear);
     Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis");
     bind(&okay);
 
@@ -344,13 +380,13 @@
 
 void MacroAssembler::AssertFastElements(Register elements) {
   if (emit_debug_code()) {
-    NearLabel ok;
+    Label ok;
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedArrayMapRootIndex);
-    j(equal, &ok);
+    j(equal, &ok, Label::kNear);
     CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
                 Heap::kFixedCOWArrayMapRootIndex);
-    j(equal, &ok);
+    j(equal, &ok, Label::kNear);
     Abort("JSObject with fast elements map has slow elements");
     bind(&ok);
   }
@@ -358,8 +394,8 @@
 
 
 void MacroAssembler::Check(Condition cc, const char* msg) {
-  NearLabel L;
-  j(cc, &L);
+  Label L;
+  j(cc, &L, Label::kNear);
   Abort(msg);
   // will not return here
   bind(&L);
@@ -371,9 +407,9 @@
   int frame_alignment_mask = frame_alignment - 1;
   if (frame_alignment > kPointerSize) {
     ASSERT(IsPowerOf2(frame_alignment));
-    NearLabel alignment_as_expected;
+    Label alignment_as_expected;
     testq(rsp, Immediate(frame_alignment_mask));
-    j(zero, &alignment_as_expected);
+    j(zero, &alignment_as_expected, Label::kNear);
     // Abort if stack is not aligned.
     int3();
     bind(&alignment_as_expected);
@@ -384,9 +420,9 @@
 void MacroAssembler::NegativeZeroTest(Register result,
                                       Register op,
                                       Label* then_label) {
-  NearLabel ok;
+  Label ok;
   testl(result, result);
-  j(not_zero, &ok);
+  j(not_zero, &ok, Label::kNear);
   testl(op, op);
   j(sign, then_label);
   bind(&ok);
@@ -425,9 +461,9 @@
 }
 
 
-void MacroAssembler::CallStub(CodeStub* stub) {
+void MacroAssembler::CallStub(CodeStub* stub, unsigned ast_id) {
   ASSERT(allow_stub_calls());  // calls are not allowed in some stubs
-  Call(stub->GetCode(), RelocInfo::CODE_TARGET);
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
 
@@ -650,6 +686,7 @@
   Label leave_exit_frame;
   Label write_back;
 
+  Factory* factory = isolate()->factory();
   ExternalReference next_address =
       ExternalReference::handle_scope_next_address();
   const int kNextOffset = 0;
@@ -697,7 +734,7 @@
 
   // Check if the function scheduled an exception.
   movq(rsi, scheduled_exception_address);
-  Cmp(Operand(rsi, 0), FACTORY->the_hole_value());
+  Cmp(Operand(rsi, 0), factory->the_hole_value());
   j(not_equal, &promote_scheduled_exception);
 
   LeaveApiExitFrame();
@@ -712,7 +749,7 @@
 
   bind(&empty_result);
   // It was zero; the result is undefined.
-  Move(rax, FACTORY->undefined_value());
+  Move(rax, factory->undefined_value());
   jmp(&prologue);
 
   // HandleScope limit has changed. Delete allocated extensions.
@@ -754,7 +791,7 @@
 
 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
                                    InvokeFlag flag,
-                                   CallWrapper* call_wrapper) {
+                                   const CallWrapper& call_wrapper) {
   // Calls are not allowed in some stubs.
   ASSERT(flag == JUMP_FUNCTION || allow_stub_calls());
 
@@ -763,7 +800,7 @@
   // parameter count to avoid emitting code to do the check.
   ParameterCount expected(0);
   GetBuiltinEntry(rdx, id);
-  InvokeCode(rdx, expected, expected, flag, call_wrapper);
+  InvokeCode(rdx, expected, expected, flag, call_wrapper, CALL_AS_METHOD);
 }
 
 
@@ -831,8 +868,8 @@
     if (allow_stub_calls()) {
       Assert(equal, "Uninitialized kSmiConstantRegister");
     } else {
-      NearLabel ok;
-      j(equal, &ok);
+      Label ok;
+      j(equal, &ok, Label::kNear);
       int3();
       bind(&ok);
     }
@@ -894,8 +931,8 @@
 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
   if (emit_debug_code()) {
     testb(dst, Immediate(0x01));
-    NearLabel ok;
-    j(zero, &ok);
+    Label ok;
+    j(zero, &ok, Label::kNear);
     if (allow_stub_calls()) {
       Abort("Integer32ToSmiField writing to non-smi location");
     } else {
@@ -1052,6 +1089,24 @@
 }
 
 
+void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2,
+                                 Label* on_not_smis,
+                                 Label::Distance near_jump) {
+  if (dst.is(src1) || dst.is(src2)) {
+    ASSERT(!src1.is(kScratchRegister));
+    ASSERT(!src2.is(kScratchRegister));
+    movq(kScratchRegister, src1);
+    or_(kScratchRegister, src2);
+    JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    or_(dst, src2);
+    JumpIfNotSmi(dst, on_not_smis, near_jump);
+  }
+}
+
+
 Condition MacroAssembler::CheckSmi(Register src) {
   ASSERT_EQ(0, kSmiTag);
   testb(src, Immediate(kSmiTagMask));
@@ -1162,6 +1217,95 @@
 }
 
 
+void MacroAssembler::JumpIfNotValidSmiValue(Register src,
+                                            Label* on_invalid,
+                                            Label::Distance near_jump) {
+  Condition is_valid = CheckInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid, near_jump);
+}
+
+
+void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
+                                                Label* on_invalid,
+                                                Label::Distance near_jump) {
+  Condition is_valid = CheckUInteger32ValidSmiValue(src);
+  j(NegateCondition(is_valid), on_invalid, near_jump);
+}
+
+
+void MacroAssembler::JumpIfSmi(Register src,
+                               Label* on_smi,
+                               Label::Distance near_jump) {
+  Condition smi = CheckSmi(src);
+  j(smi, on_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotSmi(Register src,
+                                  Label* on_not_smi,
+                                  Label::Distance near_jump) {
+  Condition smi = CheckSmi(src);
+  j(NegateCondition(smi), on_not_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpUnlessNonNegativeSmi(
+    Register src, Label* on_not_smi_or_negative,
+    Label::Distance near_jump) {
+  Condition non_negative_smi = CheckNonNegativeSmi(src);
+  j(NegateCondition(non_negative_smi), on_not_smi_or_negative, near_jump);
+}
+
+
+void MacroAssembler::JumpIfSmiEqualsConstant(Register src,
+                                             Smi* constant,
+                                             Label* on_equals,
+                                             Label::Distance near_jump) {
+  SmiCompare(src, constant);
+  j(equal, on_equals, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register src1,
+                                      Register src2,
+                                      Label* on_not_both_smi,
+                                      Label::Distance near_jump) {
+  Condition both_smi = CheckBothSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
+}
+
+
+void MacroAssembler::JumpUnlessBothNonNegativeSmi(Register src1,
+                                                  Register src2,
+                                                  Label* on_not_both_smi,
+                                                  Label::Distance near_jump) {
+  Condition both_smi = CheckBothNonNegativeSmi(src1, src2);
+  j(NegateCondition(both_smi), on_not_both_smi, near_jump);
+}
+
+
+void MacroAssembler::SmiTryAddConstant(Register dst,
+                                       Register src,
+                                       Smi* constant,
+                                       Label* on_not_smi_result,
+                                       Label::Distance near_jump) {
+  // Does not assume that src is a smi.
+  ASSERT_EQ(static_cast<int>(1), static_cast<int>(kSmiTagMask));
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src.is(kScratchRegister));
+
+  JumpIfNotSmi(src, on_not_smi_result, near_jump);
+  Register tmp = (dst.is(src) ? kScratchRegister : dst);
+  LoadSmiConstant(tmp, constant);
+  addq(tmp, src);
+  j(overflow, on_not_smi_result, near_jump);
+  if (dst.is(src)) {
+    movq(dst, tmp);
+  }
+}
+
+
 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1218,6 +1362,30 @@
 }
 
 
+void MacroAssembler::SmiAddConstant(Register dst,
+                                    Register src,
+                                    Smi* constant,
+                                    Label* on_not_smi_result,
+                                    Label::Distance near_jump) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+
+    LoadSmiConstant(kScratchRegister, constant);
+    addq(kScratchRegister, src);
+    j(overflow, on_not_smi_result, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    LoadSmiConstant(dst, constant);
+    addq(dst, src);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
   if (constant->value() == 0) {
     if (!dst.is(src)) {
@@ -1242,17 +1410,148 @@
 }
 
 
+void MacroAssembler::SmiSubConstant(Register dst,
+                                    Register src,
+                                    Smi* constant,
+                                    Label* on_not_smi_result,
+                                    Label::Distance near_jump) {
+  if (constant->value() == 0) {
+    if (!dst.is(src)) {
+      movq(dst, src);
+    }
+  } else if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    if (constant->value() == Smi::kMinValue) {
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test the non-negativeness before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result, near_jump);
+      LoadSmiConstant(kScratchRegister, constant);
+      subq(dst, kScratchRegister);
+    } else {
+      // Subtract by adding the negation.
+      LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+      addq(kScratchRegister, dst);
+      j(overflow, on_not_smi_result, near_jump);
+      movq(dst, kScratchRegister);
+    }
+  } else {
+    if (constant->value() == Smi::kMinValue) {
+      // Subtracting min-value from any non-negative value will overflow.
+      // We test the non-negativeness before doing the subtraction.
+      testq(src, src);
+      j(not_sign, on_not_smi_result, near_jump);
+      LoadSmiConstant(dst, constant);
+      // Adding and subtracting the min-value gives the same result, it only
+      // differs on the overflow bit, which we don't check here.
+      addq(dst, src);
+    } else {
+      // Subtract by adding the negation.
+      LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+      addq(dst, src);
+      j(overflow, on_not_smi_result, near_jump);
+    }
+  }
+}
+
+
+void MacroAssembler::SmiNeg(Register dst,
+                            Register src,
+                            Label* on_smi_result,
+                            Label::Distance near_jump) {
+  if (dst.is(src)) {
+    ASSERT(!dst.is(kScratchRegister));
+    movq(kScratchRegister, src);
+    neg(dst);  // Low 32 bits are retained as zero by negation.
+    // Test if result is zero or Smi::kMinValue.
+    cmpq(dst, kScratchRegister);
+    j(not_equal, on_smi_result, near_jump);
+    movq(src, kScratchRegister);
+  } else {
+    movq(dst, src);
+    neg(dst);
+    cmpq(dst, src);
+    // If the result is zero or Smi::kMinValue, negation failed to create a smi.
+    j(not_equal, on_smi_result, near_jump);
+  }
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src1);
+    addq(kScratchRegister, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    addq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
+void MacroAssembler::SmiAdd(Register dst,
+                            Register src1,
+                            const Operand& src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src1);
+    addq(kScratchRegister, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    movq(dst, kScratchRegister);
+  } else {
+    ASSERT(!src2.AddressUsesRegister(dst));
+    movq(dst, src1);
+    addq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
 void MacroAssembler::SmiAdd(Register dst,
                             Register src1,
                             Register src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible.
-  ASSERT(!dst.is(src2));
   if (!dst.is(src1)) {
-    movq(dst, src1);
+    if (emit_debug_code()) {
+      movq(kScratchRegister, src1);
+      addq(kScratchRegister, src2);
+      Check(no_overflow, "Smi addition overflow");
+    }
+    lea(dst, Operand(src1, src2, times_1, 0));
+  } else {
+    addq(dst, src2);
+    Assert(no_overflow, "Smi addition overflow");
   }
-  addq(dst, src2);
-  Assert(no_overflow, "Smi addition overflow");
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  ASSERT(!dst.is(src2));
+  if (dst.is(src1)) {
+    cmpq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    subq(dst, src2);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
 }
 
 
@@ -1270,6 +1569,25 @@
 
 void MacroAssembler::SmiSub(Register dst,
                             Register src1,
+                            const Operand& src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT_NOT_NULL(on_not_smi_result);
+  if (dst.is(src1)) {
+    movq(kScratchRegister, src2);
+    cmpq(src1, kScratchRegister);
+    j(overflow, on_not_smi_result, near_jump);
+    subq(src1, kScratchRegister);
+  } else {
+    movq(dst, src1);
+    subq(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+  }
+}
+
+
+void MacroAssembler::SmiSub(Register dst,
+                            Register src1,
                             const Operand& src2) {
   // No overflow checking. Use only when it's known that
   // overflowing is impossible (e.g., subtracting two positive smis).
@@ -1281,6 +1599,180 @@
 }
 
 
+void MacroAssembler::SmiMul(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT(!dst.is(src2));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+
+  if (dst.is(src1)) {
+    Label failure, zero_correct_result;
+    movq(kScratchRegister, src1);  // Create backup for later testing.
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, &failure, Label::kNear);
+
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    Label correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result, Label::kNear);
+
+    movq(dst, kScratchRegister);
+    xor_(dst, src2);
+    // Result was positive zero.
+    j(positive, &zero_correct_result, Label::kNear);
+
+    bind(&failure);  // Reused failure exit, restores src1.
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result, near_jump);
+
+    bind(&zero_correct_result);
+    Set(dst, 0);
+
+    bind(&correct_result);
+  } else {
+    SmiToInteger64(dst, src1);
+    imul(dst, src2);
+    j(overflow, on_not_smi_result, near_jump);
+    // Check for negative zero result.  If product is zero, and one
+    // argument is negative, go to slow case.
+    Label correct_result;
+    testq(dst, dst);
+    j(not_zero, &correct_result, Label::kNear);
+    // One of src1 and src2 is zero, so check whether the other is
+    // negative.
+    movq(kScratchRegister, src1);
+    xor_(kScratchRegister, src2);
+    j(negative, on_not_smi_result, near_jump);
+    bind(&correct_result);
+  }
+}
+
+
+void MacroAssembler::SmiDiv(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+
+  // Check for 0 divisor (result is +/-Infinity).
+  testq(src2, src2);
+  j(zero, on_not_smi_result, near_jump);
+
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(rax, src1);
+  // We need to rule out dividing Smi::kMinValue by -1, since that would
+  // overflow in idiv and raise an exception.
+  // We combine this with negative zero test (negative zero only happens
+  // when dividing zero by a negative number).
+
+  // We overshoot a little and go to slow case if we divide min-value
+  // by any negative value, not just -1.
+  Label safe_div;
+  testl(rax, Immediate(0x7fffffff));
+  j(not_zero, &safe_div, Label::kNear);
+  testq(src2, src2);
+  if (src1.is(rax)) {
+    j(positive, &safe_div, Label::kNear);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result, near_jump);
+  } else {
+    j(negative, on_not_smi_result, near_jump);
+  }
+  bind(&safe_div);
+
+  SmiToInteger32(src2, src2);
+  // Sign extend src1 into edx:eax.
+  cdq();
+  idivl(src2);
+  Integer32ToSmi(src2, src2);
+  // Check that the remainder is zero.
+  testl(rdx, rdx);
+  if (src1.is(rax)) {
+    Label smi_result;
+    j(zero, &smi_result, Label::kNear);
+    movq(src1, kScratchRegister);
+    jmp(on_not_smi_result, near_jump);
+    bind(&smi_result);
+  } else {
+    j(not_zero, on_not_smi_result, near_jump);
+  }
+  if (!dst.is(src1) && src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  Integer32ToSmi(dst, rax);
+}
+
+
+void MacroAssembler::SmiMod(Register dst,
+                            Register src1,
+                            Register src2,
+                            Label* on_not_smi_result,
+                            Label::Distance near_jump) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!src2.is(rax));
+  ASSERT(!src2.is(rdx));
+  ASSERT(!src1.is(rdx));
+  ASSERT(!src1.is(src2));
+
+  testq(src2, src2);
+  j(zero, on_not_smi_result, near_jump);
+
+  if (src1.is(rax)) {
+    movq(kScratchRegister, src1);
+  }
+  SmiToInteger32(rax, src1);
+  SmiToInteger32(src2, src2);
+
+  // Test for the edge case of dividing Smi::kMinValue by -1 (will overflow).
+  Label safe_div;
+  cmpl(rax, Immediate(Smi::kMinValue));
+  j(not_equal, &safe_div, Label::kNear);
+  cmpl(src2, Immediate(-1));
+  j(not_equal, &safe_div, Label::kNear);
+  // Retag inputs and go slow case.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  jmp(on_not_smi_result, near_jump);
+  bind(&safe_div);
+
+  // Sign extend eax into edx:eax.
+  cdq();
+  idivl(src2);
+  // Restore smi tags on inputs.
+  Integer32ToSmi(src2, src2);
+  if (src1.is(rax)) {
+    movq(src1, kScratchRegister);
+  }
+  // Check for a negative zero result.  If the result is zero, and the
+  // dividend is negative, go slow to return a floating point negative zero.
+  Label smi_result;
+  testl(rdx, rdx);
+  j(not_zero, &smi_result, Label::kNear);
+  testq(src1, src1);
+  j(negative, on_not_smi_result, near_jump);
+  bind(&smi_result);
+  Integer32ToSmi(dst, rdx);
+}
+
+
 void MacroAssembler::SmiNot(Register dst, Register src) {
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src.is(kScratchRegister));
@@ -1387,11 +1879,28 @@
 }
 
 
+void MacroAssembler::SmiShiftLogicalRightConstant(
+    Register dst, Register src, int shift_value,
+    Label* on_not_smi_result, Label::Distance near_jump) {
+  // Logical right shift interprets its result as an *unsigned* number.
+  if (dst.is(src)) {
+    UNIMPLEMENTED();  // Not used.
+  } else {
+    movq(dst, src);
+    if (shift_value == 0) {
+      testq(dst, dst);
+      j(negative, on_not_smi_result, near_jump);
+    }
+    shr(dst, Immediate(shift_value + kSmiShift));
+    shl(dst, Immediate(kSmiShift));
+  }
+}
+
+
 void MacroAssembler::SmiShiftLeft(Register dst,
                                   Register src1,
                                   Register src2) {
   ASSERT(!dst.is(rcx));
-  NearLabel result_ok;
   // Untag shift amount.
   if (!dst.is(src1)) {
     movq(dst, src1);
@@ -1403,6 +1912,45 @@
 }
 
 
+void MacroAssembler::SmiShiftLogicalRight(Register dst,
+                                          Register src1,
+                                          Register src2,
+                                          Label* on_not_smi_result,
+                                          Label::Distance near_jump) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(rcx));
+  // dst and src1 can be the same, because the one case that bails out
+  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
+  if (src1.is(rcx) || src2.is(rcx)) {
+    movq(kScratchRegister, rcx);
+  }
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
+  SmiToInteger32(rcx, src2);
+  orl(rcx, Immediate(kSmiShift));
+  shr_cl(dst);  // Shift is rcx modulo 0x1f + 32.
+  shl(dst, Immediate(kSmiShift));
+  testq(dst, dst);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    Label positive_result;
+    j(positive, &positive_result, Label::kNear);
+    if (src1.is(rcx)) {
+      movq(src1, kScratchRegister);
+    } else {
+      movq(src2, kScratchRegister);
+    }
+    jmp(on_not_smi_result, near_jump);
+    bind(&positive_result);
+  } else {
+    // src2 was zero and src1 negative.
+    j(negative, on_not_smi_result, near_jump);
+  }
+}
+
+
 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                              Register src1,
                                              Register src2) {
@@ -1430,6 +1978,45 @@
 }
 
 
+void MacroAssembler::SelectNonSmi(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  Label* on_not_smis,
+                                  Label::Distance near_jump) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(src1));
+  ASSERT(!dst.is(src2));
+  // Both operands must not be smis.
+#ifdef DEBUG
+  if (allow_stub_calls()) {  // Check contains a stub call.
+    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
+    Check(not_both_smis, "Both registers were smis in SelectNonSmi.");
+  }
+#endif
+  ASSERT_EQ(0, kSmiTag);
+  ASSERT_EQ(0, Smi::FromInt(0));
+  movl(kScratchRegister, Immediate(kSmiTagMask));
+  and_(kScratchRegister, src1);
+  testl(kScratchRegister, src2);
+  // If non-zero then neither is a smi (both are heap objects).
+  j(not_zero, on_not_smis, near_jump);
+
+  // Exactly one operand is a smi.
+  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+  // kScratchRegister still holds src1 & kSmiTagMask, which is zero or one.
+  subq(kScratchRegister, Immediate(1));
+  // If src1 is a smi, then the scratch register is all 1s, else all 0s.
+  movq(dst, src1);
+  xor_(dst, src2);
+  and_(dst, kScratchRegister);
+  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
+  xor_(dst, src1);
+  // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
+}
+
+
 SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                     Register src,
                                     int shift) {
@@ -1471,6 +2058,97 @@
 }
 
 
+void MacroAssembler::JumpIfNotString(Register object,
+                                     Register object_map,
+                                     Label* not_string,
+                                     Label::Distance near_jump) {
+  Condition is_smi = CheckSmi(object);
+  j(is_smi, not_string, near_jump);
+  CmpObjectType(object, FIRST_NONSTRING_TYPE, object_map);
+  j(above_equal, not_string, near_jump);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
+    Register first_object,
+    Register second_object,
+    Register scratch1,
+    Register scratch2,
+    Label* on_fail,
+    Label::Distance near_jump) {
+  // Check that both objects are not smis.
+  Condition either_smi = CheckEitherSmi(first_object, second_object);
+  j(either_smi, on_fail, near_jump);
+
+  // Load instance type for both strings.
+  movq(scratch1, FieldOperand(first_object, HeapObject::kMapOffset));
+  movq(scratch2, FieldOperand(second_object, HeapObject::kMapOffset));
+  movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
+  movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
+
+  // Check that both are flat ascii strings.
+  ASSERT(kNotStringTag != 0);
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+  andl(scratch1, Immediate(kFlatAsciiStringMask));
+  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  // Interleave the bits to check both scratch1 and scratch2 in one test.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmpl(scratch1,
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+  j(not_equal, on_fail, near_jump);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
+    Register instance_type,
+    Register scratch,
+    Label* failure,
+    Label::Distance near_jump) {
+  if (!scratch.is(instance_type)) {
+    movl(scratch, instance_type);
+  }
+
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+
+  andl(scratch, Immediate(kFlatAsciiStringMask));
+  cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
+  j(not_equal, failure, near_jump);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
+    Register first_object_instance_type,
+    Register second_object_instance_type,
+    Register scratch1,
+    Register scratch2,
+    Label* on_fail,
+    Label::Distance near_jump) {
+  // Load instance type for both strings.
+  movq(scratch1, first_object_instance_type);
+  movq(scratch2, second_object_instance_type);
+
+  // Check that both are flat ascii strings.
+  ASSERT(kNotStringTag != 0);
+  const int kFlatAsciiStringMask =
+      kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+  const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
+
+  andl(scratch1, Immediate(kFlatAsciiStringMask));
+  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  // Interleave the bits to check both scratch1 and scratch2 in one test.
+  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+  cmpl(scratch1,
+       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+  j(not_equal, on_fail, near_jump);
+}
+
+
 
 void MacroAssembler::Move(Register dst, Register src) {
   if (!dst.is(src)) {
@@ -1604,12 +2282,14 @@
 }
 
 
-void MacroAssembler::Call(Handle<Code> code_object, RelocInfo::Mode rmode) {
+void MacroAssembler::Call(Handle<Code> code_object,
+                          RelocInfo::Mode rmode,
+                          unsigned ast_id) {
 #ifdef DEBUG
   int end_position = pc_offset() + CallSize(code_object);
 #endif
   ASSERT(RelocInfo::IsCodeTarget(rmode));
-  call(code_object, rmode);
+  call(code_object, rmode, ast_id);
 #ifdef DEBUG
   CHECK_EQ(end_position, pc_offset());
 #endif
@@ -1774,9 +2454,9 @@
   // Before returning we restore the context from the frame pointer if not NULL.
   // The frame pointer is NULL in the exception handler of a JS entry frame.
   Set(rsi, 0);  // Tentatively set context pointer to NULL
-  NearLabel skip;
+  Label skip;
   cmpq(rbp, Immediate(0));
-  j(equal, &skip);
+  j(equal, &skip, Label::kNear);
   movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   bind(&skip);
   ret(0);
@@ -1794,12 +2474,12 @@
   Load(rsp, handler_address);
 
   // Unwind the handlers until the ENTRY handler is found.
-  NearLabel loop, done;
+  Label loop, done;
   bind(&loop);
   // Load the type of the current stack handler.
   const int kStateOffset = StackHandlerConstants::kStateOffset;
   cmpq(Operand(rsp, kStateOffset), Immediate(StackHandler::ENTRY));
-  j(equal, &done);
+  j(equal, &done, Label::kNear);
   // Fetch the next handler in the list.
   const int kNextOffset = StackHandlerConstants::kNextOffset;
   movq(rsp, Operand(rsp, kNextOffset));
@@ -1881,8 +2561,8 @@
 void MacroAssembler::CheckMap(Register obj,
                               Handle<Map> map,
                               Label* fail,
-                              bool is_heap_object) {
-  if (!is_heap_object) {
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, fail);
   }
   Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
@@ -1890,19 +2570,75 @@
 }
 
 
+void MacroAssembler::ClampUint8(Register reg) {
+  Label done;
+  testl(reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  setcc(negative, reg);  // 1 if negative, 0 if positive.
+  decb(reg);  // 0 if negative, 255 if positive.
+  bind(&done);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(XMMRegister input_reg,
+                                        XMMRegister temp_xmm_reg,
+                                        Register result_reg,
+                                        Register temp_reg) {
+  Label done;
+  Set(result_reg, 0);
+  xorps(temp_xmm_reg, temp_xmm_reg);
+  ucomisd(input_reg, temp_xmm_reg);
+  j(below, &done, Label::kNear);
+  uint64_t one_half = BitCast<uint64_t, double>(0.5);
+  Set(temp_reg, one_half);
+  movq(temp_xmm_reg, temp_reg);
+  addsd(temp_xmm_reg, input_reg);
+  cvttsd2si(result_reg, temp_xmm_reg);
+  testl(result_reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  Set(result_reg, 255);
+  bind(&done);
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  movq(descriptors, FieldOperand(map,
+                                 Map::kInstanceDescriptorsOrBitField3Offset));
+  Label not_smi;
+  JumpIfNotSmi(descriptors, &not_smi, Label::kNear);
+  Move(descriptors, isolate()->factory()->empty_descriptor_array());
+  bind(&not_smi);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+  j(equal, success, RelocInfo::CODE_TARGET);
+
+  bind(&fail);
+}
+
+
 void MacroAssembler::AbortIfNotNumber(Register object) {
-  NearLabel ok;
+  Label ok;
   Condition is_smi = CheckSmi(object);
-  j(is_smi, &ok);
+  j(is_smi, &ok, Label::kNear);
   Cmp(FieldOperand(object, HeapObject::kMapOffset),
-      FACTORY->heap_number_map());
+      isolate()->factory()->heap_number_map());
   Assert(equal, "Operand not a number");
   bind(&ok);
 }
 
 
 void MacroAssembler::AbortIfSmi(Register object) {
-  NearLabel ok;
   Condition is_smi = CheckSmi(object);
   Assert(NegateCondition(is_smi), "Operand is a smi");
 }
@@ -1965,10 +2701,10 @@
   j(not_equal, miss);
 
   // Make sure that the function has an instance prototype.
-  NearLabel non_instance;
+  Label non_instance;
   testb(FieldOperand(result, Map::kBitFieldOffset),
         Immediate(1 << Map::kHasNonInstancePrototype));
-  j(not_zero, &non_instance);
+  j(not_zero, &non_instance, Label::kNear);
 
   // Get the prototype or initial map from the function.
   movq(result,
@@ -1981,13 +2717,13 @@
   j(equal, miss);
 
   // If the function does not have an initial map, we're done.
-  NearLabel done;
+  Label done;
   CmpObjectType(result, MAP_TYPE, kScratchRegister);
-  j(not_equal, &done);
+  j(not_equal, &done, Label::kNear);
 
   // Get the prototype from the initial map.
   movq(result, FieldOperand(result, Map::kPrototypeOffset));
-  jmp(&done);
+  jmp(&done, Label::kNear);
 
   // Non-instance prototype: Fetch prototype from constructor field
   // in initial map.
@@ -2044,25 +2780,44 @@
 #endif  // ENABLE_DEBUGGER_SUPPORT
 
 
+void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
+  // This macro takes the dst register to make the code more readable
+  // at the call sites. However, the dst register has to be rcx to
+  // follow the calling convention which requires the call type to be
+  // in rcx.
+  ASSERT(dst.is(rcx));
+  if (call_kind == CALL_AS_FUNCTION) {
+    LoadSmiConstant(dst, Smi::FromInt(1));
+  } else {
+    LoadSmiConstant(dst, Smi::FromInt(0));
+  }
+}
+
+
 void MacroAssembler::InvokeCode(Register code,
                                 const ParameterCount& expected,
                                 const ParameterCount& actual,
                                 InvokeFlag flag,
-                                CallWrapper* call_wrapper) {
-  NearLabel done;
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
+  Label done;
   InvokePrologue(expected,
                  actual,
                  Handle<Code>::null(),
                  code,
                  &done,
                  flag,
-                 call_wrapper);
+                 Label::kNear,
+                 call_wrapper,
+                 call_kind);
   if (flag == CALL_FUNCTION) {
-    if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(rcx, call_kind);
     call(code);
-    if (call_wrapper != NULL) call_wrapper->AfterCall();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(rcx, call_kind);
     jmp(code);
   }
   bind(&done);
@@ -2074,8 +2829,9 @@
                                 const ParameterCount& actual,
                                 RelocInfo::Mode rmode,
                                 InvokeFlag flag,
-                                CallWrapper* call_wrapper) {
-  NearLabel done;
+                                const CallWrapper& call_wrapper,
+                                CallKind call_kind) {
+  Label done;
   Register dummy = rax;
   InvokePrologue(expected,
                  actual,
@@ -2083,13 +2839,17 @@
                  dummy,
                  &done,
                  flag,
-                 call_wrapper);
+                 Label::kNear,
+                 call_wrapper,
+                 call_kind);
   if (flag == CALL_FUNCTION) {
-    if (call_wrapper != NULL) call_wrapper->BeforeCall(CallSize(code));
+    call_wrapper.BeforeCall(CallSize(code));
+    SetCallKind(rcx, call_kind);
     Call(code, rmode);
-    if (call_wrapper != NULL) call_wrapper->AfterCall();
+    call_wrapper.AfterCall();
   } else {
     ASSERT(flag == JUMP_FUNCTION);
+    SetCallKind(rcx, call_kind);
     Jump(code, rmode);
   }
   bind(&done);
@@ -2099,7 +2859,8 @@
 void MacroAssembler::InvokeFunction(Register function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   ASSERT(function.is(rdi));
   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
@@ -2110,14 +2871,15 @@
   movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
 
   ParameterCount expected(rbx);
-  InvokeCode(rdx, expected, actual, flag, call_wrapper);
+  InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
 }
 
 
 void MacroAssembler::InvokeFunction(JSFunction* function,
                                     const ParameterCount& actual,
                                     InvokeFlag flag,
-                                    CallWrapper* call_wrapper) {
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
   ASSERT(function->is_compiled());
   // Get the function and setup the context.
   Move(rdi, Handle<JSFunction>(function));
@@ -2128,7 +2890,7 @@
     // the Code object every time we call the function.
     movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
     ParameterCount expected(function->shared()->formal_parameter_count());
-    InvokeCode(rdx, expected, actual, flag, call_wrapper);
+    InvokeCode(rdx, expected, actual, flag, call_wrapper, call_kind);
   } else {
     // Invoke the cached code.
     Handle<Code> code(function->code());
@@ -2138,7 +2900,79 @@
                actual,
                RelocInfo::CODE_TARGET,
                flag,
-               call_wrapper);
+               call_wrapper,
+               call_kind);
+  }
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_register,
+                                    Label* done,
+                                    InvokeFlag flag,
+                                    Label::Distance near_jump,
+                                    const CallWrapper& call_wrapper,
+                                    CallKind call_kind) {
+  bool definitely_matches = false;
+  Label invoke;
+  if (expected.is_immediate()) {
+    ASSERT(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      Set(rax, actual.immediate());
+      if (expected.immediate() ==
+              SharedFunctionInfo::kDontAdaptArgumentsSentinel) {
+        // Don't worry about adapting arguments for built-ins that
+        // don't want that done. Skip adaptation code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        Set(rbx, expected.immediate());
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      // Expected is in register, actual is immediate. This is the
+      // case when we invoke function values without going through the
+      // IC mechanism.
+      cmpq(expected.reg(), Immediate(actual.immediate()));
+      j(equal, &invoke, Label::kNear);
+      ASSERT(expected.reg().is(rbx));
+      Set(rax, actual.immediate());
+    } else if (!expected.reg().is(actual.reg())) {
+      // Both expected and actual are in (different) registers. This
+      // is the case when we invoke functions using call and apply.
+      cmpq(expected.reg(), actual.reg());
+      j(equal, &invoke, Label::kNear);
+      ASSERT(actual.reg().is(rax));
+      ASSERT(expected.reg().is(rbx));
+    }
+  }
+
+  if (!definitely_matches) {
+    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    if (!code_constant.is_null()) {
+      movq(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT);
+      addq(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    } else if (!code_register.is(rdx)) {
+      movq(rdx, code_register);
+    }
+
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(adaptor));
+      SetCallKind(rcx, call_kind);
+      Call(adaptor, RelocInfo::CODE_TARGET);
+      call_wrapper.AfterCall();
+      jmp(done, near_jump);
+    } else {
+      SetCallKind(rcx, call_kind);
+      Jump(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&invoke);
   }
 }
 
@@ -2152,7 +2986,7 @@
   push(kScratchRegister);
   if (emit_debug_code()) {
     movq(kScratchRegister,
-         FACTORY->undefined_value(),
+         isolate()->factory()->undefined_value(),
          RelocInfo::EMBEDDED_OBJECT);
     cmpq(Operand(rsp, 0), kScratchRegister);
     Check(not_equal, "code object not properly patched");
@@ -2320,7 +3154,7 @@
   // Check the context is a global context.
   if (emit_debug_code()) {
     Cmp(FieldOperand(scratch, HeapObject::kMapOffset),
-        FACTORY->global_context_map());
+        isolate()->factory()->global_context_map());
     Check(equal, "JSGlobalObject::global_context should be a global context.");
   }
 
@@ -2822,7 +3656,7 @@
   movq(map, FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   if (emit_debug_code()) {
     Label ok, fail;
-    CheckMap(map, FACTORY->meta_map(), &fail, false);
+    CheckMap(map, isolate()->factory()->meta_map(), &fail, DO_SMI_CHECK);
     jmp(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");