Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE

This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e
and updates the x64 makefile so that it no longer breaks the
x64 build.

FPIIM-449

Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 9952eb3..e72d40b 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -251,38 +251,9 @@
                                 Condition cc,
                                 Label* branch,
                                 Label::Distance distance) {
-  if (serializer_enabled()) {
-    // Can't do arithmetic on external references if it might get serialized.
-    // The mask isn't really an address.  We load it as an external reference in
-    // case the size of the new space is different between the snapshot maker
-    // and the running system.
-    if (scratch.is(object)) {
-      Move(kScratchRegister, ExternalReference::new_space_mask(isolate()));
-      andp(scratch, kScratchRegister);
-    } else {
-      Move(scratch, ExternalReference::new_space_mask(isolate()));
-      andp(scratch, object);
-    }
-    Move(kScratchRegister, ExternalReference::new_space_start(isolate()));
-    cmpp(scratch, kScratchRegister);
-    j(cc, branch, distance);
-  } else {
-    DCHECK(kPointerSize == kInt64Size
-        ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
-        : kPointerSize == kInt32Size);
-    intptr_t new_space_start =
-        reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart());
-    Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start),
-         Assembler::RelocInfoNone());
-    if (scratch.is(object)) {
-      addp(scratch, kScratchRegister);
-    } else {
-      leap(scratch, Operand(object, kScratchRegister, times_1, 0));
-    }
-    andp(scratch,
-         Immediate(static_cast<int32_t>(isolate()->heap()->NewSpaceMask())));
-    j(cc, branch, distance);
-  }
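+  // An object is in new space exactly when its page has the IN_FROM_SPACE or
+  // IN_TO_SPACE flag set, so a single page-flag test suffices; no address
+  // arithmetic against the new-space boundaries is needed.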
+  const int mask =
+      (1 << MemoryChunk::IN_FROM_SPACE) | (1 << MemoryChunk::IN_TO_SPACE);
+  CheckPageFlag(object, scratch, mask, cc, branch, distance);
 }
 
 
@@ -507,6 +478,90 @@
   }
 }
 
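+// Write barrier for a store into a JSFunction's code entry field: tells the
+// incremental marker about the updated slot, analogous to what
+// RecordWriteField does for ordinary tagged fields.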
+void MacroAssembler::RecordWriteCodeEntryField(Register js_function,
+                                               Register code_entry,
+                                               Register scratch) {
+  const int offset = JSFunction::kCodeEntryOffset;
+
+  // The input registers are fixed to make calling the C write barrier function
+  // easier.
+  DCHECK(js_function.is(rdi));
+  DCHECK(code_entry.is(rcx));
+  DCHECK(scratch.is(rax));
+
+  // Since a code entry (value) is always in old space, we don't need to
+  // update the remembered set. If incremental marking is off, there is
+  // nothing for us to do.
+  if (!FLAG_incremental_marking) return;
+
+  AssertNotSmi(js_function);
+
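+  // In debug code, verify that |code_entry| matches what is currently stored
+  // in the function's code entry field.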
+  if (emit_debug_code()) {
+    Label ok;
+    leap(scratch, FieldOperand(js_function, offset));
+    cmpp(code_entry, Operand(scratch, 0));
+    j(equal, &ok, Label::kNear);
+    int3();
+    bind(&ok);
+  }
+
+  // First, check if a write barrier is even needed. The tests below skip
+  // the barrier unless both pages involved are of interest to the marker.
+  Label done;
+
+  CheckPageFlag(code_entry, scratch,
+                MemoryChunk::kPointersToHereAreInterestingMask, zero, &done,
+                Label::kNear);
+  CheckPageFlag(js_function, scratch,
+                MemoryChunk::kPointersFromHereAreInterestingMask, zero, &done,
+                Label::kNear);
+
+  // Save input registers.
+  Push(js_function);
+  Push(code_entry);
+
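+  // Compute the address of the code entry slot; it becomes the slot argument
+  // of the C function below.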
+  const Register dst = scratch;
+  leap(dst, FieldOperand(js_function, offset));
+
+  // Save caller-saved registers.
+  PushCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
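+  // The C function takes (host JSFunction, slot address, isolate).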
+  int argument_count = 3;
+  PrepareCallCFunction(argument_count);
+
+  // Load the argument registers.
+  if (arg_reg_1.is(rcx)) {
+    // Windows calling convention.
+    DCHECK(arg_reg_2.is(rdx) && arg_reg_3.is(r8));
+
+    movp(arg_reg_1, js_function);  // rcx gets rdi.
+    movp(arg_reg_2, dst);          // rdx gets rax.
+  } else {
+    // AMD64 calling convention.
+    DCHECK(arg_reg_1.is(rdi) && arg_reg_2.is(rsi) && arg_reg_3.is(rdx));
+
+    // rdi is already loaded with js_function.
+    movp(arg_reg_2, dst);  // rsi gets rax.
+  }
+  Move(arg_reg_3, ExternalReference::isolate_address(isolate()));
+
+  {
+    AllowExternalCallThatCantCauseGC scope(this);
+    CallCFunction(
+        ExternalReference::incremental_marking_record_write_code_entry_function(
+            isolate()),
+        argument_count);
+  }
+
+  // Restore caller-saved registers.
+  PopCallerSaved(kDontSaveFPRegs, js_function, code_entry);
+
+  // Restore input registers.
+  Pop(code_entry);
+  Pop(js_function);
+
+  bind(&done);
+}
 
 void MacroAssembler::Assert(Condition cc, BailoutReason reason) {
   if (emit_debug_code()) Check(cc, reason);
@@ -589,9 +644,9 @@
     // We don't actually want to generate a pile of code for this, so just
     // claim there is a stack frame, without generating one.
     FrameScope scope(this, StackFrame::NONE);
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   } else {
-    CallRuntime(Runtime::kAbort, 1);
+    CallRuntime(Runtime::kAbort);
   }
   // Control will not return here.
   int3();
@@ -690,18 +745,6 @@
 }
 
 
-void MacroAssembler::InvokeBuiltin(int native_context_index, InvokeFlag flag,
-                                   const CallWrapper& call_wrapper) {
-  // You can't call a builtin without a valid frame.
-  DCHECK(flag == JUMP_FUNCTION || has_frame());
-
-  // Fake a parameter count to avoid emitting code to do the check.
-  ParameterCount expected(0);
-  LoadNativeContextSlot(native_context_index, rdi);
-  InvokeFunctionCode(rdi, no_reg, expected, expected, flag, call_wrapper);
-}
-
-
 #define REG(Name) \
   { Register::kCode_##Name }
 
@@ -823,6 +866,30 @@
 }
 
 
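+// cvtlsi2ss writes only the low 32 bits of the destination XMM register (a
+// partial register write), so both variants zero the register first to break
+// the false dependency on its previous contents.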
+void MacroAssembler::Cvtlsi2ss(XMMRegister dst, Register src) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vxorps(dst, dst, dst);
+    vcvtlsi2ss(dst, dst, src);
+  } else {
+    xorps(dst, dst);
+    cvtlsi2ss(dst, src);
+  }
+}
+
+
+void MacroAssembler::Cvtlsi2ss(XMMRegister dst, const Operand& src) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vxorps(dst, dst, dst);
+    vcvtlsi2ss(dst, dst, src);
+  } else {
+    xorps(dst, dst);
+    cvtlsi2ss(dst, src);
+  }
+}
+
+
 void MacroAssembler::Cvtqsi2ss(XMMRegister dst, Register src) {
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
@@ -918,6 +985,26 @@
 }
 
 
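+// Truncating float32-to-int32 conversion; uses the AVX encoding when the CPU
+// supports it and falls back to the SSE instruction otherwise.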
+void MacroAssembler::Cvttss2si(Register dst, XMMRegister src) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vcvttss2si(dst, src);
+  } else {
+    cvttss2si(dst, src);
+  }
+}
+
+
+void MacroAssembler::Cvttss2si(Register dst, const Operand& src) {
+  if (CpuFeatures::IsSupported(AVX)) {
+    CpuFeatureScope scope(this, AVX);
+    vcvttss2si(dst, src);
+  } else {
+    cvttss2si(dst, src);
+  }
+}
+
+
 void MacroAssembler::Cvttsd2si(Register dst, XMMRegister src) {
   if (CpuFeatures::IsSupported(AVX)) {
     CpuFeatureScope scope(this, AVX);
@@ -3865,6 +3952,19 @@
 }
 
 
+void MacroAssembler::AssertReceiver(Register object) {
+  if (emit_debug_code()) {
+    testb(object, Immediate(kSmiTagMask));
+    Check(not_equal, kOperandIsASmiAndNotAReceiver);
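+    // CmpObjectType clobbers its map register, which here is |object| itself,
+    // so preserve the register around the type check.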
+    Push(object);
+    STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE);
+    CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, object);
+    Pop(object);
+    Check(above_equal, kOperandIsNotAReceiver);
+  }
+}
+
+
 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object) {
   if (emit_debug_code()) {
     Label done_checking;
@@ -4168,7 +4268,7 @@
     }
     Push(fun);
     Push(fun);
-    CallRuntime(Runtime::kDebugPrepareStepInIfStepping, 1);
+    CallRuntime(Runtime::kDebugPrepareStepInIfStepping);
     Pop(fun);
     if (new_target.is_valid()) {
       Pop(new_target);
@@ -5040,7 +5140,7 @@
     incp(source);
     incp(destination);
     decl(length);
-    j(not_zero, &short_loop);
+    j(not_zero, &short_loop, Label::kNear);
   }
 
   bind(&done);
@@ -5051,13 +5151,13 @@
                                                 Register end_address,
                                                 Register filler) {
   Label loop, entry;
-  jmp(&entry);
+  jmp(&entry, Label::kNear);
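+  // Enter at the condition check so that an empty range writes no fillers.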
   bind(&loop);
   movp(Operand(current_address, 0), filler);
   addp(current_address, Immediate(kPointerSize));
   bind(&entry);
   cmpp(current_address, end_address);
-  j(below, &loop);
+  j(below, &loop, Label::kNear);
 }
 
 
@@ -5363,7 +5463,7 @@
 }
 
 
-void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+void MacroAssembler::CheckEnumCache(Label* call_runtime) {
   Label next, start;
   Register empty_fixed_array_value = r8;
   LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
@@ -5404,10 +5504,11 @@
 
   bind(&no_elements);
   movp(rcx, FieldOperand(rbx, Map::kPrototypeOffset));
-  cmpp(rcx, null_value);
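+  // The prototype walk terminates when it reaches the null value.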
+  CompareRoot(rcx, Heap::kNullValueRootIndex);
   j(not_equal, &next);
 }
 
+
 void MacroAssembler::TestJSArrayForAllocationMemento(
     Register receiver_reg,
     Register scratch_reg,