Revert "Revert "Upgrade to 5.0.71.48"" DO NOT MERGE

This reverts commit f2e3994fa5148cc3d9946666f0b0596290192b0e
and updates the x64 makefile so that the x64 build no longer
breaks.

FPIIM-449

Change-Id: Ib83e35bfbae6af627451c926a9650ec57c045605
(cherry picked from commit 109988c7ccb6f3fd1a58574fa3dfb88beaef6632)

diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 4d6aacd..21c34fc 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -30,8 +30,8 @@
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/js-builtin-reducer.h"
 #include "src/compiler/js-call-reducer.h"
-#include "src/compiler/js-context-relaxation.h"
 #include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-create-lowering.h"
 #include "src/compiler/js-frame-specialization.h"
 #include "src/compiler/js-generic-lowering.h"
 #include "src/compiler/js-global-object-specialization.h"
@@ -276,11 +276,8 @@
         info()->isolate(), instruction_zone(), instruction_blocks);
   }
 
-  void InitializeRegisterAllocationData(const RegisterConfiguration* config,
-                                        CallDescriptor* descriptor,
-                                        const char* debug_name) {
+  void InitializeFrameData(CallDescriptor* descriptor) {
     DCHECK(frame_ == nullptr);
-    DCHECK(register_allocation_data_ == nullptr);
     int fixed_frame_size = 0;
     if (descriptor != nullptr) {
       fixed_frame_size = (descriptor->IsCFunctionCall())
@@ -289,6 +286,12 @@
                              : StandardFrameConstants::kFixedSlotCount;
     }
     frame_ = new (instruction_zone()) Frame(fixed_frame_size, descriptor);
+  }
+
+  void InitializeRegisterAllocationData(const RegisterConfiguration* config,
+                                        CallDescriptor* descriptor,
+                                        const char* debug_name) {
+    DCHECK(register_allocation_data_ == nullptr);
     register_allocation_data_ = new (register_allocation_zone())
         RegisterAllocationData(config, register_allocation_zone(), frame(),
                                sequence(), debug_name);
@@ -512,7 +515,7 @@
     if (data->info()->shared_info()->HasBytecodeArray()) {
       BytecodeGraphBuilder graph_builder(temp_zone, data->info(),
                                          data->jsgraph());
-      succeeded = graph_builder.CreateGraph(stack_check);
+      succeeded = graph_builder.CreateGraph();
     } else {
       AstGraphBuilderWithPositions graph_builder(
           temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
@@ -536,7 +539,7 @@
                                               data->common());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
                                          data->common(), data->machine());
-    JSCallReducer call_reducer(data->jsgraph(),
+    JSCallReducer call_reducer(&graph_reducer, data->jsgraph(),
                                data->info()->is_deoptimization_enabled()
                                    ? JSCallReducer::kDeoptimizationEnabled
                                    : JSCallReducer::kNoFlags,
@@ -549,17 +552,19 @@
     JSFrameSpecialization frame_specialization(data->info()->osr_frame(),
                                                data->jsgraph());
     JSGlobalObjectSpecialization global_object_specialization(
-        &graph_reducer, data->jsgraph(),
-        data->info()->is_deoptimization_enabled()
-            ? JSGlobalObjectSpecialization::kDeoptimizationEnabled
-            : JSGlobalObjectSpecialization::kNoFlags,
-        data->native_context(), data->info()->dependencies());
+        &graph_reducer, data->jsgraph(), data->native_context(),
+        data->info()->dependencies());
+    JSNativeContextSpecialization::Flags flags =
+        JSNativeContextSpecialization::kNoFlags;
+    if (data->info()->is_bailout_on_uninitialized()) {
+      flags |= JSNativeContextSpecialization::kBailoutOnUninitialized;
+    }
+    if (data->info()->is_deoptimization_enabled()) {
+      flags |= JSNativeContextSpecialization::kDeoptimizationEnabled;
+    }
     JSNativeContextSpecialization native_context_specialization(
-        &graph_reducer, data->jsgraph(),
-        data->info()->is_deoptimization_enabled()
-            ? JSNativeContextSpecialization::kDeoptimizationEnabled
-            : JSNativeContextSpecialization::kNoFlags,
-        data->native_context(), data->info()->dependencies(), temp_zone);
+        &graph_reducer, data->jsgraph(), flags, data->native_context(),
+        data->info()->dependencies(), temp_zone);
     JSInliningHeuristic inlining(&graph_reducer,
                                  data->info()->is_inlining_enabled()
                                      ? JSInliningHeuristic::kGeneralInlining
@@ -570,7 +575,9 @@
     if (data->info()->is_frame_specializing()) {
       AddReducer(data, &graph_reducer, &frame_specialization);
     }
-    AddReducer(data, &graph_reducer, &global_object_specialization);
+    if (data->info()->is_deoptimization_enabled()) {
+      AddReducer(data, &graph_reducer, &global_object_specialization);
+    }
     AddReducer(data, &graph_reducer, &native_context_specialization);
     AddReducer(data, &graph_reducer, &context_specialization);
     AddReducer(data, &graph_reducer, &call_reducer);
@@ -610,6 +617,13 @@
                                               data->common());
     LoadElimination load_elimination(&graph_reducer);
     JSBuiltinReducer builtin_reducer(&graph_reducer, data->jsgraph());
+    MaybeHandle<LiteralsArray> literals_array =
+        data->info()->is_native_context_specializing()
+            ? handle(data->info()->closure()->literals(), data->isolate())
+            : MaybeHandle<LiteralsArray>();
+    JSCreateLowering create_lowering(
+        &graph_reducer, data->info()->dependencies(), data->jsgraph(),
+        literals_array, temp_zone);
     JSTypedLowering::Flags typed_lowering_flags = JSTypedLowering::kNoFlags;
     if (data->info()->is_deoptimization_enabled()) {
       typed_lowering_flags |= JSTypedLowering::kDeoptimizationEnabled;
@@ -629,6 +643,9 @@
                                          data->common(), data->machine());
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &builtin_reducer);
+    if (data->info()->is_deoptimization_enabled()) {
+      AddReducer(data, &graph_reducer, &create_lowering);
+    }
     AddReducer(data, &graph_reducer, &typed_lowering);
     AddReducer(data, &graph_reducer, &intrinsic_lowering);
     AddReducer(data, &graph_reducer, &load_elimination);
@@ -664,8 +681,11 @@
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     EscapeAnalysisReducer escape_reducer(&graph_reducer, data->jsgraph(),
                                          &escape_analysis, temp_zone);
+    escape_reducer.SetExistsVirtualAllocate(
+        escape_analysis.ExistsVirtualAllocate());
     AddReducer(data, &graph_reducer, &escape_reducer);
     graph_reducer.ReduceGraph();
+    escape_reducer.VerifyReplacement();
   }
 };
 
@@ -677,6 +697,13 @@
     SimplifiedLowering lowering(data->jsgraph(), temp_zone,
                                 data->source_positions());
     lowering.LowerAllNodes();
+
+    // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
+    if (lowering.abort_compilation_) {
+      data->set_compilation_failed();
+      return;
+    }
+
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
@@ -772,7 +799,6 @@
 
   void Run(PipelineData* data, Zone* temp_zone) {
     JSGraphReducer graph_reducer(data->jsgraph(), temp_zone);
-    JSContextRelaxation context_relaxing;
     DeadCodeElimination dead_code_elimination(&graph_reducer, data->graph(),
                                               data->common());
     CommonOperatorReducer common_reducer(&graph_reducer, data->graph(),
@@ -782,7 +808,6 @@
     SelectLowering select_lowering(data->jsgraph()->graph(),
                                    data->jsgraph()->common());
     TailCallOptimization tco(data->common(), data->graph());
-    AddReducer(data, &graph_reducer, &context_relaxing);
     AddReducer(data, &graph_reducer, &dead_code_elimination);
     AddReducer(data, &graph_reducer, &common_reducer);
     AddReducer(data, &graph_reducer, &generic_lowering);
@@ -813,7 +838,7 @@
   void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
     InstructionSelector selector(
         temp_zone, data->graph()->NodeCount(), linkage, data->sequence(),
-        data->schedule(), data->source_positions(),
+        data->schedule(), data->source_positions(), data->frame(),
         data->info()->is_source_positions_enabled()
             ? InstructionSelector::kAllSourcePositions
             : InstructionSelector::kCallSourcePositions);
@@ -979,9 +1004,10 @@
 struct JumpThreadingPhase {
   static const char* phase_name() { return "jump threading"; }
 
-  void Run(PipelineData* data, Zone* temp_zone) {
+  void Run(PipelineData* data, Zone* temp_zone, bool frame_at_start) {
     ZoneVector<RpoNumber> result(temp_zone);
-    if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence())) {
+    if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence(),
+                                         frame_at_start)) {
       JumpThreading::ApplyForwarding(result, data->sequence());
     }
   }
@@ -1053,13 +1079,6 @@
 
 
 Handle<Code> Pipeline::GenerateCode() {
-  // TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
-  // the correct solution is to restore the context register after invoking
-  // builtins from full-codegen.
-  if (Context::IsJSBuiltin(isolate()->native_context(), info()->closure())) {
-    return Handle<Code>::null();
-  }
-
   ZonePool zone_pool;
   base::SmartPointer<PipelineStatistics> pipeline_statistics;
 
@@ -1073,13 +1092,14 @@
     if (json_file != nullptr) {
       OFStream json_of(json_file);
       Handle<Script> script = info()->script();
-      FunctionLiteral* function = info()->literal();
       base::SmartArrayPointer<char> function_name = info()->GetDebugName();
       int pos = info()->shared_info()->start_position();
       json_of << "{\"function\":\"" << function_name.get()
               << "\", \"sourcePosition\":" << pos << ", \"source\":\"";
-      if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+      if (info()->has_literal() && !script->IsUndefined() &&
+          !script->source()->IsUndefined()) {
         DisallowHeapAllocation no_allocation;
+        FunctionLiteral* function = info()->literal();
         int start = function->start_position();
         int len = function->end_position() - start;
         String::SubStringRange source(String::cast(script->source()), start,
@@ -1204,6 +1224,9 @@
   // Kill the Typer and thereby uninstall the decorator (if any).
   typer.Reset(nullptr);
 
+  // TODO(bmeurer): See comment on SimplifiedLowering::abort_compilation_.
+  if (data.compilation_failed()) return Handle<Code>::null();
+
   return ScheduleAndGenerateCode(
       Linkage::ComputeIncoming(data.instruction_zone(), info()));
 }
@@ -1212,10 +1235,9 @@
 Handle<Code> Pipeline::GenerateCodeForCodeStub(Isolate* isolate,
                                                CallDescriptor* call_descriptor,
                                                Graph* graph, Schedule* schedule,
-                                               Code::Kind kind,
+                                               Code::Flags flags,
                                                const char* debug_name) {
-  CompilationInfo info(debug_name, isolate, graph->zone());
-  info.set_output_code_kind(kind);
+  CompilationInfo info(debug_name, isolate, graph->zone(), flags);
 
   // Construct a pipeline for scheduling and code generation.
   ZonePool zone_pool;
@@ -1286,6 +1308,7 @@
   PipelineData data(&zone_pool, &info, sequence);
   Pipeline pipeline(&info);
   pipeline.data_ = &data;
+  pipeline.data_->InitializeFrameData(nullptr);
   pipeline.AllocateRegisters(config, nullptr, run_verifier);
   return !data.compilation_failed();
 }
@@ -1308,6 +1331,7 @@
 
   data->InitializeInstructionSequence();
 
+  data->InitializeFrameData(call_descriptor);
   // Select and schedule instructions covering the scheduled graph.
   Linkage linkage(call_descriptor);
   Run<InstructionSelectionPhase>(&linkage);
@@ -1329,6 +1353,7 @@
   BeginPhaseKind("register allocation");
 
   bool run_verifier = FLAG_turbo_verify_allocation;
+
   // Allocate registers.
   AllocateRegisters(
       RegisterConfiguration::ArchDefault(RegisterConfiguration::TURBOFAN),
@@ -1339,10 +1364,16 @@
   }
 
   BeginPhaseKind("code generation");
-
+  // TODO(mtrofin): move this off to the register allocator.
+  bool generate_frame_at_start =
+      !FLAG_turbo_frame_elision || !data_->info()->IsStub() ||
+      !data_->frame()->needs_frame() ||
+      data_->sequence()->instruction_blocks().front()->needs_frame() ||
+      linkage.GetIncomingDescriptor()->CalleeSavedFPRegisters() != 0 ||
+      linkage.GetIncomingDescriptor()->CalleeSavedRegisters() != 0;
  // Optimize jumps.
   if (FLAG_turbo_jt) {
-    Run<JumpThreadingPhase>();
+    Run<JumpThreadingPhase>(generate_frame_at_start);
   }
 
   // Generate final machine code.
@@ -1446,7 +1477,8 @@
     Run<MergeSplintersPhase>();
   }
 
-  if (FLAG_turbo_frame_elision) {
+  // We plan to enable frame elision only for stubs and bytecode handlers.
+  if (FLAG_turbo_frame_elision && info()->IsStub()) {
     Run<LocateSpillSlotsPhase>();
     Run<FrameElisionPhase>();
   }
@@ -1482,6 +1514,8 @@
   data->DeleteRegisterAllocationZone();
 }
 
+Isolate* Pipeline::isolate() const { return info()->isolate(); }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8