Upgrade V8 to version 4.9.385.28

https://chromium.googlesource.com/v8/v8/+/4.9.385.28

FPIIM-449

Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
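
The recurring change in deoptimizer-mips64.cc is that the FPU
save/restore loops no longer walk FPURegister allocation indices but
the allocatable set reported by
RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT):
each slot is mapped to a register code with GetAllocatableDoubleCode(),
the register is obtained via DoubleRegister::from_code(code), and the
frame offset is computed from that code instead of the loop index (the
double-register area is correspondingly sized by
DoubleRegister::kMaxNumRegisters so code-based offsets always fit).
A minimal standalone sketch of that indexing change follows; the
register codes in it are invented for illustration, and only the
code-versus-index offset computation mirrors the hunks below.

    // Toy model (not V8 API): why the save/restore offset is now keyed by
    // register *code* rather than by allocation index.
    #include <cstdio>

    namespace {

    constexpr int kDoubleSize = 8;  // bytes per double-register slot

    // Stand-in for the RegisterConfiguration: a sparse list of allocatable
    // double-register codes (values invented for the example).
    constexpr int kAllocatableDoubleCodes[] = {0, 2, 4, 6, 14, 26};
    constexpr int kNumAllocatableDoubles =
        sizeof(kAllocatableDoubleCodes) / sizeof(kAllocatableDoubleCodes[0]);

    }  // namespace

    int main() {
      for (int i = 0; i < kNumAllocatableDoubles; ++i) {
        int code = kAllocatableDoubleCodes[i];
        int index_based_offset = i * kDoubleSize;     // old scheme
        int code_based_offset = code * kDoubleSize;   // new scheme
        std::printf("f%-2d  index-based %3d  code-based %3d\n", code,
                    index_based_offset, code_based_offset);
      }
      return 0;
    }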
diff --git a/src/mips64/deoptimizer-mips64.cc b/src/mips64/deoptimizer-mips64.cc
index d7a7f05..8daba04 100644
--- a/src/mips64/deoptimizer-mips64.cc
+++ b/src/mips64/deoptimizer-mips64.cc
@@ -2,11 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-
 #include "src/codegen.h"
 #include "src/deoptimizer.h"
-#include "src/full-codegen.h"
+#include "src/full-codegen/full-codegen.h"
+#include "src/register-configuration.h"
 #include "src/safepoint-table.h"
 
 namespace v8 {
@@ -39,14 +38,15 @@
     } else {
       pointer = code->instruction_start();
     }
-    CodePatcher patcher(pointer, 1);
+    CodePatcher patcher(isolate, pointer, 1);
     patcher.masm()->break_(0xCC);
 
     DeoptimizationInputData* data =
         DeoptimizationInputData::cast(code->deoptimization_data());
     int osr_offset = data->OsrPcOffset()->value();
     if (osr_offset > 0) {
-      CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+      CodePatcher osr_patcher(isolate, code->instruction_start() + osr_offset,
+                              1);
       osr_patcher.masm()->break_(0xCC);
     }
   }
@@ -67,7 +67,7 @@
     int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
     DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
     DCHECK(call_size_in_bytes <= patch_size());
-    CodePatcher patcher(call_address, call_size_in_words);
+    CodePatcher patcher(isolate, call_address, call_size_in_words);
     patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
     DCHECK(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
@@ -90,7 +90,7 @@
   }
   input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
   input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; i++) {
     input_->SetDoubleRegister(i, 0.0);
   }
 
@@ -131,7 +131,7 @@
 
 // This code tries to be close to ia32 code so that any changes can be
 // easily ported.
-void Deoptimizer::EntryGenerator::Generate() {
+void Deoptimizer::TableEntryGenerator::Generate() {
   GeneratePrologue();
 
   // Unlike on ARM we don't save all the registers, just the useful ones.
@@ -141,14 +141,16 @@
   RegList restored_regs = kJSCallerSaved | kCalleeSaved;
   RegList saved_regs = restored_regs | sp.bit() | ra.bit();
 
-  const int kDoubleRegsSize =
-      kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
+  const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kMaxNumRegisters;
 
   // Save all FPU registers before messing with them.
   __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
-  for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
-    FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
-    int offset = i * kDoubleSize;
+  const RegisterConfiguration* config =
+      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+    int code = config->GetAllocatableDoubleCode(i);
+    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+    int offset = code * kDoubleSize;
     __ sdc1(fpu_reg, MemOperand(sp, offset));
   }
 
@@ -161,6 +163,9 @@
     }
   }
 
+  __ li(a2, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  __ sd(fp, MemOperand(a2));
+
   const int kSavedRegistersAreaSize =
       (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
 
@@ -220,9 +225,10 @@
   int double_regs_offset = FrameDescription::double_registers_offset();
   // Copy FPU registers to
   // double_registers_[DoubleRegister::kNumAllocatableRegisters]
-  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
-    int dst_offset = i * kDoubleSize + double_regs_offset;
-    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+    int code = config->GetAllocatableDoubleCode(i);
+    int dst_offset = code * kDoubleSize + double_regs_offset;
+    int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
     __ ldc1(f0, MemOperand(sp, src_offset));
     __ sdc1(f0, MemOperand(a1, dst_offset));
   }
@@ -269,12 +275,12 @@
   __ ld(a4, MemOperand(a0, Deoptimizer::output_offset()));  // a4 is output_.
   __ dsll(a1, a1, kPointerSizeLog2);  // Count to offset.
   __ daddu(a1, a4, a1);  // a1 = one past the last FrameDescription**.
-  __ jmp(&outer_loop_header);
+  __ BranchShort(&outer_loop_header);
   __ bind(&outer_push_loop);
   // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
   __ ld(a2, MemOperand(a4, 0));  // output_[ix]
   __ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
-  __ jmp(&inner_loop_header);
+  __ BranchShort(&inner_loop_header);
   __ bind(&inner_push_loop);
   __ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
   __ Daddu(a6, a2, Operand(a3));
@@ -288,9 +294,10 @@
   __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
 
   __ ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
-  for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
-    const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
-    int src_offset = i * kDoubleSize + double_regs_offset;
+  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+    int code = config->GetAllocatableDoubleCode(i);
+    const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+    int src_offset = code * kDoubleSize + double_regs_offset;
     __ ldc1(fpu_reg, MemOperand(a1, src_offset));
   }
 
@@ -326,39 +333,66 @@
 
 
 // Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 11 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
 
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
 
   // Create a sequence of deoptimization entries.
   // Note that registers are still live when jumping to an entry.
-  Label table_start;
+  Label table_start, done, done_special, trampoline_jump;
   __ bind(&table_start);
-  for (int i = 0; i < count(); i++) {
-    Label start;
-    __ bind(&start);
-    __ daddiu(sp, sp, -1 * kPointerSize);
-    // Jump over the remaining deopt entries (including this one).
-    // This code is always reached by calling Jump, which puts the target (label
-    // start) into t9.
-    const int remaining_entries = (count() - i) * table_entry_size_;
-    __ Daddu(t9, t9, remaining_entries);
-    // 'at' was clobbered so we can only load the current entry value here.
-    __ li(t8, i);
-    __ jr(t9);  // Expose delay slot.
-    __ sd(t8, MemOperand(sp, 0 * kPointerSize));  // In the delay slot.
+  int kMaxEntriesBranchReach =
+      (1 << (kImm16Bits - 2)) / (table_entry_size_ / Assembler::kInstrSize);
 
-    // Pad the rest of the code.
-    while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
-      __ nop();
+  if (count() <= kMaxEntriesBranchReach) {
+    // Common case.
+    for (int i = 0; i < count(); i++) {
+      Label start;
+      __ bind(&start);
+      DCHECK(is_int16(i));
+      __ BranchShort(USE_DELAY_SLOT, &done);  // Expose delay slot.
+      __ li(at, i);                      // In the delay slot.
+
+      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
     }
 
-    DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
-  }
+    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+              count() * table_entry_size_);
+    __ bind(&done);
+    __ Push(at);
+  } else {
+    // Uncommon case, the branch cannot reach.
+    // Create mini trampoline and adjust id constants to get proper value at
+    // the end of table.
+    for (int i = kMaxEntriesBranchReach; i > 1; i--) {
+      Label start;
+      __ bind(&start);
+      DCHECK(is_int16(i));
+      __ BranchShort(USE_DELAY_SLOT, &trampoline_jump);  // Expose delay slot.
+      __ li(at, -i);                                // In the delay slot.
+      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
+    }
+    // Entry with id == kMaxEntriesBranchReach - 1.
+    __ bind(&trampoline_jump);
+    __ BranchShort(USE_DELAY_SLOT, &done_special);
+    __ li(at, -1);
 
-  DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
-      count() * table_entry_size_);
+    for (int i = kMaxEntriesBranchReach; i < count(); i++) {
+      Label start;
+      __ bind(&start);
+      DCHECK(is_int16(i));
+      __ Branch(USE_DELAY_SLOT, &done);  // Expose delay slot.
+      __ li(at, i);                      // In the delay slot.
+    }
+
+    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+              count() * table_entry_size_);
+    __ bind(&done_special);
+    __ daddiu(at, at, kMaxEntriesBranchReach);
+    __ bind(&done);
+    __ Push(at);
+  }
 }
 
 
@@ -373,7 +407,7 @@
 
 
 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
-  // No out-of-line constant pool support.
+  // No embedded constant pool support.
   UNREACHABLE();
 }
 
@@ -381,4 +415,5 @@
 #undef __
 
 
-} }  // namespace v8::internal
+}  // namespace internal
+}  // namespace v8
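
A note on the GeneratePrologue() hunk above: each deopt table entry
shrinks from 11 instructions (push the entry id and jump past the table
through t9) to 2 (a BranchShort whose delay slot loads the id into at),
with a single Push(at) at the shared done label. Because BranchShort
carries a 16-bit signed immediate, this layout is only used while
count() stays within kMaxEntriesBranchReach; larger tables take the
mini-trampoline path. A small standalone check of the arithmetic,
assuming the usual MIPS values kInstrSize = 4 and kImm16Bits = 16
(neither constant is spelled out in this diff):

    // Standalone arithmetic check, not V8 code.
    #include <cassert>
    #include <cstdio>

    int main() {
      const int kInstrSize = 4;   // bytes per MIPS instruction (assumed)
      const int kImm16Bits = 16;  // branch immediate width (assumed)

      // New entry: BranchShort(&done) plus li(at, i) in the delay slot.
      const int table_entry_size = 2 * kInstrSize;

      // Entries reachable by the short branch before GeneratePrologue()
      // falls back to the mini-trampoline layout.
      const int kMaxEntriesBranchReach =
          (1 << (kImm16Bits - 2)) / (table_entry_size / kInstrSize);

      std::printf("entry size: %d bytes (was %d)\n", table_entry_size,
                  11 * kInstrSize);
      std::printf("max entries before trampoline: %d\n",
                  kMaxEntriesBranchReach);
      assert(kMaxEntriesBranchReach == 8192);
      return 0;
    }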