Merge V8 5.2.361.47  DO NOT MERGE

https://chromium.googlesource.com/v8/v8/+/5.2.361.47

FPIIM-449

Change-Id: Ibec421b85a9b88cb3a432ada642e469fe7e78346
(cherry picked from commit bcf72ee8e3b26f1d0726869c7ddb3921c68b09a8)
diff --git a/test/cctest/heap/test-heap.cc b/test/cctest/heap/test-heap.cc
index 424e987..ef4b217 100644
--- a/test/cctest/heap/test-heap.cc
+++ b/test/cctest/heap/test-heap.cc
@@ -31,6 +31,7 @@
 #include "src/compilation-cache.h"
 #include "src/context-measure.h"
 #include "src/deoptimizer.h"
+#include "src/elements.h"
 #include "src/execution.h"
 #include "src/factory.h"
 #include "src/field-type.h"
@@ -577,7 +578,7 @@
 static bool WeakPointerCleared = false;
 
 static void TestWeakGlobalHandleCallback(
-    const v8::WeakCallbackData<v8::Value, void>& data) {
+    const v8::WeakCallbackInfo<void>& data) {
   std::pair<v8::Persistent<v8::Value>*, int>* p =
       reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
           data.GetParameter());
@@ -610,9 +611,9 @@
   }
 
   std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
-  GlobalHandles::MakeWeak(h2.location(),
-                          reinterpret_cast<void*>(&handle_and_id),
-                          &TestWeakGlobalHandleCallback);
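+  // The new MakeWeak API takes an explicit v8::WeakCallbackType; kParameter
+  // delivers the raw void* back to the callback via data.GetParameter().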
+  GlobalHandles::MakeWeak(
+      h2.location(), reinterpret_cast<void*>(&handle_and_id),
+      &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
 
   // Scavenge treats weak pointers as normal roots.
   heap->CollectGarbage(NEW_SPACE);
@@ -657,9 +658,9 @@
   CHECK(!heap->InNewSpace(*h1) && !heap->InNewSpace(*h2));
 
   std::pair<Handle<Object>*, int> handle_and_id(&h2, 1234);
-  GlobalHandles::MakeWeak(h2.location(),
-                          reinterpret_cast<void*>(&handle_and_id),
-                          &TestWeakGlobalHandleCallback);
+  GlobalHandles::MakeWeak(
+      h2.location(), reinterpret_cast<void*>(&handle_and_id),
+      &TestWeakGlobalHandleCallback, v8::WeakCallbackType::kParameter);
   CHECK(!GlobalHandles::IsNearDeath(h1.location()));
   CHECK(!GlobalHandles::IsNearDeath(h2.location()));
 
@@ -695,9 +696,9 @@
   }
 
   std::pair<Handle<Object>*, int> handle_and_id(&h, 1234);
-  GlobalHandles::MakeWeak(h.location(),
-                          reinterpret_cast<void*>(&handle_and_id),
-                          &TestWeakGlobalHandleCallback);
+  GlobalHandles::MakeWeak(h.location(), reinterpret_cast<void*>(&handle_and_id),
+                          &TestWeakGlobalHandleCallback,
+                          v8::WeakCallbackType::kParameter);
 
   // Scavenge does not recognize weak references.
   heap->CollectGarbage(NEW_SPACE);
@@ -1546,10 +1547,7 @@
   Handle<Object> g_value =
       Object::GetProperty(isolate->global_object(), g_name).ToHandleChecked();
   Handle<JSFunction> g_function = Handle<JSFunction>::cast(g_value);
-  // TODO(mvstanton): change to check that g is *not* compiled when optimized
-  // cache
-  // map lookup moves to the compile lazy builtin.
-  CHECK(g_function->is_compiled());
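+  // The optimized code map lookup now lives in the compile lazy builtin, so
+  // g is not compiled eagerly anymore.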
+  CHECK(!g_function->is_compiled());
 
   SimulateIncrementalMarking(heap);
   CompileRun("%OptimizeFunctionOnNextCall(f); f();");
@@ -1590,25 +1588,11 @@
     CompileRun(raw_source);
   }
 
-  // On first compilation, only a hash is inserted in the code cache. We can't
-  // find that value.
+  // The script should be in the cache now.
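+  // (Previously, only a hash was inserted on first compilation and a real
+  // cache entry appeared only on the second run.)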
   MaybeHandle<SharedFunctionInfo> info = compilation_cache->LookupScript(
       source, Handle<Object>(), 0, 0,
       v8::ScriptOriginOptions(false, true, false), native_context,
       language_mode);
-  CHECK(info.is_null());
-
-  {
-    v8::HandleScope scope(CcTest::isolate());
-    CompileRun(raw_source);
-  }
-
-  // On second compilation, the hash is replaced by a real cache entry mapping
-  // the source to the shared function info containing the code.
-  info = compilation_cache->LookupScript(
-      source, Handle<Object>(), 0, 0,
-      v8::ScriptOriginOptions(false, true, false), native_context,
-      language_mode);
   CHECK(!info.is_null());
 
   // Check that the code cache entry survives at least on GC.
@@ -1640,36 +1624,6 @@
       v8::ScriptOriginOptions(false, true, false), native_context,
       language_mode);
   CHECK(info.is_null());
-
-  {
-    v8::HandleScope scope(CcTest::isolate());
-    CompileRun(raw_source);
-  }
-
-  // On first compilation, only a hash is inserted in the code cache. We can't
-  // find that value.
-  info = compilation_cache->LookupScript(
-      source, Handle<Object>(), 0, 0,
-      v8::ScriptOriginOptions(false, true, false), native_context,
-      language_mode);
-  CHECK(info.is_null());
-
-  for (int i = 0; i < CompilationCacheTable::kHashGenerations; i++) {
-    compilation_cache->MarkCompactPrologue();
-  }
-
-  {
-    v8::HandleScope scope(CcTest::isolate());
-    CompileRun(raw_source);
-  }
-
-  // If we aged the cache before caching the script, ensure that we didn't cache
-  // on next compilation.
-  info = compilation_cache->LookupScript(
-      source, Handle<Object>(), 0, 0,
-      v8::ScriptOriginOptions(false, true, false), native_context,
-      language_mode);
-  CHECK(info.is_null());
 }
 
 
@@ -1692,7 +1646,7 @@
   Object* object = CcTest::heap()->native_contexts_list();
   while (!object->IsUndefined()) {
     count++;
-    object = Context::cast(object)->get(Context::NEXT_CONTEXT_LINK);
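+    // next_context_link() is an accessor for get(Context::NEXT_CONTEXT_LINK).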
+    object = Context::cast(object)->next_context_link();
   }
   return count;
 }
@@ -1830,8 +1784,7 @@
     count++;
     if (count == n) heap->CollectAllGarbage();
     object =
-        Handle<Object>(Context::cast(*object)->get(Context::NEXT_CONTEXT_LINK),
-                       isolate);
+        Handle<Object>(Context::cast(*object)->next_context_link(), isolate);
   }
   return count;
 }
@@ -2296,16 +2249,20 @@
   // on the heap.
   if (size_of_objects_1 > size_of_objects_2) {
     intptr_t delta = size_of_objects_1 - size_of_objects_2;
-    PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
-           "Iterator: %" V8_PTR_PREFIX "d, "
-           "delta: %" V8_PTR_PREFIX "d\n",
+    PrintF("Heap::SizeOfObjects: %" V8PRIdPTR
+           ", "
+           "Iterator: %" V8PRIdPTR
+           ", "
+           "delta: %" V8PRIdPTR "\n",
            size_of_objects_1, size_of_objects_2, delta);
     CHECK_GT(size_of_objects_1 / 20, delta);
   } else {
     intptr_t delta = size_of_objects_2 - size_of_objects_1;
-    PrintF("Heap::SizeOfObjects: %" V8_PTR_PREFIX "d, "
-           "Iterator: %" V8_PTR_PREFIX "d, "
-           "delta: %" V8_PTR_PREFIX "d\n",
+    PrintF("Heap::SizeOfObjects: %" V8PRIdPTR
+           ", "
+           "Iterator: %" V8PRIdPTR
+           ", "
+           "delta: %" V8PRIdPTR "\n",
            size_of_objects_1, size_of_objects_2, delta);
     CHECK_GT(size_of_objects_2 / 20, delta);
   }
@@ -2657,6 +2614,14 @@
   CcTest::heap()->CollectGarbage(OLD_SPACE);
 }
 
+namespace {
+
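+// Profiler ticks are tracked on the SharedFunctionInfo itself under Ignition
+// and on the compiled Code object otherwise.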
+int GetProfilerTicks(SharedFunctionInfo* shared) {
+  return FLAG_ignition ? shared->profiler_ticks()
+                       : shared->code()->profiler_ticks();
+}
+
+}  // namespace
 
 TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
   i::FLAG_stress_compaction = false;
@@ -2687,16 +2652,18 @@
           CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));
   CHECK(f->IsOptimized());
 
-  IncrementalMarking* marking = CcTest::heap()->incremental_marking();
-  marking->Stop();
+  // Make sure incremental marking is not running.
+  CcTest::heap()->incremental_marking()->Stop();
+
   CcTest::heap()->StartIncrementalMarking();
   // The following calls will increment CcTest::heap()->global_ic_age().
   CcTest::isolate()->ContextDisposedNotification();
   SimulateIncrementalMarking(CcTest::heap());
   CcTest::heap()->CollectAllGarbage();
+
   CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
   CHECK_EQ(0, f->shared()->opt_count());
-  CHECK_EQ(0, f->shared()->code()->profiler_ticks());
+  CHECK_EQ(0, GetProfilerTicks(f->shared()));
 }
 
 
@@ -2727,9 +2694,9 @@
   i::Handle<JSFunction> f = i::Handle<JSFunction>::cast(
       v8::Utils::OpenHandle(*v8::Local<v8::Function>::Cast(
           CcTest::global()->Get(ctx, v8_str("f")).ToLocalChecked())));
-
   CHECK(f->IsOptimized());
 
+  // Make sure incremental marking is not running.
   CcTest::heap()->incremental_marking()->Stop();
 
   // The following two calls will increment CcTest::heap()->global_ic_age().
@@ -2738,7 +2705,7 @@
 
   CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
   CHECK_EQ(0, f->shared()->opt_count());
-  CHECK_EQ(0, f->shared()->code()->profiler_ticks());
+  CHECK_EQ(0, GetProfilerTicks(f->shared()));
 }
 
 
@@ -3557,6 +3524,8 @@
   // Concurrent sweeping adds nondeterminism, depending on when memory is
   // available for further reuse.
   i::FLAG_concurrent_sweeping = false;
+  // Fast evacuation of pages may result in a different page count in old space.
+  i::FLAG_page_promotion = false;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
@@ -5547,19 +5516,21 @@
   isolate->Enter();
   {
     i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+    Heap* heap = i_isolate->heap();
     HandleScope handle_scope(i_isolate);
-    PagedSpace* old_space = i_isolate->heap()->old_space();
+    PagedSpace* old_space = heap->old_space();
     const int kMaxObjects = 10000;
     const int kFixedArrayLen = 512;
     Handle<FixedArray> objects[kMaxObjects];
-    for (int i = 0; (i < kMaxObjects) && old_space->CanExpand(Page::kPageSize);
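+    // Stop allocating once the old generation cannot grow by another page
+    // area, instead of merely checking that the space itself can expand.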
+    for (int i = 0; (i < kMaxObjects) &&
+                    heap->CanExpandOldGeneration(old_space->AreaSize());
          i++) {
       objects[i] = i_isolate->factory()->NewFixedArray(kFixedArrayLen, TENURED);
       Page::FromAddress(objects[i]->address())
           ->SetFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
     }
     SimulateFullSpace(old_space);
-    i_isolate->heap()->CollectGarbage(OLD_SPACE);
+    heap->CollectGarbage(OLD_SPACE);
     // If we get this far, we've successfully aborted compaction. Any further
     // allocations might trigger OOM.
   }
@@ -6304,6 +6275,28 @@
       "check(g1, g2);");
 }
 
+TEST(RemoveCodeFromSharedFunctionInfoButNotFromClosure) {
+  CcTest::InitializeVM();
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  v8::Local<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
+  global->Set(isolate, "check", v8::FunctionTemplate::New(
+                                    isolate, CheckEqualSharedFunctionInfos));
+  global->Set(isolate, "remove",
+              v8::FunctionTemplate::New(isolate, RemoveCodeAndGC));
+  v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
+  v8::Context::Scope cscope(context);
+  CompileRun(
+      "function f() { return function g() {}; }"
+      "var g1 = f();"
+      "var g2 = f();"
+      "check(g1, g2);"
+      "g1();"
+      "g2();"
+      "remove(g1);"
+      "g2();"
+      "check(g1, g2);");
+}
 
 TEST(OldGenerationAllocationThroughput) {
   CcTest::InitializeVM();
@@ -6604,6 +6597,151 @@
   heap->CollectGarbage(OLD_SPACE);
 }
 
+UNINITIALIZED_TEST(PagePromotion) {
+  FLAG_page_promotion = true;
+  FLAG_page_promotion_threshold = 0;  // Percent of live bytes; 0 = any page.
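+  // The semi-space size flags are specified in MB; reserve 8 pages' worth.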
+  i::FLAG_min_semi_space_size = 8 * (Page::kPageSize / MB);
+  // We cannot optimize for size as we require a new space with more than one
+  // page.
+  i::FLAG_optimize_for_size = false;
+  // Set max_semi_space_size because it could've been initialized by an
+  // implication of optimize_for_size.
+  i::FLAG_max_semi_space_size = i::FLAG_min_semi_space_size;
+  v8::Isolate::CreateParams create_params;
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  {
+    v8::Isolate::Scope isolate_scope(isolate);
+    v8::HandleScope handle_scope(isolate);
+    v8::Context::New(isolate)->Enter();
+    Heap* heap = i_isolate->heap();
+    std::vector<Handle<FixedArray>> handles;
+    SimulateFullSpace(heap->new_space(), &handles);
+    heap->CollectGarbage(NEW_SPACE);
+    CHECK_GT(handles.size(), 0u);
+    // The first object in handles should be on the first page.
+    Handle<FixedArray> first_object = handles.front();
+    Page* first_page = Page::FromAddress(first_object->address());
+    // The age mark should not be on the first page.
+    CHECK(!first_page->ContainsLimit(heap->new_space()->age_mark()));
+    // To perform a sanity check on live bytes, we need to mark the heap.
+    SimulateIncrementalMarking(heap, true);
+    // Sanity check that the page meets the requirements for promotion.
+    const int threshold_bytes =
+        FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
+    CHECK_GE(first_page->LiveBytes(), threshold_bytes);
+
+    // Actual checks: The page is in new space first, but is moved to old space
+    // during a full GC.
+    CHECK(heap->new_space()->ContainsSlow(first_page->address()));
+    CHECK(!heap->old_space()->ContainsSlow(first_page->address()));
+    heap->CollectGarbage(OLD_SPACE);
+    CHECK(!heap->new_space()->ContainsSlow(first_page->address()));
+    CHECK(heap->old_space()->ContainsSlow(first_page->address()));
+  }
+}
+
+TEST(Regress598319) {
+  // This test ensures that no white objects can cross the progress bar of
+  // large objects during incremental marking. It checks this by calling
+  // Shift() on the array while marking is in progress.
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Heap* heap = CcTest::heap();
+  Isolate* isolate = heap->isolate();
+
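+  // Enough elements that the backing FixedArray exceeds the regular object
+  // size limit and must be allocated in large object space.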
+  const int kNumberOfObjects = Page::kMaxRegularHeapObjectSize / kPointerSize;
+
+  struct Arr {
+    Arr(Isolate* isolate, int number_of_objects) {
+      root = isolate->factory()->NewFixedArray(1, TENURED);
+      {
+        // Temporary scope to avoid getting any other objects into the root set.
+        v8::HandleScope scope(CcTest::isolate());
+        Handle<FixedArray> tmp =
+            isolate->factory()->NewFixedArray(number_of_objects);
+        root->set(0, *tmp);
+        for (int i = 0; i < get()->length(); i++) {
+          tmp = isolate->factory()->NewFixedArray(100, TENURED);
+          get()->set(i, *tmp);
+        }
+      }
+    }
+
+    FixedArray* get() { return FixedArray::cast(root->get(0)); }
+
+    Handle<FixedArray> root;
+  } arr(isolate, kNumberOfObjects);
+
+  CHECK_EQ(arr.get()->length(), kNumberOfObjects);
+  CHECK(heap->lo_space()->Contains(arr.get()));
+  LargePage* page = heap->lo_space()->FindPage(arr.get()->address());
+  CHECK_NOT_NULL(page);
+
+  // GC to clean up state.
+  heap->CollectGarbage(OLD_SPACE);
+  MarkCompactCollector* collector = heap->mark_compact_collector();
+  if (collector->sweeping_in_progress()) {
+    collector->EnsureSweepingCompleted();
+  }
+
+  CHECK(heap->lo_space()->Contains(arr.get()));
+  CHECK(Marking::IsWhite(Marking::MarkBitFrom(arr.get())));
+  for (int i = 0; i < arr.get()->length(); i++) {
+    CHECK(Marking::IsWhite(
+        Marking::MarkBitFrom(HeapObject::cast(arr.get()->get(i)))));
+  }
+
+  // Start incremental marking.
+  IncrementalMarking* marking = heap->incremental_marking();
+  CHECK(marking->IsMarking() || marking->IsStopped());
+  if (marking->IsStopped()) {
+    heap->StartIncrementalMarking();
+  }
+  CHECK(marking->IsMarking());
+
+  // Check that we have not marked the interesting array during root scanning.
+  for (int i = 0; i < arr.get()->length(); i++) {
+    CHECK(Marking::IsWhite(
+        Marking::MarkBitFrom(HeapObject::cast(arr.get()->get(i)))));
+  }
+
+  // Now we search for a state where we are in incremental marking and have
+  // only partially marked the large object.
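+  // Stepping in small 1 KB increments makes it likely that we observe the
+  // page while its progress bar is only partially advanced.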
+  while (!marking->IsComplete()) {
+    marking->Step(i::KB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+    if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->progress_bar() > 0) {
+      CHECK_NE(page->progress_bar(), arr.get()->Size());
+      {
+        // Shift by 1, effectively moving one white object across the progress
+        // bar, meaning that we will miss marking it.
+        v8::HandleScope scope(CcTest::isolate());
+        Handle<JSArray> js_array = isolate->factory()->NewJSArrayWithElements(
+            Handle<FixedArray>(arr.get()));
+        js_array->GetElementsAccessor()->Shift(js_array);
+      }
+      break;
+    }
+  }
+
+  // Finish marking with bigger steps to speed up test.
+  while (!marking->IsComplete()) {
+    marking->Step(10 * i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+    if (marking->IsReadyToOverApproximateWeakClosure()) {
+      marking->FinalizeIncrementally();
+    }
+  }
+  CHECK(marking->IsComplete());
+
+  // All objects need to be black after marking. If a white object crossed the
+  // progress bar, we would fail here.
+  for (int i = 0; i < arr.get()->length(); i++) {
+    CHECK(Marking::IsBlack(
+        Marking::MarkBitFrom(HeapObject::cast(arr.get()->get(i)))));
+  }
+}
+
 TEST(Regress609761) {
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());