Upgrade V8 to version 4.9.385.28

https://chromium.googlesource.com/v8/v8/+/4.9.385.28

FPIIM-449

Change-Id: I4b2e74289d4bf3667f2f3dc8aa2e541f63e26eb4
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
index d81d253..3023fbf 100644
--- a/src/heap/spaces-inl.h
+++ b/src/heap/spaces-inl.h
@@ -5,10 +5,11 @@
 #ifndef V8_HEAP_SPACES_INL_H_
 #define V8_HEAP_SPACES_INL_H_
 
+#include "src/heap/incremental-marking.h"
 #include "src/heap/spaces.h"
-#include "src/heap-profiler.h"
 #include "src/isolate.h"
 #include "src/msan.h"
+#include "src/profiler/heap-profiler.h"
 #include "src/v8memory.h"
 
 namespace v8 {
@@ -28,7 +29,6 @@
 // -----------------------------------------------------------------------------
 // PageIterator
 
-
 PageIterator::PageIterator(PagedSpace* space)
     : space_(space),
       prev_page_(&space->anchor_),
@@ -47,8 +47,32 @@
 
 
 // -----------------------------------------------------------------------------
-// NewSpacePageIterator
+// SemiSpaceIterator
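+// Next() walks the semispace linearly, skipping filler objects and hopping
+// to the start of the next page when the cursor reaches a page end.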
 
+HeapObject* SemiSpaceIterator::Next() {
+  while (current_ != limit_) {
+    if (NewSpacePage::IsAtEnd(current_)) {
+      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+      page = page->next_page();
+      DCHECK(!page->is_anchor());
+      current_ = page->area_start();
+      if (current_ == limit_) return nullptr;
+    }
+    HeapObject* object = HeapObject::FromAddress(current_);
+    current_ += object->Size();
+    if (!object->IsFiller()) {
+      return object;
+    }
+  }
+  return nullptr;
+}
+
+
+HeapObject* SemiSpaceIterator::next_object() { return Next(); }
+
+
+// -----------------------------------------------------------------------------
+// NewSpacePageIterator
 
 NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
     : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
@@ -81,6 +105,19 @@
 
 // -----------------------------------------------------------------------------
 // HeapObjectIterator
+
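+// Returns the next live (non-filler) object, advancing to the following page
+// when the current one is exhausted; returns NULL once the space is done.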
+HeapObject* HeapObjectIterator::Next() {
+  do {
+    HeapObject* next_obj = FromCurrentPage();
+    if (next_obj != NULL) return next_obj;
+  } while (AdvanceToNextPage());
+  return NULL;
+}
+
+
+HeapObject* HeapObjectIterator::next_object() { return Next(); }
+
+
 HeapObject* HeapObjectIterator::FromCurrentPage() {
   while (cur_addr_ != cur_end_) {
     if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
@@ -88,11 +125,22 @@
       continue;
     }
     HeapObject* obj = HeapObject::FromAddress(cur_addr_);
-    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+    int obj_size = obj->Size();
     cur_addr_ += obj_size;
     DCHECK(cur_addr_ <= cur_end_);
+    // TODO(hpayer): Remove the debugging code.
+    if (cur_addr_ > cur_end_) {
+      space_->heap()->isolate()->PushStackTraceAndDie(0xaaaaaaaa, obj, NULL,
+                                                      obj_size);
+    }
+
     if (!obj->IsFiller()) {
-      DCHECK_OBJECT_SIZE(obj_size);
+      if (obj->IsCode()) {
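+        // Code objects live in the code space and are checked against the
+        // code-space object size limit rather than the regular one.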
+        DCHECK_EQ(space_, space_->heap()->code_space());
+        DCHECK_CODEOBJECT_SIZE(obj_size, space_);
+      } else {
+        DCHECK_OBJECT_SIZE(obj_size);
+      }
       return obj;
     }
   }
@@ -132,11 +180,22 @@
 
 
 // --------------------------------------------------------------------------
+// AllocationResult
+
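+// A retry result carries the space to retry in, encoded as a Smi in object_;
+// RetrySpace() decodes it back into an AllocationSpace.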
+AllocationSpace AllocationResult::RetrySpace() {
+  DCHECK(IsRetry());
+  return static_cast<AllocationSpace>(Smi::cast(object_)->value());
+}
+
+
+// --------------------------------------------------------------------------
 // PagedSpace
+
 Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
                        PagedSpace* owner) {
   Page* page = reinterpret_cast<Page*>(chunk);
-  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
+  page->mutex_ = new base::Mutex();
+  DCHECK(page->area_size() <= kAllocatableMemory);
   DCHECK(chunk->owner() == owner);
   owner->IncreaseCapacity(page->area_size());
   owner->Free(page->area_start(), page->area_size());
@@ -154,6 +213,9 @@
 }
 
 
+bool PagedSpace::Contains(HeapObject* o) { return Contains(o->address()); }
+
+
 void MemoryChunk::set_scan_on_scavenge(bool scan) {
   if (scan) {
     if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
@@ -186,35 +248,50 @@
 }
 
 
-void MemoryChunk::UpdateHighWaterMark(Address mark) {
-  if (mark == NULL) return;
-  // Need to subtract one from the mark because when a chunk is full the
-  // top points to the next address after the chunk, which effectively belongs
-  // to another chunk. See the comment to Page::FromAllocationTop.
-  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
-  int new_mark = static_cast<int>(mark - chunk->address());
-  if (new_mark > chunk->high_water_mark_) {
-    chunk->high_water_mark_ = new_mark;
-  }
-}
-
-
 PointerChunkIterator::PointerChunkIterator(Heap* heap)
-    : state_(kOldPointerState),
-      old_pointer_iterator_(heap->old_pointer_space()),
+    : state_(kOldSpaceState),
+      old_iterator_(heap->old_space()),
       map_iterator_(heap->map_space()),
       lo_iterator_(heap->lo_space()) {}
 
 
-Page* Page::next_page() {
-  DCHECK(next_chunk()->owner() == owner());
-  return static_cast<Page*>(next_chunk());
-}
-
-
-Page* Page::prev_page() {
-  DCHECK(prev_chunk()->owner() == owner());
-  return static_cast<Page*>(prev_chunk());
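+// Walks the chunks of the pointer-holding spaces in order: old space, map
+// space, then large-object space (where only fixed arrays are visited).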
+MemoryChunk* PointerChunkIterator::next() {
+  switch (state_) {
+    case kOldSpaceState: {
+      if (old_iterator_.has_next()) {
+        return old_iterator_.next();
+      }
+      state_ = kMapState;
+      // Fall through.
+    }
+    case kMapState: {
+      if (map_iterator_.has_next()) {
+        return map_iterator_.next();
+      }
+      state_ = kLargeObjectState;
+      // Fall through.
+    }
+    case kLargeObjectState: {
+      HeapObject* heap_object;
+      do {
+        heap_object = lo_iterator_.Next();
+        if (heap_object == NULL) {
+          state_ = kFinishedState;
+          return NULL;
+        }
+        // Fixed arrays are the only pointer-containing objects in large
+        // object space.
+      } while (!heap_object->IsFixedArray());
+      MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
+      return answer;
+    }
+    case kFinishedState:
+      return NULL;
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return NULL;
 }
 
 
@@ -244,8 +321,45 @@
 }
 
 
+AllocationResult LocalAllocationBuffer::AllocateRawAligned(
+    int size_in_bytes, AllocationAlignment alignment) {
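+  // Bump-pointer allocation inside the buffer: pad the top to the requested
+  // alignment (the padding becomes a filler object) and signal a retry when
+  // the padded request does not fit below the limit.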
+  Address current_top = allocation_info_.top();
+  int filler_size = Heap::GetFillToAlign(current_top, alignment);
+
+  Address new_top = current_top + filler_size + size_in_bytes;
+  if (new_top > allocation_info_.limit()) return AllocationResult::Retry();
+
+  allocation_info_.set_top(new_top);
+  if (filler_size > 0) {
+    return heap_->PrecedeWithFiller(HeapObject::FromAddress(current_top),
+                                    filler_size);
+  }
+
+  return AllocationResult(HeapObject::FromAddress(current_top));
+}
+
+
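+// Linear allocation with alignment on a paged space. Returns NULL when the
+// request does not fit in the current linear area; on success any filler
+// size is folded into |size_in_bytes| so the caller sees the padded size.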
+HeapObject* PagedSpace::AllocateLinearlyAligned(int* size_in_bytes,
+                                                AllocationAlignment alignment) {
+  Address current_top = allocation_info_.top();
+  int filler_size = Heap::GetFillToAlign(current_top, alignment);
+
+  Address new_top = current_top + filler_size + *size_in_bytes;
+  if (new_top > allocation_info_.limit()) return NULL;
+
+  allocation_info_.set_top(new_top);
+  if (filler_size > 0) {
+    *size_in_bytes += filler_size;
+    return heap()->PrecedeWithFiller(HeapObject::FromAddress(current_top),
+                                     filler_size);
+  }
+
+  return HeapObject::FromAddress(current_top);
+}
+
+
 // Raw allocation.
-AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
+AllocationResult PagedSpace::AllocateRawUnaligned(int size_in_bytes) {
   HeapObject* object = AllocateLinearly(size_in_bytes);
 
   if (object == NULL) {
@@ -267,28 +381,134 @@
 }
 
 
+AllocationResult PagedSpace::AllocateRawUnalignedSynchronized(
+    int size_in_bytes) {
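+  // Like AllocateRawUnaligned(), but serialized behind |space_mutex_| so that
+  // several threads may allocate into the space concurrently.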
+  base::LockGuard<base::Mutex> lock_guard(&space_mutex_);
+  return AllocateRawUnaligned(size_in_bytes);
+}
+
+
+// Raw allocation with explicit alignment (used for old space only).
+AllocationResult PagedSpace::AllocateRawAligned(int size_in_bytes,
+                                                AllocationAlignment alignment) {
+  DCHECK(identity() == OLD_SPACE);
+  int allocation_size = size_in_bytes;
+  HeapObject* object = AllocateLinearlyAligned(&allocation_size, alignment);
+
+  if (object == NULL) {
+    // We don't know exactly how much filler we need to align until space is
+    // allocated, so assume the worst case.
+    int filler_size = Heap::GetMaximumFillToAlign(alignment);
+    allocation_size += filler_size;
+    object = free_list_.Allocate(allocation_size);
+    if (object == NULL) {
+      object = SlowAllocateRaw(allocation_size);
+    }
+    if (object != NULL && filler_size != 0) {
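+      // The block obtained above is worst-case sized; AlignWithFiller()
+      // carves the aligned object out of it and fills the slack.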
+      object = heap()->AlignWithFiller(object, size_in_bytes, allocation_size,
+                                       alignment);
+      // Filler objects are initialized, so mark only the aligned object memory
+      // as uninitialized.
+      allocation_size = size_in_bytes;
+    }
+  }
+
+  if (object != NULL) {
+    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), allocation_size);
+    return object;
+  }
+
+  return AllocationResult::Retry(identity());
+}
+
+
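+// Double alignment only needs special handling on 32-bit hosts; on 64-bit
+// hosts every word-aligned allocation is already double-aligned.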
+AllocationResult PagedSpace::AllocateRaw(int size_in_bytes,
+                                         AllocationAlignment alignment) {
+#ifdef V8_HOST_ARCH_32_BIT
+  return alignment == kDoubleAligned
+             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+             : AllocateRawUnaligned(size_in_bytes);
+#else
+  return AllocateRawUnaligned(size_in_bytes);
+#endif
+}
+
+
 // -----------------------------------------------------------------------------
 // NewSpace
 
 
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top();
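+// Aligned bump-pointer allocation in the to-space. If the padded request does
+// not fit, EnsureAllocation() is asked to make room and the top and filler
+// size are recomputed before the allocation proceeds.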
+AllocationResult NewSpace::AllocateRawAligned(int size_in_bytes,
+                                              AllocationAlignment alignment) {
+  Address top = allocation_info_.top();
+  int filler_size = Heap::GetFillToAlign(top, alignment);
+  int aligned_size_in_bytes = size_in_bytes + filler_size;
 
-  if (allocation_info_.limit() - old_top < size_in_bytes) {
-    return SlowAllocateRaw(size_in_bytes);
+  if (allocation_info_.limit() - top < aligned_size_in_bytes) {
+    // See if we can create room.
+    if (!EnsureAllocation(size_in_bytes, alignment)) {
+      return AllocationResult::Retry();
+    }
+
+    top = allocation_info_.top();
+    filler_size = Heap::GetFillToAlign(top, alignment);
+    aligned_size_in_bytes = size_in_bytes + filler_size;
   }
 
-  HeapObject* obj = HeapObject::FromAddress(old_top);
-  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
+  HeapObject* obj = HeapObject::FromAddress(top);
+  allocation_info_.set_top(top + aligned_size_in_bytes);
   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
 
-  // The slow path above ultimately goes through AllocateRaw, so this suffices.
+  if (filler_size > 0) {
+    obj = heap()->PrecedeWithFiller(obj, filler_size);
+  }
+
   MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
 
   return obj;
 }
 
 
+AllocationResult NewSpace::AllocateRawUnaligned(int size_in_bytes) {
+  Address top = allocation_info_.top();
+  if (allocation_info_.limit() < top + size_in_bytes) {
+    // See if we can create room.
+    if (!EnsureAllocation(size_in_bytes, kWordAligned)) {
+      return AllocationResult::Retry();
+    }
+
+    top = allocation_info_.top();
+  }
+
+  HeapObject* obj = HeapObject::FromAddress(top);
+  allocation_info_.set_top(top + size_in_bytes);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
+
+  return obj;
+}
+
+
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes,
+                                       AllocationAlignment alignment) {
+#ifdef V8_HOST_ARCH_32_BIT
+  return alignment == kDoubleAligned
+             ? AllocateRawAligned(size_in_bytes, kDoubleAligned)
+             : AllocateRawUnaligned(size_in_bytes);
+#else
+  return AllocateRawUnaligned(size_in_bytes);
+#endif
+}
+
+
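+// Mutex-guarded counterpart of AllocateRaw() for callers that may run on
+// more than one thread.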
+MUST_USE_RESULT inline AllocationResult NewSpace::AllocateRawSynchronized(
+    int size_in_bytes, AllocationAlignment alignment) {
+  base::LockGuard<base::Mutex> guard(&mutex_);
+  return AllocateRaw(size_in_bytes, alignment);
+}
+
+
 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
   return static_cast<LargePage*>(chunk);
@@ -300,14 +520,34 @@
 }
 
 
-bool FreeListNode::IsFreeListNode(HeapObject* object) {
-  Map* map = object->map();
-  Heap* heap = object->GetHeap();
-  return map == heap->raw_unchecked_free_space_map() ||
-         map == heap->raw_unchecked_one_pointer_filler_map() ||
-         map == heap->raw_unchecked_two_pointer_filler_map();
+LocalAllocationBuffer LocalAllocationBuffer::InvalidBuffer() {
+  return LocalAllocationBuffer(nullptr, AllocationInfo(nullptr, nullptr));
 }
+
+
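+// Turns a successful AllocationResult of |size| bytes into a buffer spanning
+// exactly that block; a retry result yields an invalid buffer.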
+LocalAllocationBuffer LocalAllocationBuffer::FromResult(Heap* heap,
+                                                        AllocationResult result,
+                                                        intptr_t size) {
+  if (result.IsRetry()) return InvalidBuffer();
+  HeapObject* obj = nullptr;
+  bool ok = result.To(&obj);
+  USE(ok);
+  DCHECK(ok);
+  Address top = HeapObject::cast(obj)->address();
+  return LocalAllocationBuffer(heap, AllocationInfo(top, top + size));
 }
-}  // namespace v8::internal
+
+
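+// Merging succeeds only when the two buffers are adjacent: this buffer's top
+// must coincide with |other|'s limit. The unused tail of |other| is then
+// absorbed and |other| is invalidated.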
+bool LocalAllocationBuffer::TryMerge(LocalAllocationBuffer* other) {
+  if (allocation_info_.top() == other->allocation_info_.limit()) {
+    allocation_info_.set_top(other->allocation_info_.top());
+    other->allocation_info_.Reset(nullptr, nullptr);
+    return true;
+  }
+  return false;
+}
+
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_HEAP_SPACES_INL_H_