Upgrade V8 to 5.1.281.57  DO NOT MERGE

FPIIM-449

Change-Id: Id981b686b4d587ac31697662eb98bb34be42ad90
(cherry picked from commit 3b9bc31999c9787eb726ecdbfd5796bfdec32a18)
diff --git a/src/heap/heap.h b/src/heap/heap.h
index 4a76777..9457453 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -10,6 +10,7 @@
 
 // Clients of this interface shouldn't depend on lots of heap internals.
 // Do not include anything from src/heap here!
+#include "include/v8.h"
 #include "src/allocation.h"
 #include "src/assert-scope.h"
 #include "src/atomic-utils.h"
@@ -23,6 +24,8 @@
 namespace v8 {
 namespace internal {
 
+using v8::MemoryPressureLevel;
+
 // Defines all the roots in Heap.
 #define STRONG_ROOT_LIST(V)                                                    \
   V(Map, byte_array_map, ByteArrayMap)                                         \
@@ -30,7 +33,6 @@
   V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
   V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
   /* Cluster the most popular ones in a few cache lines here at the top.    */ \
-  V(Smi, store_buffer_top, StoreBufferTop)                                     \
   V(Oddball, undefined_value, UndefinedValue)                                  \
   V(Oddball, the_hole_value, TheHoleValue)                                     \
   V(Oddball, null_value, NullValue)                                            \
@@ -75,6 +77,7 @@
   V(Oddball, arguments_marker, ArgumentsMarker)                                \
   V(Oddball, exception, Exception)                                             \
   V(Oddball, termination_exception, TerminationException)                      \
+  V(Oddball, optimized_out, OptimizedOut)                                      \
   V(FixedArray, number_string_cache, NumberStringCache)                        \
   V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
   V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
@@ -134,6 +137,7 @@
   V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
   V(Map, catch_context_map, CatchContextMap)                                   \
   V(Map, with_context_map, WithContextMap)                                     \
+  V(Map, debug_evaluate_context_map, DebugEvaluateContextMap)                  \
   V(Map, block_context_map, BlockContextMap)                                   \
   V(Map, module_context_map, ModuleContextMap)                                 \
   V(Map, script_context_map, ScriptContextMap)                                 \
@@ -147,6 +151,7 @@
   V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap)   \
   V(Map, exception_map, ExceptionMap)                                          \
   V(Map, termination_exception_map, TerminationExceptionMap)                   \
+  V(Map, optimized_out_map, OptimizedOutMap)                                   \
   V(Map, message_object_map, JSMessageObjectMap)                               \
   V(Map, foreign_map, ForeignMap)                                              \
   V(Map, neander_map, NeanderMap)                                              \
@@ -270,6 +275,10 @@
   V(JSMessageObjectMap)                 \
   V(ForeignMap)                         \
   V(NeanderMap)                         \
+  V(NanValue)                           \
+  V(InfinityValue)                      \
+  V(MinusZeroValue)                     \
+  V(MinusInfinityValue)                 \
   V(EmptyWeakCell)                      \
   V(empty_string)                       \
   PRIVATE_SYMBOL_LIST(V)
@@ -326,7 +335,7 @@
     // If the limit is not on the same page, we can ignore it.
     if (Page::FromAllocationTop(limit) != GetHeadPage()) return;
 
-    limit_ = reinterpret_cast<intptr_t*>(limit);
+    limit_ = reinterpret_cast<struct Entry*>(limit);
 
     if (limit_ <= rear_) {
       return;
@@ -348,7 +357,7 @@
     }
    // If the to-space top pointer is less than or equal to the promotion
    // queue head, then the to-space objects are below the promotion queue.
-    return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
+    return reinterpret_cast<struct Entry*>(to_space_top) <= rear_;
   }
 
   bool is_empty() {
@@ -356,44 +365,49 @@
            (emergency_stack_ == NULL || emergency_stack_->length() == 0);
   }
 
-  inline void insert(HeapObject* target, int size);
+  inline void insert(HeapObject* target, int32_t size, bool was_marked_black);
 
-  void remove(HeapObject** target, int* size) {
+  void remove(HeapObject** target, int32_t* size, bool* was_marked_black) {
     DCHECK(!is_empty());
     if (front_ == rear_) {
       Entry e = emergency_stack_->RemoveLast();
       *target = e.obj_;
       *size = e.size_;
+      *was_marked_black = e.was_marked_black_;
       return;
     }
 
-    *target = reinterpret_cast<HeapObject*>(*(--front_));
-    *size = static_cast<int>(*(--front_));
+    struct Entry* entry = reinterpret_cast<struct Entry*>(--front_);
+    *target = entry->obj_;
+    *size = entry->size_;
+    *was_marked_black = entry->was_marked_black_;
+
     // Assert no underflow.
     SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
                                 reinterpret_cast<Address>(front_));
   }
 
  private:
-  // The front of the queue is higher in the memory page chain than the rear.
-  intptr_t* front_;
-  intptr_t* rear_;
-  intptr_t* limit_;
-
-  static const int kEntrySizeInWords = 2;
-
   struct Entry {
-    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}
+    Entry(HeapObject* obj, int32_t size, bool was_marked_black)
+        : obj_(obj), size_(size), was_marked_black_(was_marked_black) {}
 
     HeapObject* obj_;
-    int size_;
+    int32_t size_ : 31;
+    bool was_marked_black_ : 1;
   };
+
+  void RelocateQueueHead();
+
+  // The front of the queue is higher in the memory page chain than the rear.
+  struct Entry* front_;
+  struct Entry* rear_;
+  struct Entry* limit_;
+
   List<Entry>* emergency_stack_;
 
   Heap* heap_;
 
-  void RelocateQueueHead();
-
   DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
 };
 
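Note on the PromotionQueue rewrite above: entries used to be two raw intptr_t
words (pointer, then size); they are now typed Entry records that pack a
31-bit size and the incremental-marking bit into one 32-bit field, so carrying
was_marked_black through promotion adds no queue space. A minimal,
self-contained sketch of the same bitfield packing (illustrative names, not
V8's code):

    #include <cstdint>
    #include <cstdio>

    struct HeapObject;  // stand-in for v8::internal::HeapObject

    // Size and mark bit share one 32-bit allocation unit via bitfields,
    // mirroring PromotionQueue::Entry in the hunk above.
    struct Entry {
      Entry(HeapObject* obj, int32_t size, bool was_marked_black)
          : obj_(obj), size_(size), was_marked_black_(was_marked_black) {}

      HeapObject* obj_;
      int32_t size_ : 31;          // object size in bytes
      bool was_marked_black_ : 1;  // already marked black by incremental GC?
    };

    int main() {
      Entry e(nullptr, 4096, true);
      std::printf("size=%d marked=%d sizeof=%zu\n",
                  static_cast<int>(e.size_),
                  e.was_marked_black_ ? 1 : 0, sizeof(e));
      return 0;
    }

On a typical 64-bit target the struct is still two words: eight bytes of
pointer plus the packed 32-bit payload and padding.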
@@ -403,6 +417,7 @@
   INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
 };
 
+enum class ClearRecordedSlots { kYes, kNo };
 
 class Heap {
  public:
@@ -536,6 +551,7 @@
 
   STATIC_ASSERT(kUndefinedValueRootIndex ==
                 Internals::kUndefinedValueRootIndex);
+  STATIC_ASSERT(kTheHoleValueRootIndex == Internals::kTheHoleValueRootIndex);
   STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
   STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
   STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
@@ -582,10 +598,6 @@
   // by pointer size.
   static inline void CopyBlock(Address dst, Address src, int byte_size);
 
-  // Optimized version of memmove for blocks with pointer size aligned sizes and
-  // pointer size aligned addresses.
-  static inline void MoveBlock(Address dst, Address src, int byte_size);
-
   // Determines a static visitor id based on the given {map} that can then be
   // stored on the map to facilitate fast dispatch for {StaticVisitorBase}.
   static int GetStaticVisitorIdForMap(Map* map);
@@ -632,8 +644,10 @@
   void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
 
   // Initialize a filler object to keep the ability to iterate over the heap
-  // when introducing gaps within pages.
-  void CreateFillerObjectAt(Address addr, int size);
+  // when introducing gaps within pages. If slots could have been recorded in
+  // the freed area, then pass ClearRecordedSlots::kYes as the mode. Otherwise,
+  // pass ClearRecordedSlots::kNo.
+  void CreateFillerObjectAt(Address addr, int size, ClearRecordedSlots mode);
 
   bool CanMoveObjectStart(HeapObject* object);
 
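The new ClearRecordedSlots argument forces every CreateFillerObjectAt call
site to say whether the freed range may hold recorded (remembered-set) slots.
A standalone sketch of why an enum class beats a bare bool for such a mode
flag (illustrative, not V8's code):

    #include <cstdio>

    enum class ClearRecordedSlots { kYes, kNo };

    void CreateFiller(int size, ClearRecordedSlots mode) {
      std::printf("filler: %d bytes, clear recorded slots: %s\n", size,
                  mode == ClearRecordedSlots::kYes ? "yes" : "no");
    }

    int main() {
      // Unambiguous at the call site, unlike CreateFiller(128, true).
      CreateFiller(128, ClearRecordedSlots::kYes);
      return 0;
    }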
@@ -649,7 +663,7 @@
   void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
 
   // Converts the given boolean condition to JavaScript boolean value.
-  inline Object* ToBoolean(bool condition);
+  inline Oddball* ToBoolean(bool condition);
 
   // Check whether the heap is currently iterable.
   bool IsHeapIterable();
@@ -726,6 +740,10 @@
   bool IdleNotification(double deadline_in_seconds);
   bool IdleNotification(int idle_time_in_ms);
 
+  void MemoryPressureNotification(MemoryPressureLevel level,
+                                  bool is_isolate_locked);
+  void CheckMemoryPressure();
+
   double MonotonicallyIncreasingTimeInMs();
 
   void RecordStats(HeapStats* stats, bool take_snapshot = false);
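
MemoryPressureNotification is the heap-side half of the memory-pressure API
this version exposes on v8::Isolate. A hedged sketch of an embedder forwarding
a platform low-memory signal (the hook name and wiring are assumptions;
MemoryPressureLevel and Isolate::MemoryPressureNotification come from
include/v8.h):

    #include "v8.h"  // V8 public API header

    // Assumed embedder hook, called on a platform low-memory signal.
    void OnSystemMemoryWarning(v8::Isolate* isolate, bool critical) {
      // kCritical asks V8 to free memory as aggressively as it can;
      // kModerate is a milder hint. Both feed Heap::CheckMemoryPressure.
      isolate->MemoryPressureNotification(
          critical ? v8::MemoryPressureLevel::kCritical
                   : v8::MemoryPressureLevel::kModerate);
    }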
@@ -740,6 +758,8 @@
 
     if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
 
+    if (HighMemoryPressure()) return true;
+
     return false;
   }
 
@@ -823,7 +843,12 @@
 
   void SetOptimizeForLatency() { optimize_for_memory_usage_ = false; }
   void SetOptimizeForMemoryUsage();
-  bool ShouldOptimizeForMemoryUsage() { return optimize_for_memory_usage_; }
+  bool ShouldOptimizeForMemoryUsage() {
+    return optimize_for_memory_usage_ || HighMemoryPressure();
+  }
+  bool HighMemoryPressure() {
+    return memory_pressure_level_.Value() != MemoryPressureLevel::kNone;
+  }
 
   // ===========================================================================
   // Initialization. ===========================================================
@@ -853,10 +878,6 @@
   // Getters for spaces. =======================================================
   // ===========================================================================
 
-  // Return the starting address and a mask for the new space.  And-masking an
-  // address with the mask will result in the start address of the new space
-  // for all addresses in either semispace.
-  Address NewSpaceStart() { return new_space_.start(); }
   Address NewSpaceTop() { return new_space_.top(); }
 
   NewSpace* new_space() { return &new_space_; }
@@ -895,11 +916,21 @@
   const char* GetSpaceName(int idx);
 
   // ===========================================================================
+  // API. ======================================================================
+  // ===========================================================================
+
+  void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+  void RegisterExternallyReferencedObject(Object** object);
+
+  // ===========================================================================
   // Getters to other components. ==============================================
   // ===========================================================================
 
   GCTracer* tracer() { return tracer_; }
 
+  EmbedderHeapTracer* embedder_heap_tracer() { return embedder_heap_tracer_; }
+
   PromotionQueue* promotion_queue() { return &promotion_queue_; }
 
   inline Isolate* isolate();
@@ -974,6 +1005,10 @@
   // jslimit_/real_jslimit_ variable in the StackGuard.
   void SetStackLimits();
 
+  // The stack limit is thread-dependent. To be able to reproduce the same
+  // snapshot blob, we need to reset it before serializing.
+  void ClearStackLimits();
+
   // Generated code can treat direct references to this root as constant.
   bool RootCanBeTreatedAsConstant(RootListIndex root_index);
 
@@ -1039,14 +1074,14 @@
   // Iterates over all the other roots in the heap.
   void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
 
-  // Iterate pointers to from semispace of new space found in memory interval
-  // from start to end within |object|.
-  void IteratePointersToFromSpace(HeapObject* target, int size,
-                                  ObjectSlotCallback callback);
+  // Iterate pointers of promoted objects.
+  void IteratePromotedObject(HeapObject* target, int size,
+                             bool was_marked_black,
+                             ObjectSlotCallback callback);
 
-  void IterateAndMarkPointersToFromSpace(HeapObject* object, Address start,
-                                         Address end, bool record_slots,
-                                         ObjectSlotCallback callback);
+  void IteratePromotedObjectPointers(HeapObject* object, Address start,
+                                     Address end, bool record_slots,
+                                     ObjectSlotCallback callback);
 
   // ===========================================================================
   // Store buffer API. =========================================================
@@ -1055,12 +1090,10 @@
   // Write barrier support for object[offset] = o;
   inline void RecordWrite(Object* object, int offset, Object* o);
 
-  Address* store_buffer_top_address() {
-    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
-  }
+  Address* store_buffer_top_address() { return store_buffer()->top_address(); }
 
   void ClearRecordedSlot(HeapObject* object, Object** slot);
-  void ClearRecordedSlotRange(HeapObject* object, Object** start, Object** end);
+  void ClearRecordedSlotRange(Address start, Address end);
 
   // ===========================================================================
   // Incremental marking API. ==================================================
@@ -1081,6 +1114,8 @@
 
   bool TryFinalizeIdleIncrementalMarking(double idle_time_in_ms);
 
+  void RegisterReservationsForBlackAllocation(Reservation* reservations);
+
   IncrementalMarking* incremental_marking() { return incremental_marking_; }
 
   // ===========================================================================
@@ -1144,16 +1179,11 @@
   // GC statistics. ============================================================
   // ===========================================================================
 
-  // Returns the maximum amount of memory reserved for the heap.  For
-  // the young generation, we reserve 4 times the amount needed for a
-  // semi space.  The young generation consists of two semi spaces and
-  // we reserve twice the amount needed for those in order to ensure
-  // that new space can be aligned to its size.
+  // Returns the maximum amount of memory reserved for the heap.
   intptr_t MaxReserved() {
-    return 4 * reserved_semispace_size_ + max_old_generation_size_;
+    return 2 * max_semi_space_size_ + max_old_generation_size_;
   }
   int MaxSemiSpaceSize() { return max_semi_space_size_; }
-  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
   int InitialSemiSpaceSize() { return initial_semispace_size_; }
   intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
   intptr_t MaxExecutableSize() { return max_executable_size_; }
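
MaxReserved() no longer reserves 4x a semispace: new space does not have to be
aligned to its size anymore, so only the two live semispaces count. A worked
example with illustrative (assumed, not authoritative) sizes in MB:

    #include <cstdio>

    int main() {
      long max_semi_space_size = 8;        // assumed 64-bit default, MB
      long max_old_generation_size = 700;  // assumed configuration, MB
      // New formula: two semispaces plus the old generation.
      long now = 2 * max_semi_space_size + max_old_generation_size;
      // Old formula: 4x a semispace so new space could be size-aligned.
      long before = 4 * max_semi_space_size + max_old_generation_size;
      std::printf("MaxReserved now: %ld MB, before: %ld MB\n", now, before);
      return 0;
    }

This prints 716 MB now versus 732 MB before for these assumed values.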
@@ -1618,6 +1648,8 @@
 
   void CompactRetainedMaps(ArrayList* retained_maps);
 
+  void CollectGarbageOnMemoryPressure(const char* source);
+
   // Attempt to over-approximate the weak closure by marking object groups and
   // implicit references from global handles, but don't atomically complete
   // marking. If we continue to mark incrementally, we might have marked
@@ -1672,6 +1704,7 @@
   void ProcessYoungWeakReferences(WeakObjectRetainer* retainer);
   void ProcessNativeContexts(WeakObjectRetainer* retainer);
   void ProcessAllocationSites(WeakObjectRetainer* retainer);
+  void ProcessWeakListRoots(WeakObjectRetainer* retainer);
 
   // ===========================================================================
   // GC statistics. ============================================================
@@ -1962,10 +1995,8 @@
   Object* roots_[kRootListLength];
 
   size_t code_range_size_;
-  int reserved_semispace_size_;
   int max_semi_space_size_;
   int initial_semispace_size_;
-  int target_semispace_size_;
   intptr_t max_old_generation_size_;
   intptr_t initial_old_generation_size_;
   bool old_generation_size_configured_;
@@ -1983,6 +2014,10 @@
   // count, as scopes can be acquired from multiple tasks (read: threads).
   AtomicNumber<size_t> always_allocate_scope_count_;
 
+  // Stores the memory pressure level set by MemoryPressureNotification
+  // and reset by a mark-compact garbage collection.
+  AtomicValue<MemoryPressureLevel> memory_pressure_level_;
+
   // For keeping track of context disposals.
   int contexts_disposed_;
 
@@ -2069,6 +2104,7 @@
   int deferred_counters_[v8::Isolate::kUseCounterFeatureCount];
 
   GCTracer* tracer_;
+  EmbedderHeapTracer* embedder_heap_tracer_;
 
   int high_survival_rate_period_length_;
   intptr_t promoted_objects_size_;
@@ -2210,7 +2246,7 @@
   friend class HeapIterator;
   friend class IdleScavengeObserver;
   friend class IncrementalMarking;
-  friend class IteratePointersToFromSpaceVisitor;
+  friend class IteratePromotedObjectsVisitor;
   friend class MarkCompactCollector;
   friend class MarkCompactMarkingVisitor;
   friend class NewSpace;