Upgrade V8 to 5.1.281.57  DO NOT MERGE

FPIIM-449

Change-Id: Id981b686b4d587ac31697662eb98bb34be42ad90
(cherry picked from commit 3b9bc31999c9787eb726ecdbfd5796bfdec32a18)
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
index c0d399f..93a81cc 100644
--- a/src/heap/spaces.h
+++ b/src/heap/spaces.h
@@ -27,11 +27,14 @@
 class Isolate;
 class MemoryAllocator;
 class MemoryChunk;
+class NewSpacePage;
+class Page;
 class PagedSpace;
 class SemiSpace;
 class SkipList;
 class SlotsBuffer;
 class SlotSet;
+class TypedSlotSet;
 class Space;
 
 // -----------------------------------------------------------------------------
@@ -105,10 +108,6 @@
 #define DCHECK_PAGE_OFFSET(offset) \
   DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
 
-#define DCHECK_MAP_PAGE_INDEX(index) \
-  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
-
-
 class MarkBit {
  public:
   typedef uint32_t CellType;
@@ -204,6 +203,8 @@
 
   static inline void Clear(MemoryChunk* chunk);
 
+  static inline void SetAllBits(MemoryChunk* chunk);
+
   static void PrintWord(uint32_t word, uint32_t himask = 0) {
     for (uint32_t mask = 1; mask != 0; mask <<= 1) {
       if ((mask & himask) != 0) PrintF("[");
@@ -288,6 +289,113 @@
   }
 };
 
+enum FreeListCategoryType {
+  kTiniest,
+  kTiny,
+  kSmall,
+  kMedium,
+  kLarge,
+  kHuge,
+
+  kFirstCategory = kTiniest,
+  kLastCategory = kHuge,
+  kNumberOfCategories = kLastCategory + 1,
+  kInvalidCategory
+};
+
+enum FreeMode { kLinkCategory, kDoNotLinkCategory };
+
+// A free list category maintains a linked list of free memory blocks.
+class FreeListCategory {
+ public:
+  static const int kSize = kIntSize +      // FreeListCategoryType type_
+                           kIntSize +      // int available_
+                           kPointerSize +  // FreeSpace* top_
+                           kPointerSize +  // FreeListCategory* prev_
+                           kPointerSize;   // FreeListCategory* next_
+
+  FreeListCategory()
+      : type_(kInvalidCategory),
+        available_(0),
+        top_(nullptr),
+        prev_(nullptr),
+        next_(nullptr) {}
+
+  void Initialize(FreeListCategoryType type) {
+    type_ = type;
+    available_ = 0;
+    top_ = nullptr;
+    prev_ = nullptr;
+    next_ = nullptr;
+  }
+
+  void Invalidate();
+
+  void Reset();
+
+  void ResetStats() { Reset(); }
+
+  void RepairFreeList(Heap* heap);
+
+  // Relinks the category into the currently owning free list. Requires that the
+  // category is currently unlinked.
+  void Relink();
+
+  bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);
+
+  // Picks a node from the list and stores its size in |node_size|. Returns
+  // nullptr if the category is empty.
+  FreeSpace* PickNodeFromList(int* node_size);
+
+  // Performs a single try to pick a node of at least |minimum_size| from the
+  // category. Stores the actual size in |node_size|. Returns nullptr if no
+  // node is found.
+  FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);
+
+  // Picks a node of at least |minimum_size| from the category. Stores the
+  // actual size in |node_size|. Returns nullptr if no node is found.
+  FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);
+
+  inline FreeList* owner();
+  inline bool is_linked();
+  bool is_empty() { return top() == nullptr; }
+  int available() const { return available_; }
+
+#ifdef DEBUG
+  intptr_t SumFreeList();
+  int FreeListLength();
+#endif
+
+ private:
+  // For debug builds we accurately compute free list lengths up until
+  // {kVeryLongFreeList} by manually walking the list.
+  static const int kVeryLongFreeList = 500;
+
+  inline Page* page();
+
+  FreeSpace* top() { return top_; }
+  void set_top(FreeSpace* top) { top_ = top; }
+  FreeListCategory* prev() { return prev_; }
+  void set_prev(FreeListCategory* prev) { prev_ = prev; }
+  FreeListCategory* next() { return next_; }
+  void set_next(FreeListCategory* next) { next_ = next; }
+
+  // |type_|: The type of this free list category.
+  FreeListCategoryType type_;
+
+  // |available_|: Total available bytes in all blocks of this free list
+  // category.
+  int available_;
+
+  // |top_|: Points to the top FreeSpace* in the free list category.
+  FreeSpace* top_;
+
+  FreeListCategory* prev_;
+  FreeListCategory* next_;
+
+  friend class FreeList;
+  friend class PagedSpace;
+};
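
All retrieval methods on FreeListCategory share the same out-parameter contract: the returned FreeSpace* is the node (or nullptr), and |node_size| receives its actual size. A minimal caller sketch, assuming a category pointer is in scope:

    int node_size = 0;
    // Fast path: take whatever node is on top of this category.
    FreeSpace* node = category->PickNodeFromList(&node_size);
    if (node == nullptr) {
      // Category is empty; a real caller would fall back to a larger
      // category or ask the owning space to grow.
    }
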
 
 // MemoryChunk represents a memory region owned by a specific space.
 // It is divided into the header and the body. Chunk start is always
@@ -303,9 +411,7 @@
     IN_TO_SPACE,    // All pages in new space have one of these two set.
     NEW_SPACE_BELOW_AGE_MARK,
     EVACUATION_CANDIDATE,
-    RESCAN_ON_EVACUATION,
     NEVER_EVACUATE,  // May contain immortal immutables.
-    POPULAR_PAGE,    // Slots buffer of this page overflowed on the previous GC.
 
     // Large objects can have a progress bar in their page header. These object
     // are scanned in increments and will be kept black while being scanned.
@@ -313,6 +419,11 @@
     // to grey transition is performed in the value.
     HAS_PROGRESS_BAR,
 
+    // A black page has all mark bits set to 1 (black). A black page cannot
+    // currently be iterated because it is not swept. Moreover, its live bytes
+    // are not updated.
+    BLACK_PAGE,
+
     // This flag is intended to be used for testing. Works only when both
     // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
     // are set. It forces the page to become an evacuation candidate at next
@@ -334,19 +445,6 @@
     NUM_MEMORY_CHUNK_FLAGS
   };
 
-  // |kCompactionDone|: Initial compaction state of a |MemoryChunk|.
-  // |kCompactingInProgress|:  Parallel compaction is currently in progress.
-  // |kCompactingFinalize|: Parallel compaction is done but the chunk needs to
-  //   be finalized.
-  // |kCompactingAborted|: Parallel compaction has been aborted, which should
-  //   for now only happen in OOM scenarios.
-  enum ParallelCompactingState {
-    kCompactingDone,
-    kCompactingInProgress,
-    kCompactingFinalize,
-    kCompactingAborted,
-  };
-
   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
   //   not be performed on that page. Sweeper threads that are done with their
   //   work will set this value and not touch the page anymore.
@@ -372,8 +470,7 @@
   static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
 
   static const int kSkipEvacuationSlotsRecordingMask =
-      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
-      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
+      (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
 
   static const intptr_t kAlignment =
       (static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -382,6 +479,8 @@
 
   static const intptr_t kSizeOffset = 0;
 
+  static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
+
   static const intptr_t kLiveBytesOffset =
       kSizeOffset + kPointerSize  // size_t size
       + kIntptrSize               // intptr_t flags_
@@ -392,25 +491,26 @@
       + kPointerSize              // Heap* heap_
       + kIntSize;                 // int progress_bar_
 
-  static const size_t kSlotsBufferOffset =
+  static const size_t kOldToNewSlotsOffset =
       kLiveBytesOffset + kIntSize;  // int live_byte_count_
 
   static const size_t kWriteBarrierCounterOffset =
-      kSlotsBufferOffset + kPointerSize  // SlotsBuffer* slots_buffer_;
-      + kPointerSize                     // SlotSet* old_to_new_slots_;
-      + kPointerSize                     // SlotSet* old_to_old_slots_;
-      + kPointerSize;                    // SkipList* skip_list_;
+      kOldToNewSlotsOffset + kPointerSize  // SlotSet* old_to_new_slots_;
+      + kPointerSize                       // SlotSet* old_to_old_slots_;
+      + kPointerSize   // TypedSlotSet* typed_old_to_old_slots_;
+      + kPointerSize;  // SkipList* skip_list_;
 
   static const size_t kMinHeaderSize =
       kWriteBarrierCounterOffset +
       kIntptrSize         // intptr_t write_barrier_counter_
       + kPointerSize      // AtomicValue high_water_mark_
       + kPointerSize      // base::Mutex* mutex_
-      + kPointerSize      // base::AtomicWord parallel_sweeping_
-      + kPointerSize      // AtomicValue parallel_compaction_
+      + kPointerSize      // AtomicValue concurrent_sweeping_
       + 2 * kPointerSize  // AtomicNumber free-list statistics
       + kPointerSize      // AtomicValue next_chunk_
-      + kPointerSize;     // AtomicValue prev_chunk_
+      + kPointerSize      // AtomicValue prev_chunk_
+      // FreeListCategory categories_[kNumberOfCategories]
+      + FreeListCategory::kSize * kNumberOfCategories;
 
   // We add some more space to the computed header size to account for missing
   // alignment requirements in our computation.
@@ -428,7 +528,11 @@
       kBodyOffset - 1 +
       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
 
-  static const int kFlagsOffset = kPointerSize;
+  // Page size in bytes.  This must be a multiple of the OS page size.
+  static const int kPageSize = 1 << kPageSizeBits;
+  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+
+  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
 
   static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
   static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
@@ -474,20 +578,18 @@
     return concurrent_sweeping_;
   }
 
-  AtomicValue<ParallelCompactingState>& parallel_compaction_state() {
-    return parallel_compaction_;
-  }
-
   // Manage live byte count, i.e., count of bytes in black objects.
   inline void ResetLiveBytes();
   inline void IncrementLiveBytes(int by);
 
   int LiveBytes() {
-    DCHECK_LE(static_cast<size_t>(live_byte_count_), size_);
+    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
+    DCHECK(!IsFlagSet(BLACK_PAGE) || live_byte_count_ == 0);
     return live_byte_count_;
   }
 
   void SetLiveBytes(int live_bytes) {
+    if (IsFlagSet(BLACK_PAGE)) return;
     DCHECK_GE(live_bytes, 0);
     DCHECK_LE(static_cast<size_t>(live_bytes), size_);
     live_byte_count_ = live_bytes;
@@ -509,17 +611,18 @@
 
   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
 
-  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
-
-  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
-
   inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
   inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
+  inline TypedSlotSet* typed_old_to_old_slots() {
+    return typed_old_to_old_slots_;
+  }
 
   void AllocateOldToNewSlots();
   void ReleaseOldToNewSlots();
   void AllocateOldToOldSlots();
   void ReleaseOldToOldSlots();
+  void AllocateTypedOldToOldSlots();
+  void ReleaseTypedOldToOldSlots();
 
   Address area_start() { return area_start_; }
   Address area_end() { return area_end_; }
@@ -591,17 +694,6 @@
     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
   }
 
-  void MarkEvacuationCandidate() {
-    DCHECK(!IsFlagSet(NEVER_EVACUATE));
-    DCHECK_NULL(slots_buffer_);
-    SetFlag(EVACUATION_CANDIDATE);
-  }
-
-  void ClearEvacuationCandidate() {
-    DCHECK(slots_buffer_ == NULL);
-    ClearFlag(EVACUATION_CANDIDATE);
-  }
-
   bool ShouldSkipEvacuationSlotRecording() {
     return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
   }
@@ -683,13 +775,12 @@
   // Count of bytes marked black on page.
   int live_byte_count_;
 
-  SlotsBuffer* slots_buffer_;
-
   // A single slot set for small pages (of size kPageSize) or an array of slot
   // sets for large pages. In the latter case the number of entries in the array
   // is ceil(size() / kPageSize).
   SlotSet* old_to_new_slots_;
   SlotSet* old_to_old_slots_;
+  TypedSlotSet* typed_old_to_old_slots_;
 
   SkipList* skip_list_;
 
@@ -702,7 +793,6 @@
   base::Mutex* mutex_;
 
   AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
-  AtomicValue<ParallelCompactingState> parallel_compaction_;
 
   // PagedSpace free-list statistics.
   AtomicNumber<intptr_t> available_in_free_list_;
@@ -713,6 +803,8 @@
   // prev_chunk_ holds a pointer of type MemoryChunk
   AtomicValue<MemoryChunk*> prev_chunk_;
 
+  FreeListCategory categories_[kNumberOfCategories];
+
  private:
   void InitializeReservedMemory() { reservation_.Reset(); }
 
@@ -720,17 +812,6 @@
   friend class MemoryChunkValidator;
 };
 
-enum FreeListCategoryType {
-  kSmall,
-  kMedium,
-  kLarge,
-  kHuge,
-
-  kFirstCategory = kSmall,
-  kLastCategory = kHuge,
-  kNumberOfCategories = kLastCategory + 1
-};
-
 // -----------------------------------------------------------------------------
 // A page is a memory chunk of size 1MB. Large object pages may be larger.
 //
@@ -790,9 +871,6 @@
 
   // ---------------------------------------------------------------------
 
-  // Page size in bytes.  This must be a multiple of the OS page size.
-  static const int kPageSize = 1 << kPageSizeBits;
-
   // Maximum object size that gets allocated into regular pages. Objects larger
   // than that size are allocated in large object space and are never moved in
   // memory. This also applies to new space allocation, since objects are never
@@ -802,11 +880,6 @@
   // short living objects >256K.
   static const int kMaxRegularHeapObjectSize = 600 * KB;
 
-  static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
-
-  // Page size mask.
-  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
-
   inline void ClearGCFields();
 
   static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
@@ -834,6 +907,17 @@
                             available_in_free_list());
   }
 
+  template <typename Callback>
+  inline void ForAllFreeListCategories(Callback callback) {
+    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+      callback(&categories_[i]);
+    }
+  }
+
+  FreeListCategory* free_list_category(FreeListCategoryType type) {
+    return &categories_[type];
+  }
+
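
Since every Page now embeds its categories_ array, per-page free memory can be inspected without going through the owning FreeList. A hedged sketch of summing a page's free bytes with the walker above (the surrounding function is illustrative only):

    int FreeBytesOnPage(Page* page) {
      int free_bytes = 0;
      // Visits kTiniest through kHuge, i.e. kFirstCategory up to but
      // excluding kNumberOfCategories.
      page->ForAllFreeListCategories([&free_bytes](FreeListCategory* category) {
        free_bytes += category->available();
      });
      return free_bytes;
    }
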
 #define FRAGMENTATION_STATS_ACCESSORS(type, name)        \
   type name() { return name##_.Value(); }                \
   void set_##name(type name) { name##_.SetValue(name); } \
@@ -848,6 +932,13 @@
   void Print();
 #endif  // DEBUG
 
+  inline void MarkNeverAllocateForTesting();
+  inline void MarkEvacuationCandidate();
+  inline void ClearEvacuationCandidate();
+
+ private:
+  inline void InitializeFreeListCategories();
+
   friend class MemoryAllocator;
 };
 
@@ -862,6 +953,12 @@
 
   inline void set_next_page(LargePage* page) { set_next_chunk(page); }
 
+  // A limit to guarantee that we do not overflow typed slot offset in
+  // the old to old remembered set.
+  // Note that this limit is higher than what assembler already imposes on
+  // x64 and ia32 architectures.
+  static const int kMaxCodePageSize = 512 * MB;
+
  private:
   static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
 
@@ -977,8 +1074,8 @@
   STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
   STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
                 offsetof(MemoryChunk, live_byte_count_));
-  STATIC_ASSERT(MemoryChunk::kSlotsBufferOffset ==
-                offsetof(MemoryChunk, slots_buffer_));
+  STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
+                offsetof(MemoryChunk, old_to_new_slots_));
   STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
                 offsetof(MemoryChunk, write_barrier_counter_));
 
@@ -1104,7 +1201,14 @@
     int start_region = RegionNumber(addr);
     int end_region = RegionNumber(addr + size - kPointerSize);
     for (int idx = start_region; idx <= end_region; idx++) {
-      if (starts_[idx] > addr) starts_[idx] = addr;
+      if (starts_[idx] > addr) {
+        starts_[idx] = addr;
+      } else {
+        // In the first region, there may already be an object closer to the
+        // start of the region. Do not change the start in that case. If this
+        // is not the first region, you probably added overlapping objects.
+        DCHECK_EQ(start_region, idx);
+      }
     }
   }
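
The new DCHECK documents the invariant of the starts_ update above: only the first region an object spans may already hold an earlier start; anywhere else an existing lower start would mean overlapping objects. A toy model of that bookkeeping (kRegionSize, kUnset, and the tiny address space are illustrative assumptions, not V8 constants):

    #include <cassert>

    const long kRegionSize = 16;
    const long kUnset = 1L << 30;
    // starts[r] holds the lowest object start inside region r.
    long starts[8] = {kUnset, kUnset, kUnset, kUnset,
                      kUnset, kUnset, kUnset, kUnset};

    void AddObject(long addr, long size) {
      long start_region = addr / kRegionSize;
      long end_region = (addr + size - 1) / kRegionSize;
      for (long r = start_region; r <= end_region; r++) {
        if (starts[r] > addr) {
          starts[r] = addr;
        } else {
          // Only the first region may already contain an earlier object.
          assert(r == start_region);
        }
      }
    }

    int main() {
      AddObject(4, 20);  // spans regions 0 and 1
      AddObject(24, 8);  // begins in region 1; starts[1] stays at 4
      assert(starts[0] == 4 && starts[1] == 4);
      return 0;
    }
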
 
@@ -1143,6 +1247,11 @@
 //
 class MemoryAllocator {
  public:
+  enum AllocationMode {
+    kRegular,
+    kPooled,
+  };
+
   explicit MemoryAllocator(Isolate* isolate);
 
   // Initializes its internal bookkeeping structures.
@@ -1151,8 +1260,13 @@
 
   void TearDown();
 
-  Page* AllocatePage(intptr_t size, PagedSpace* owner,
-                     Executability executable);
+  // Allocates either Page or NewSpacePage from the allocator. AllocationMode
+  // is used to indicate whether pooled allocation, which only works for
+  // MemoryChunk::kPageSize, should be tried first.
+  template <typename PageType, MemoryAllocator::AllocationMode mode = kRegular,
+            typename SpaceType>
+  PageType* AllocatePage(intptr_t size, SpaceType* owner,
+                         Executability executable);
 
   LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
                                Executability executable);
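
A usage sketch for the templated interface above, assuming an allocator and owning spaces are in scope; per the comment, pooled allocation only applies to chunks of MemoryChunk::kPageSize, which makes it a natural fit for new-space pages:

    // Try the pool first; falls back to a fresh mapping if the pool is empty.
    NewSpacePage* new_page =
        allocator->AllocatePage<NewSpacePage, MemoryAllocator::kPooled>(
            NewSpacePage::kAllocatableMemory, semi_space, NOT_EXECUTABLE);

    // Regular (non-pooled) allocation for an old-space page.
    Page* old_page = allocator->AllocatePage<Page>(Page::kAllocatableMemory,
                                                   paged_space, NOT_EXECUTABLE);

    // The matching Free: kPooled returns the chunk to the allocator's pool.
    allocator->Free<MemoryAllocator::kPooled>(new_page);
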
@@ -1164,8 +1278,9 @@
   // FreeMemory can be called concurrently when PreFree was executed before.
   void PerformFreeMemory(MemoryChunk* chunk);
 
-  // Free is a wrapper method, which calls PreFree and PerformFreeMemory
-  // together.
+  // Free is a wrapper method. For kRegular AllocationMode it calls PreFree and
+  // PerformFreeMemory together. For kPooled it will dispatch to pooled free.
+  template <MemoryAllocator::AllocationMode mode = kRegular>
   void Free(MemoryChunk* chunk);
 
   // Returns allocated spaces in bytes.
@@ -1219,8 +1334,6 @@
 
   bool CommitMemory(Address addr, size_t size, Executability executable);
 
-  void FreeNewSpaceMemory(Address addr, base::VirtualMemory* reservation,
-                          Executability executable);
   void FreeMemory(base::VirtualMemory* reservation, Executability executable);
   void FreeMemory(Address addr, size_t size, Executability executable);
 
@@ -1273,6 +1386,14 @@
                                               size_t reserved_size);
 
  private:
+  // See AllocatePage for public interface. Note that currently we only support
+  // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
+  template <typename SpaceType>
+  MemoryChunk* AllocatePagePooled(SpaceType* owner);
+
+  // Frees the given chunk back into the pool.
+  void FreePooled(MemoryChunk* chunk);
+
   Isolate* isolate_;
 
   // Maximum space size in bytes.
@@ -1326,6 +1447,8 @@
     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
   }
 
+  List<MemoryChunk*> chunk_pool_;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
 };
 
@@ -1481,12 +1604,6 @@
 
   void ClearSize() { size_ = capacity_; }
 
-  // Reset the allocation statistics (i.e., available = capacity with no wasted
-  // or allocated bytes).
-  void Reset() {
-    size_ = 0;
-  }
-
   // Accessors for the allocation statistics.
   intptr_t Capacity() { return capacity_; }
   intptr_t MaxCapacity() { return max_capacity_; }
@@ -1513,13 +1630,13 @@
   void ShrinkSpace(int size_in_bytes) {
     capacity_ -= size_in_bytes;
     size_ -= size_in_bytes;
-    CHECK(size_ >= 0);
+    CHECK_GE(size_, 0);
   }
 
   // Allocate from available bytes (available -> size).
   void AllocateBytes(intptr_t size_in_bytes) {
     size_ += size_in_bytes;
-    CHECK(size_ >= 0);
+    CHECK_GE(size_, 0);
   }
 
   // Free allocated bytes, making them available (size -> available).
@@ -1558,80 +1675,6 @@
   intptr_t size_;
 };
 
-
-// A free list category maintains a linked list of free memory blocks.
-class FreeListCategory {
- public:
-  FreeListCategory() : top_(nullptr), end_(nullptr), available_(0) {}
-
-  void Initialize(FreeList* owner, FreeListCategoryType type) {
-    owner_ = owner;
-    type_ = type;
-  }
-
-  // Concatenates {category} into {this}.
-  //
-  // Note: Thread-safe.
-  intptr_t Concatenate(FreeListCategory* category);
-
-  void Reset();
-
-  void Free(FreeSpace* node, int size_in_bytes);
-
-  // Pick a node from the list.
-  FreeSpace* PickNodeFromList(int* node_size);
-
-  // Pick a node from the list and compare it against {size_in_bytes}. If the
-  // node's size is greater or equal return the node and null otherwise.
-  FreeSpace* PickNodeFromList(int size_in_bytes, int* node_size);
-
-  // Search for a node of size {size_in_bytes}.
-  FreeSpace* SearchForNodeInList(int size_in_bytes, int* node_size);
-
-  intptr_t EvictFreeListItemsInList(Page* p);
-  bool ContainsPageFreeListItemsInList(Page* p);
-
-  void RepairFreeList(Heap* heap);
-
-  bool IsEmpty() { return top() == nullptr; }
-
-  FreeList* owner() { return owner_; }
-  int available() const { return available_; }
-
-#ifdef DEBUG
-  intptr_t SumFreeList();
-  int FreeListLength();
-  bool IsVeryLong();
-#endif
-
- private:
-  // For debug builds we accurately compute free lists lengths up until
-  // {kVeryLongFreeList} by manually walking the list.
-  static const int kVeryLongFreeList = 500;
-
-  FreeSpace* top() { return top_.Value(); }
-  void set_top(FreeSpace* top) { top_.SetValue(top); }
-
-  FreeSpace* end() const { return end_; }
-  void set_end(FreeSpace* end) { end_ = end; }
-
-  // |type_|: The type of this free list category.
-  FreeListCategoryType type_;
-
-  // |top_|: Points to the top FreeSpace* in the free list category.
-  AtomicValue<FreeSpace*> top_;
-
-  // |end_|: Points to the end FreeSpace* in the free list category.
-  FreeSpace* end_;
-
-  // |available_|: Total available bytes in all blocks of this free list
-  //   category.
-  int available_;
-
-  // |owner_|: The owning free list of this category.
-  FreeList* owner_;
-};
-
 // A free list maintaining free blocks of memory. The free list is organized in
 // a way to encourage objects allocated around the same time to be near each
 // other. The normal way to allocate is intended to be by bumping a 'top'
@@ -1641,9 +1684,10 @@
 // categories would scatter allocation more.
 
 // The free list is organized in categories as follows:
-// 1-31 words (too small): Such small free areas are discarded for efficiency
-//   reasons. They can be reclaimed by the compactor. However the distance
-//   between top and limit may be this small.
+// kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
+//   allocation when categories >= small do not have entries anymore.
+// 11-31 words (tiny): The tiny blocks are only used for allocation when
+//   categories >= small do not have entries anymore.
 // 32-255 words (small): Used for allocating free space between 1-31 words in
 //   size.
 // 256-2047 words (medium): Used for allocating free space between 32-255 words
@@ -1657,8 +1701,12 @@
   // This method returns how much memory can be allocated after freeing
   // maximum_freed memory.
   static inline int GuaranteedAllocatable(int maximum_freed) {
-    if (maximum_freed <= kSmallListMin) {
+    if (maximum_freed <= kTiniestListMax) {
+      // Since we are not iterating over all list entries, we cannot guarantee
+      // that we can find the maximum freed block in that free list.
       return 0;
+    } else if (maximum_freed <= kTinyListMax) {
+      return kTinyAllocationMax;
     } else if (maximum_freed <= kSmallListMax) {
       return kSmallAllocationMax;
     } else if (maximum_freed <= kMediumListMax) {
@@ -1671,19 +1719,13 @@
 
   explicit FreeList(PagedSpace* owner);
 
-  // The method concatenates {other} into {this} and returns the added bytes,
-  // including waste.
-  //
-  // Note: Thread-safe.
-  intptr_t Concatenate(FreeList* other);
-
   // Adds a node on the free list. The block of size {size_in_bytes} starting
   // at {start} is placed on the free list. The return value is the number of
   // bytes that were not added to the free list, because they freed memory block
   // was too small. Bookkeeping information will be written to the block, i.e.,
   // its contents will be destroyed. The start address should be word aligned,
   // and the size should be a non-zero multiple of the word size.
-  int Free(Address start, int size_in_bytes);
+  int Free(Address start, int size_in_bytes, FreeMode mode);
 
   // Allocate a block of size {size_in_bytes} from the free list. The block is
   // uninitialized. A failure is returned if no block is available. The size
@@ -1693,70 +1735,118 @@
   // Clear the free list.
   void Reset();
 
-  void ResetStats() { wasted_bytes_ = 0; }
+  void ResetStats() {
+    wasted_bytes_.SetValue(0);
+    ForAllFreeListCategories(
+        [](FreeListCategory* category) { category->ResetStats(); });
+  }
 
   // Return the number of bytes available on the free list.
   intptr_t Available() {
     intptr_t available = 0;
-    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-      available += category_[i].available();
-    }
+    ForAllFreeListCategories([&available](FreeListCategory* category) {
+      available += category->available();
+    });
     return available;
   }
 
-  // The method tries to find a {FreeSpace} node of at least {size_in_bytes}
-  // size in the free list category exactly matching the size. If no suitable
-  // node could be found, the method falls back to retrieving a {FreeSpace}
-  // from the large or huge free list category.
-  //
-  // Can be used concurrently.
-  MUST_USE_RESULT FreeSpace* TryRemoveMemory(intptr_t hint_size_in_bytes);
-
   bool IsEmpty() {
-    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
-      if (!category_[i].IsEmpty()) return false;
-    }
-    return true;
+    bool empty = true;
+    ForAllFreeListCategories([&empty](FreeListCategory* category) {
+      if (!category->is_empty()) empty = false;
+    });
+    return empty;
   }
 
   // Used after booting the VM.
   void RepairLists(Heap* heap);
 
-  intptr_t EvictFreeListItems(Page* p);
-  bool ContainsPageFreeListItems(Page* p);
+  intptr_t EvictFreeListItems(Page* page);
+  bool ContainsPageFreeListItems(Page* page);
 
   PagedSpace* owner() { return owner_; }
-  intptr_t wasted_bytes() { return wasted_bytes_; }
-  base::Mutex* mutex() { return &mutex_; }
+  intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
+
+  template <typename Callback>
+  void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
+    FreeListCategory* current = categories_[type];
+    while (current != nullptr) {
+      FreeListCategory* next = current->next();
+      callback(current);
+      current = next;
+    }
+  }
+
+  template <typename Callback>
+  void ForAllFreeListCategories(Callback callback) {
+    for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
+      ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
+    }
+  }
+
+  bool AddCategory(FreeListCategory* category);
+  void RemoveCategory(FreeListCategory* category);
+  void PrintCategories(FreeListCategoryType type);
 
 #ifdef DEBUG
-  void Zap();
   intptr_t SumFreeLists();
   bool IsVeryLong();
 #endif
 
  private:
+  class FreeListCategoryIterator {
+   public:
+    FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
+        : current_(free_list->categories_[type]) {}
+
+    bool HasNext() { return current_ != nullptr; }
+
+    FreeListCategory* Next() {
+      DCHECK(HasNext());
+      FreeListCategory* tmp = current_;
+      current_ = current_->next();
+      return tmp;
+    }
+
+   private:
+    FreeListCategory* current_;
+  };
+
   // The size range of blocks, in bytes.
   static const int kMinBlockSize = 3 * kPointerSize;
   static const int kMaxBlockSize = Page::kAllocatableMemory;
 
-  static const int kSmallListMin = 0x1f * kPointerSize;
+  static const int kTiniestListMax = 0xa * kPointerSize;
+  static const int kTinyListMax = 0x1f * kPointerSize;
   static const int kSmallListMax = 0xff * kPointerSize;
   static const int kMediumListMax = 0x7ff * kPointerSize;
   static const int kLargeListMax = 0x3fff * kPointerSize;
-  static const int kSmallAllocationMax = kSmallListMin;
+  static const int kTinyAllocationMax = kTiniestListMax;
+  static const int kSmallAllocationMax = kTinyListMax;
   static const int kMediumAllocationMax = kSmallListMax;
   static const int kLargeAllocationMax = kMediumListMax;
 
   FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
-  FreeSpace* FindNodeIn(FreeListCategoryType category, int* node_size);
 
-  FreeListCategory* GetFreeListCategory(FreeListCategoryType category) {
-    return &category_[category];
-  }
+  // Walks all available categories for a given |type| and tries to retrieve
+  // a node. Returns nullptr if the category is empty.
+  FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
+
+  // Tries to retrieve a node from the first category in a given |type|.
+  // Returns nullptr if the category is empty.
+  FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
+                           int minimum_size);
+
+  // Searches a given |type| for a node of at least |minimum_size|.
+  FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
+                                 int minimum_size);
 
   FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
-    if (size_in_bytes <= kSmallListMax) {
+    if (size_in_bytes <= kTiniestListMax) {
+      return kTiniest;
+    } else if (size_in_bytes <= kTinyListMax) {
+      return kTiny;
+    } else if (size_in_bytes <= kSmallListMax) {
       return kSmall;
     } else if (size_in_bytes <= kMediumListMax) {
       return kMedium;
@@ -1766,6 +1856,7 @@
     return kHuge;
   }
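
Spelled out with the constants above (kTiniestListMax = 0xa words, kTinyListMax = 0x1f words, and so on), the bucketing in SelectFreeListCategoryType can be checked in isolation. A self-contained sketch, assuming an 8-byte pointer size:

    #include <cstddef>
    #include <cstdio>

    const int kPointerSize = 8;  // assumption: 64-bit build
    const int kTiniestListMax = 0xa * kPointerSize;   // 10 words
    const int kTinyListMax = 0x1f * kPointerSize;     // 31 words
    const int kSmallListMax = 0xff * kPointerSize;    // 255 words
    const int kMediumListMax = 0x7ff * kPointerSize;  // 2047 words
    const int kLargeListMax = 0x3fff * kPointerSize;  // 16383 words

    // Mirrors the threshold chain of SelectFreeListCategoryType.
    const char* CategoryFor(size_t size_in_bytes) {
      if (size_in_bytes <= kTiniestListMax) return "kTiniest";
      if (size_in_bytes <= kTinyListMax) return "kTiny";
      if (size_in_bytes <= kSmallListMax) return "kSmall";
      if (size_in_bytes <= kMediumListMax) return "kMedium";
      if (size_in_bytes <= kLargeListMax) return "kLarge";
      return "kHuge";
    }

    int main() {
      printf("%s\n", CategoryFor(64));       // 8 words    -> kTiniest
      printf("%s\n", CategoryFor(1024));     // 128 words  -> kSmall
      printf("%s\n", CategoryFor(1 << 20));  // 1MB        -> kHuge
      return 0;
    }
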
 
+  // The tiny categories are not used for fast allocation.
   FreeListCategoryType SelectFastAllocationFreeListCategoryType(
       size_t size_in_bytes) {
     if (size_in_bytes <= kSmallAllocationMax) {
@@ -1778,10 +1869,13 @@
     return kHuge;
   }
 
+  FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
+
   PagedSpace* owner_;
-  base::Mutex mutex_;
-  intptr_t wasted_bytes_;
-  FreeListCategory category_[kNumberOfCategories];
+  AtomicNumber<intptr_t> wasted_bytes_;
+  FreeListCategory* categories_[kNumberOfCategories];
+
+  friend class FreeListCategory;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
 };
@@ -1883,7 +1977,6 @@
   AllocationInfo allocation_info_;
 };
 
-
 class PagedSpace : public Space {
  public:
   static const intptr_t kCompactionMemoryWanted = 500 * KB;
@@ -1940,11 +2033,6 @@
     ResetFreeListStatistics();
   }
 
-  // Increases the number of available bytes of that space.
-  void AddToAccountingStats(intptr_t bytes) {
-    accounting_stats_.DeallocateBytes(bytes);
-  }
-
   // Available bytes without growing.  These are the bytes on the free list.
   // The bytes in the linear allocation area are not included in this total
   // because updating the stats would slow down allocation.  New pages are
@@ -1977,10 +2065,13 @@
     return allocation_info_.limit_address();
   }
 
+  enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
+
   // Allocate the requested number of bytes in the space if possible, return a
-  // failure object if not.
+  // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
+  // to be manually updated later.
   MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
-      int size_in_bytes);
+      int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
 
   MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
       int size_in_bytes);
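
A hedged calling sketch for the new default parameter (space and size_in_bytes are assumed names); the IGNORE_SKIP_LIST path is only safe when the caller updates the skip list itself afterwards:

    // Default: skip list entries are maintained by the allocation itself.
    AllocationResult result = space->AllocateRawUnaligned(size_in_bytes);

    // Deferred: the caller takes over skip-list maintenance, e.g. when
    // carving several objects out of one block and updating entries once.
    AllocationResult raw = space->AllocateRawUnaligned(
        size_in_bytes, PagedSpace::IGNORE_SKIP_LIST);
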
@@ -2000,11 +2091,16 @@
   // Frees the memory region and adds it to the free list, updating the
   // accounting stats. Returns the number of bytes added to the free list.
   int Free(Address start, int size_in_bytes) {
-    int wasted = free_list_.Free(start, size_in_bytes);
+    int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
     accounting_stats_.DeallocateBytes(size_in_bytes);
     return size_in_bytes - wasted;
   }
 
+  int UnaccountedFree(Address start, int size_in_bytes) {
+    int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
+    return size_in_bytes - wasted;
+  }
+
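
The two variants differ in the FreeMode passed down and in accounting: Free() uses kLinkCategory, so the affected category is linked into the owning free list and accounting_stats_ is updated, making the memory immediately allocatable, while UnaccountedFree() uses kDoNotLinkCategory and touches no stats, leaving the category to be relinked later. A brief sketch under those assumptions:

    // Immediately reusable; accounting stats are updated.
    space->Free(start, size_in_bytes);

    // Recorded on the page only; becomes reusable once the page's
    // categories are relinked, e.g. via space->RelinkFreeListCategories(page).
    space->UnaccountedFree(start, size_in_bytes);
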
   void ResetFreeList() { free_list_.Reset(); }
 
   // Set space allocation info.
@@ -2029,7 +2125,7 @@
   void IncreaseCapacity(int size);
 
   // Releases an unused page and shrinks the space.
-  void ReleasePage(Page* page, bool evict_free_list_items);
+  void ReleasePage(Page* page);
 
   // The dummy page that anchors the linked list of pages.
   Page* anchor() { return &anchor_; }
@@ -2085,17 +2181,18 @@
   // sweeper.
   virtual void RefillFreeList();
 
+  FreeList* free_list() { return &free_list_; }
+
+  base::Mutex* mutex() { return &space_mutex_; }
+
+  inline void UnlinkFreeListCategories(Page* page);
+  inline intptr_t RelinkFreeListCategories(Page* page);
+
  protected:
-  void AddMemory(Address start, intptr_t size);
-
-  void MoveOverFreeMemory(PagedSpace* other);
-
   // PagedSpaces that should be included in snapshots have different, i.e.,
   // smaller, initial pages.
   virtual bool snapshotable() { return true; }
 
-  FreeList* free_list() { return &free_list_; }
-
   bool HasPages() { return anchor_.next_page() != &anchor_; }
 
   // Cleans up the space, frees all pages in this space except those belonging
@@ -2143,6 +2240,7 @@
   // Mutex guarding any concurrent access to the space.
   base::Mutex space_mutex_;
 
+  friend class IncrementalMarking;
   friend class MarkCompactCollector;
   friend class PageIterator;
 
@@ -2191,29 +2289,9 @@
 
 class NewSpacePage : public MemoryChunk {
  public:
-  // GC related flags copied from from-space to to-space when
-  // flipping semispaces.
-  static const intptr_t kCopyOnFlipFlagsMask =
-      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
-  static const int kAreaSize = Page::kAllocatableMemory;
-
-  inline NewSpacePage* next_page() {
-    return static_cast<NewSpacePage*>(next_chunk());
-  }
-
-  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
-
-  inline NewSpacePage* prev_page() {
-    return static_cast<NewSpacePage*>(prev_chunk());
-  }
-
-  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
-
-  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
-
-  bool is_anchor() { return !this->InNewSpace(); }
+  static inline NewSpacePage* Initialize(Heap* heap, MemoryChunk* chunk,
+                                         Executability executable,
+                                         SemiSpace* owner);
 
   static bool IsAtStart(Address addr) {
     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
@@ -2224,8 +2302,6 @@
     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
   }
 
-  Address address() { return reinterpret_cast<Address>(this); }
-
   // Finds the NewSpacePage containing the given address.
   static inline NewSpacePage* FromAddress(Address address_in_page) {
     Address page_start =
@@ -2247,14 +2323,33 @@
            NewSpacePage::FromAddress(address2);
   }
 
+  inline NewSpacePage* next_page() {
+    return static_cast<NewSpacePage*>(next_chunk());
+  }
+
+  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
+
+  inline NewSpacePage* prev_page() {
+    return static_cast<NewSpacePage*>(prev_chunk());
+  }
+
+  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
+
+  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
+
+  bool is_anchor() { return !this->InNewSpace(); }
+
  private:
+  // GC related flags copied from from-space to to-space when
+  // flipping semispaces.
+  static const intptr_t kCopyOnFlipFlagsMask =
+      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
   // Create a NewSpacePage object that is only used as anchor
   // for the doubly-linked list of real pages.
   explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
 
-  static NewSpacePage* Initialize(Heap* heap, Address start,
-                                  SemiSpace* semi_space);
-
   // Initialize a fake NewSpacePage used as sentinel at the ends
   // of a doubly-linked list of real NewSpacePages.
   // Only uses the prev/next links, and sets flags to not be in new-space.
@@ -2280,7 +2375,6 @@
         current_capacity_(0),
         maximum_capacity_(0),
         minimum_capacity_(0),
-        start_(nullptr),
         age_mark_(nullptr),
         committed_(false),
         id_(semispace),
@@ -2291,39 +2385,38 @@
   inline bool Contains(Object* o);
   inline bool ContainsSlow(Address a);
 
-  // Creates a space in the young generation. The constructor does not
-  // allocate memory from the OS.
-  void SetUp(Address start, int initial_capacity, int maximum_capacity);
-
-  // Tear down the space.  Heap memory was not allocated by the space, so it
-  // is not deallocated here.
+  void SetUp(int initial_capacity, int maximum_capacity);
   void TearDown();
+  bool HasBeenSetUp() { return maximum_capacity_ != 0; }
 
-  // True if the space has been set up but not torn down.
-  bool HasBeenSetUp() { return start_ != nullptr; }
+  bool Commit();
+  bool Uncommit();
+  bool is_committed() { return committed_; }
 
-  // Grow the semispace to the new capacity.  The new capacity
-  // requested must be larger than the current capacity and less than
-  // the maximum capacity.
+  // Grow the semispace to the new capacity.  The new capacity requested must
+  // be larger than the current capacity and less than the maximum capacity.
   bool GrowTo(int new_capacity);
 
-  // Shrinks the semispace to the new capacity.  The new capacity
-  // requested must be more than the amount of used memory in the
-  // semispace and less than the current capacity.
+  // Shrinks the semispace to the new capacity.  The new capacity requested
+  // must be more than the amount of used memory in the semispace and less
+  // than the current capacity.
   bool ShrinkTo(int new_capacity);
 
   // Returns the start address of the first page of the space.
   Address space_start() {
-    DCHECK_NE(anchor_.next_page(), &anchor_);
+    DCHECK_NE(anchor_.next_page(), anchor());
     return anchor_.next_page()->area_start();
   }
 
-  // Returns the start address of the current page of the space.
-  Address page_low() { return current_page_->area_start(); }
+  NewSpacePage* first_page() { return anchor_.next_page(); }
+  NewSpacePage* current_page() { return current_page_; }
 
   // Returns one past the end address of the space.
   Address space_end() { return anchor_.prev_page()->area_end(); }
 
+  // Returns the start address of the current page of the space.
+  Address page_low() { return current_page_->area_start(); }
+
   // Returns one past the end address of the current page of the space.
   Address page_high() { return current_page_->area_end(); }
 
@@ -2341,17 +2434,10 @@
   Address age_mark() { return age_mark_; }
   void set_age_mark(Address mark);
 
-  bool is_committed() { return committed_; }
-  bool Commit();
-  bool Uncommit();
-
-  NewSpacePage* first_page() { return anchor_.next_page(); }
-  NewSpacePage* current_page() { return current_page_; }
-
-  // Returns the current total capacity of the semispace.
+  // Returns the current capacity of the semispace.
   int current_capacity() { return current_capacity_; }
 
-  // Returns the maximum total capacity of the semispace.
+  // Returns the maximum capacity of the semispace.
   int maximum_capacity() { return maximum_capacity_; }
 
   // Returns the initial capacity of the semispace.
@@ -2393,11 +2479,9 @@
 #endif
 
  private:
-  NewSpacePage* anchor() { return &anchor_; }
+  void RewindPages(NewSpacePage* start, int num_pages);
 
-  void set_current_capacity(int new_capacity) {
-    current_capacity_ = new_capacity;
-  }
+  inline NewSpacePage* anchor() { return &anchor_; }
 
   // Copies the flags into the masked positions on all pages in the space.
   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
@@ -2408,11 +2492,9 @@
   // The maximum capacity that can be used by this space.
   int maximum_capacity_;
 
-  // The mimnimum capacity for the space. A space cannot shrink below this size.
+  // The minimum capacity for the space. A space cannot shrink below this size.
   int minimum_capacity_;
 
-  // The start address of the space.
-  Address start_;
   // Used to govern object promotion during mark-compact collection.
   Address age_mark_;
 
@@ -2488,20 +2570,21 @@
 
 class NewSpace : public Space {
  public:
-  // Constructor.
   explicit NewSpace(Heap* heap)
       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
         reservation_(),
-        top_on_previous_step_(0) {}
+        pages_used_(0),
+        top_on_previous_step_(0),
+        allocated_histogram_(nullptr),
+        promoted_histogram_(nullptr) {}
 
   inline bool Contains(HeapObject* o);
   inline bool ContainsSlow(Address a);
   inline bool Contains(Object* o);
 
-  // Sets up the new space using the given chunk.
-  bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
+  bool SetUp(int initial_semispace_capacity, int max_semispace_capacity);
 
   // Tears down the space.  Heap memory was not allocated by the space, so it
   // is not deallocated here.
@@ -2524,7 +2607,7 @@
 
   // Return the allocated bytes in the active semispace.
   intptr_t Size() override {
-    return pages_used_ * NewSpacePage::kAreaSize +
+    return pages_used_ * NewSpacePage::kAllocatableMemory +
            static_cast<int>(top() - to_space_.page_low());
   }
 
@@ -2537,7 +2620,7 @@
   intptr_t Capacity() {
     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
     return (to_space_.current_capacity() / Page::kPageSize) *
-           NewSpacePage::kAreaSize;
+           NewSpacePage::kAllocatableMemory;
   }
 
   // Return the current size of a semispace, allocatable and non-allocatable
@@ -2564,22 +2647,40 @@
   // Return the available bytes without growing.
   intptr_t Available() override { return Capacity() - Size(); }
 
-  intptr_t PagesFromStart(Address addr) {
-    return static_cast<intptr_t>(addr - bottom()) / Page::kPageSize;
-  }
-
   size_t AllocatedSinceLastGC() {
-    intptr_t allocated = top() - to_space_.age_mark();
-    if (allocated < 0) {
-      // Runtime has lowered the top below the age mark.
+    bool seen_age_mark = false;
+    Address age_mark = to_space_.age_mark();
+    NewSpacePage* current_page = to_space_.first_page();
+    NewSpacePage* age_mark_page = NewSpacePage::FromAddress(age_mark);
+    NewSpacePage* last_page = NewSpacePage::FromAddress(top() - kPointerSize);
+    if (age_mark_page == last_page) {
+      if (top() - age_mark >= 0) {
+        return top() - age_mark;
+      }
+      // Top was reset at some point, invalidating this metric.
       return 0;
     }
-    // Correctly account for non-allocatable regions at the beginning of
-    // each page from the age_mark() to the top().
-    intptr_t pages =
-        PagesFromStart(top()) - PagesFromStart(to_space_.age_mark());
-    allocated -= pages * (NewSpacePage::kObjectStartOffset);
-    DCHECK(0 <= allocated && allocated <= Size());
+    while (current_page != last_page) {
+      if (current_page == age_mark_page) {
+        seen_age_mark = true;
+        break;
+      }
+      current_page = current_page->next_page();
+    }
+    if (!seen_age_mark) {
+      // Top was reset at some point, invalidating this metric.
+      return 0;
+    }
+    intptr_t allocated = age_mark_page->area_end() - age_mark;
+    DCHECK_EQ(current_page, age_mark_page);
+    current_page = age_mark_page->next_page();
+    while (current_page != last_page) {
+      allocated += NewSpacePage::kAllocatableMemory;
+      current_page = current_page->next_page();
+    }
+    allocated += top() - current_page->area_start();
+    DCHECK_LE(0, allocated);
+    DCHECK_LE(allocated, Size());
     return static_cast<size_t>(allocated);
   }
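
The rewritten AllocatedSinceLastGC() walks pages instead of subtracting raw addresses: it takes the tail of the age-mark page, a full kAllocatableMemory for every page strictly between the age-mark page and the top page, and the used prefix of the top page. A toy model of that accounting (PageModel and the linear page vector are illustrative assumptions, not V8 types):

    #include <cassert>
    #include <vector>

    // Pages modeled as [start, end) address ranges, in allocation order.
    struct PageModel { long start, end; };

    long AllocatedSinceLastGC(const std::vector<PageModel>& pages,
                              long age_mark, long top) {
      assert(age_mark <= top);  // the model ignores the "top was reset" case
      size_t i = 0;
      while (pages[i].end <= age_mark) i++;            // find the age-mark page
      if (top <= pages[i].end) return top - age_mark;  // same page: direct diff
      long allocated = pages[i].end - age_mark;        // tail of age-mark page
      for (++i; pages[i].end < top; ++i) {
        allocated += pages[i].end - pages[i].start;    // full pages in between
      }
      return allocated + (top - pages[i].start);       // prefix of top page
    }

    int main() {
      std::vector<PageModel> pages = {{0, 100}, {100, 200}, {200, 300}};
      assert(AllocatedSinceLastGC(pages, 50, 80) == 30);    // within one page
      assert(AllocatedSinceLastGC(pages, 50, 250) == 200);  // spans three pages
      return 0;
    }
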
 
@@ -2617,10 +2718,6 @@
   // Set the age mark in the active semispace.
   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
 
-  // The start address of the space and a bit mask. Anding an address in the
-  // new space with the mask will result in the start address.
-  Address start() { return start_; }
-
   // The allocation top and limit address.
   Address* allocation_top_address() { return allocation_info_.top_address(); }
 
@@ -2735,18 +2832,12 @@
 
   base::Mutex mutex_;
 
-  Address chunk_base_;
-  uintptr_t chunk_size_;
-
   // The semispaces.
   SemiSpace to_space_;
   SemiSpace from_space_;
   base::VirtualMemory reservation_;
   int pages_used_;
 
-  // Start address and bit mask for containment testing.
-  Address start_;
-
   // Allocation pointer and limit for normal allocation and allocation during
   // mark-compact collection.
   AllocationInfo allocation_info_;
@@ -2792,8 +2883,6 @@
 
   bool is_local() override { return true; }
 
-  void RefillFreeList() override;
-
  protected:
   // The space is temporary and not included in any snapshots.
   bool snapshotable() override { return false; }
@@ -2856,12 +2945,7 @@
  public:
   // Creates a map space object.
   MapSpace(Heap* heap, AllocationSpace id)
-      : PagedSpace(heap, id, NOT_EXECUTABLE),
-        max_map_space_pages_(kMaxMapPageIndex - 1) {}
-
-  // Given an index, returns the page address.
-  // TODO(1600): this limit is artifical just to keep code compilable
-  static const int kMaxMapPageIndex = 1 << 16;
+      : PagedSpace(heap, id, NOT_EXECUTABLE) {}
 
   int RoundSizeDownToObjectAlignment(int size) override {
     if (base::bits::IsPowerOfTwo32(Map::kSize)) {
@@ -2874,16 +2958,6 @@
 #ifdef VERIFY_HEAP
   void VerifyObject(HeapObject* obj) override;
 #endif
-
- private:
-  static const int kMapsPerPage = Page::kAllocatableMemory / Map::kSize;
-
-  // Do map space compaction if there is a page gap.
-  int CompactionThreshold() {
-    return kMapsPerPage * (max_map_space_pages_ - 1);
-  }
-
-  const int max_map_space_pages_;
 };
 
 
@@ -2950,6 +3024,8 @@
   // Checks whether the space is empty.
   bool IsEmpty() { return first_page_ == NULL; }
 
+  void AdjustLiveBytes(int by) { objects_size_ += by; }
+
   LargePage* first_page() { return first_page_; }
 
 #ifdef VERIFY_HEAP
@@ -2988,25 +3064,42 @@
   LargePage* current_;
 };
 
+class LargePageIterator BASE_EMBEDDED {
+ public:
+  explicit inline LargePageIterator(LargeObjectSpace* space);
+
+  inline LargePage* next();
+
+ private:
+  LargePage* next_page_;
+};
 
 // Iterates over the chunks (pages and large object pages) that can contain
-// pointers to new space.
-class PointerChunkIterator BASE_EMBEDDED {
+// pointers to new space or to evacuation candidates.
+class MemoryChunkIterator BASE_EMBEDDED {
  public:
-  inline explicit PointerChunkIterator(Heap* heap);
+  enum Mode { ALL, ALL_BUT_MAP_SPACE, ALL_BUT_CODE_SPACE };
+  inline explicit MemoryChunkIterator(Heap* heap, Mode mode);
 
   // Return NULL when the iterator is done.
   inline MemoryChunk* next();
 
  private:
-  enum State { kOldSpaceState, kMapState, kLargeObjectState, kFinishedState };
+  enum State {
+    kOldSpaceState,
+    kMapState,
+    kCodeState,
+    kLargeObjectState,
+    kFinishedState
+  };
   State state_;
+  const Mode mode_;
   PageIterator old_iterator_;
+  PageIterator code_iterator_;
   PageIterator map_iterator_;
-  LargeObjectIterator lo_iterator_;
+  LargePageIterator lo_iterator_;
 };
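
A short usage sketch for the renamed iterator, assuming a Heap* is in scope; per the comment above, next() returns NULL once all chunks have been visited:

    MemoryChunkIterator it(heap, MemoryChunkIterator::ALL);
    while (MemoryChunk* chunk = it.next()) {
      // Visits pages of the paged spaces plus large object pages; a mode of
      // ALL_BUT_MAP_SPACE or ALL_BUT_CODE_SPACE would skip that space.
    }
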
 
-
 #ifdef DEBUG
 struct CommentStatistic {
   const char* comment;