Reduce mark stack / allocation stack / live stack address space usage.

The stacks now have upper bounds on their sizes so that we don't run
out of virtual address space with large heaps.

Renamed the mark stack to AtomicStack, which is now templated so it can
hold any data type.

Added behaviour to force a GC when the allocation stack fills up.

Added a new special map that reserves the oat file address range until
the oat file is opened.
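
For illustration, heap.h aliases AtomicStack<Object*> as ObjectStack, and
callers must now handle push failure explicitly. A minimal usage sketch
(the stack name, capacity, and obj below are hypothetical):

    ObjectStack* stack = ObjectStack::Create("example-stack", 64 * KB);
    if (!stack->AtomicPushBack(obj)) {
      // The stack is full; the heap responds by running a GC and retrying.
    }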

Change-Id: I5169dd98b5f5072ac67637798da50cb8fc68af2b
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 5ca76ac..b03b52b 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -198,7 +198,6 @@
 	src/jobject_comparator.cc \
 	src/locks.cc \
 	src/logging.cc \
-	src/mark_stack.cc \
 	src/mark_sweep.cc \
 	src/mem_map.cc \
 	src/memory_region.cc \
diff --git a/src/atomic_integer.h b/src/atomic_integer.h
index 61b93cc..54d5fd8 100644
--- a/src/atomic_integer.h
+++ b/src/atomic_integer.h
@@ -25,8 +25,14 @@
  public:
   AtomicInteger(int32_t value) : value_(value) { }
 
+  // Unsafe assignment operator: performs a plain, non-atomic store.
+  AtomicInteger& operator = (int32_t new_value) {
+    value_ = new_value;
+    return *this;
+  }
+
   operator int32_t () const {
-    return get();
+    return value_;
   }
 
   int32_t get() const {
@@ -49,11 +55,11 @@
     return android_atomic_and(-value, &value_);
   }
 
-  int32_t operator ++ () {
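+  // Postfix increment: android_atomic_inc returns the old value, matching postfix semantics.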
+  int32_t operator ++ (int32_t) {
     return android_atomic_inc(&value_);
   }
 
-  int32_t operator -- () {
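+  // Postfix decrement: android_atomic_dec likewise returns the old value.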
+  int32_t operator -- (int32_t) {
     return android_atomic_dec(&value_);
   }
  private:
diff --git a/src/atomic_stack.h b/src/atomic_stack.h
new file mode 100644
index 0000000..3494861
--- /dev/null
+++ b/src/atomic_stack.h
@@ -0,0 +1,173 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_ATOMIC_STACK_H_
+#define ART_SRC_ATOMIC_STACK_H_
+
+#include <string>
+
+#include "atomic_integer.h"
+#include "UniquePtr.h"
+#include "logging.h"
+#include "macros.h"
+#include "mem_map.h"
+#include "utils.h"
+
+namespace art {
+
+template <typename T>
+class AtomicStack {
+ public:
+  // Capacity is how many elements we can store in the stack.
+  static AtomicStack* Create(const std::string& name, size_t capacity) {
+    UniquePtr<AtomicStack> stack(new AtomicStack(name, capacity));
+    stack->Init();
+    return stack.release();
+  }
+
+  ~AtomicStack() {}
+
+  void Reset() {
+    DCHECK(mem_map_.get() != NULL);
+    DCHECK(begin_ != NULL);
+    front_index_ = 0;
+    back_index_ = 0;
+    int result = madvise(begin_, sizeof(T) * capacity_, MADV_DONTNEED);
+    if (result == -1) {
+      PLOG(WARNING) << "madvise failed";
+    }
+  }
+
+  // Beware: Mixing atomic pushes and atomic pops will cause the ABA problem.
+
+  // Returns false if we overflowed the stack.
+  bool AtomicPushBack(const T& value) {
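+    // The atomic postfix increment reserves a slot and returns the prior index.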
+    const int32_t index = back_index_++;
+    if (UNLIKELY(static_cast<size_t>(index) >= capacity_)) {
+      // Stack overflow.
+      back_index_--;
+      return false;
+    }
+    begin_[index] = value;
+    return true;
+  }
+
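+  // Non-atomic push: only safe when no other thread accesses the stack concurrently.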
+  void PushBack(const T& value) {
+    int32_t index = back_index_;
+    DCHECK_LT(static_cast<size_t>(index), capacity_);
+    back_index_ = index + 1;
+    begin_[index] = value;
+  }
+
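+  // Non-atomic pop: only safe when no other thread accesses the stack concurrently.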
+  T PopBack() {
+    DCHECK_GT(back_index_, front_index_);
+    // Decrement the back index non atomically.
+    back_index_ = back_index_ - 1;
+    return begin_[back_index_];
+  }
+
+  T AtomicPopBack() {
+    // Decrement the back index atomically; the postfix operator returns the old value.
+    int back_index = back_index_--;
+    DCHECK_GT(back_index, front_index_);
+    return begin_[back_index - 1];
+  }
+
+  // Take an item from the front of the stack.
+  T PopFront() {
+    int32_t index = front_index_;
+    DCHECK_LT(index, back_index_.get());
+    front_index_ = front_index_ + 1;
+    return begin_[index];
+  }
+
+  bool IsEmpty() const {
+    return Size() == 0;
+  }
+
+  size_t Size() const {
+    DCHECK_LE(front_index_, back_index_);
+    return back_index_ - front_index_;
+  }
+
+  T* Begin() {
+    return begin_ + front_index_;
+  }
+
+  T* End() {
+    return begin_ + back_index_;
+  }
+
+  size_t Capacity() const {
+    return capacity_;
+  }
+
+  // Change the capacity; beware that this clears the stack contents.
+  void Resize(size_t new_capacity) {
+    capacity_ = new_capacity;
+    Init();
+  }
+
+ private:
+  AtomicStack(const std::string& name, const size_t capacity)
+      : name_(name),
+        back_index_(0),
+        front_index_(0),
+        begin_(NULL),
+        capacity_(capacity) {}
+
+  // Allocate the backing memory map (capacity_ elements of sizeof(T) bytes each).
+  void Init() {
+    mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, capacity_ * sizeof(T), PROT_READ | PROT_WRITE));
+    if (mem_map_.get() == NULL) {
+      std::string maps;
+      ReadFileToString("/proc/self/maps", &maps);
+      LOG(FATAL) << "couldn't allocate " << name_ << "\n" << maps;
+    }
+    byte* addr = mem_map_->Begin();
+    CHECK(addr != NULL);
+    begin_ = reinterpret_cast<T*>(addr);
+    Reset();
+  }
+
+  // Name of the stack, used to label the backing memory map.
+  std::string name_;
+
+  // Memory mapping of the atomic stack.
+  UniquePtr<MemMap> mem_map_;
+
+  // Back index (index after the last element pushed).
+  AtomicInteger back_index_;
+
+  // Front index, used for implementing PopFront.
+  AtomicInteger front_index_;
+
+  // Base of the atomic stack.
+  T* begin_;
+
+  // Maximum number of elements.
+  size_t capacity_;
+
+  DISALLOW_COPY_AND_ASSIGN(AtomicStack);
+};
+
+}  // namespace art
+
+#endif  // ART_SRC_ATOMIC_STACK_H_
diff --git a/src/class_linker.cc b/src/class_linker.cc
index 8a655bd..929582a 100644
--- a/src/class_linker.cc
+++ b/src/class_linker.cc
@@ -702,6 +702,7 @@
   std::string oat_filename;
   oat_filename += runtime->GetHostPrefix();
   oat_filename += oat_location->ToModifiedUtf8();
+  runtime->GetHeap()->UnReserveOatFileAddressRange();
   OatFile* oat_file = OatFile::Open(oat_filename, oat_filename,
                                     image_header.GetOatBegin(),
                                     OatFile::kRelocNone);
diff --git a/src/heap.cc b/src/heap.cc
index f7cb68d..e2190ec 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -22,6 +22,7 @@
 #include <limits>
 #include <vector>
 
+#include "atomic_stack.h"
 #include "card_table.h"
 #include "debugger.h"
 #include "heap_bitmap.h"
@@ -121,6 +122,10 @@
   return true;
 }
 
+void Heap::UnReserveOatFileAddressRange() {
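+  // Destroying the map releases the reserved address range so the oat file can be mapped there.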
+  oat_file_map_.reset(NULL);
+}
+
 Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
            const std::string& original_image_file_name, bool concurrent_gc)
     : alloc_space_(NULL),
@@ -149,6 +154,7 @@
       last_trim_time_(0),
       try_running_gc_(false),
       requesting_gc_(false),
+      max_allocation_stack_size_(MB),
       reference_referent_offset_(0),
       reference_queue_offset_(0),
       reference_queueNext_offset_(0),
@@ -187,21 +193,29 @@
         image_space = ImageSpace::Create(image_file_name);
       }
     }
+
     CHECK(image_space != NULL) << "Failed to create space from " << image_file_name;
     AddSpace(image_space);
     // Oat files referenced by image files immediately follow them in memory, ensure alloc space
     // isn't going to get in the middle
-    byte* oat_end_addr = GetImageSpace()->GetImageHeader().GetOatEnd();
-    CHECK_GT(oat_end_addr, GetImageSpace()->End());
+    byte* oat_end_addr = image_space->GetImageHeader().GetOatEnd();
+    CHECK_GT(oat_end_addr, image_space->End());
+
+    // Reserve address range from image_space->End() to image_space->GetImageHeader().GetOatEnd()
+    uintptr_t reserve_begin = RoundUp(reinterpret_cast<uintptr_t>(image_space->End()), kPageSize);
+    uintptr_t reserve_end = RoundUp(reinterpret_cast<uintptr_t>(oat_end_addr), kPageSize);
+    oat_file_map_.reset(MemMap::MapAnonymous("oat file reserve",
+                                             reinterpret_cast<byte*>(reserve_begin),
+                                             reserve_end - reserve_begin, PROT_READ));
+
     if (oat_end_addr > requested_begin) {
       requested_begin = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_end_addr),
                                                           kPageSize));
     }
   }
 
-  // Allocate the large object space (placed after the alloc space).
-  large_object_space_.reset(FreeListSpace::Create("large object space", requested_begin + capacity,
-                                                  capacity));
+  // Allocate the large object space.
+  large_object_space_.reset(FreeListSpace::Create("large object space", NULL, capacity));
   live_bitmap_->SetLargeObjects(large_object_space_->GetLiveObjects());
   mark_bitmap_->SetLargeObjects(large_object_space_->GetMarkObjects());
 
@@ -242,12 +256,12 @@
   num_bytes_allocated_ = 0;
 
-  // Max stack size in bytes.
+  // Default mark stack size in elements; the mark stack is grown on demand during marking.
-  static const size_t max_stack_size = capacity / SpaceBitmap::kAlignment * kWordSize;
-
-  // TODO: Rename MarkStack to a more generic name?
-  mark_stack_.reset(MarkStack::Create("dalvik-mark-stack", max_stack_size));
-  allocation_stack_.reset(MarkStack::Create("dalvik-allocation-stack", max_stack_size));
-  live_stack_.reset(MarkStack::Create("dalvik-live-stack", max_stack_size));
+  static const size_t default_mark_stack_size = 64 * KB;
+  mark_stack_.reset(ObjectStack::Create("dalvik-mark-stack", default_mark_stack_size));
+  allocation_stack_.reset(ObjectStack::Create("dalvik-allocation-stack",
+                                              max_allocation_stack_size_));
+  live_stack_.reset(ObjectStack::Create("dalvik-live-stack",
+                                        max_allocation_stack_size_));
 
   // It's still too early to take a lock because there are no threads yet,
   // but we can create the heap lock now. We don't create it earlier to
@@ -523,7 +537,7 @@
   GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
 }
 
-void Heap::RecordAllocation(size_t size, const Object* obj) {
+void Heap::RecordAllocation(size_t size, Object* obj) {
   DCHECK(obj != NULL);
   DCHECK_GT(size, 0u);
   num_bytes_allocated_ += size;
@@ -539,7 +553,15 @@
     global_stats->allocated_bytes += size;
   }
 
-  allocation_stack_->AtomicPush(obj);
+  // If the push fails, the allocation stack is full: run a GC to empty it and retry. This is
+  // safe because the GC never frees objects that are in either the allocation stack or the live
+  // bitmap.
+  while (!allocation_stack_->AtomicPushBack(obj)) {
+    Thread* self = Thread::Current();
+    self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
+    // Run a GC to empty the allocation stack so that the push succeeds on retry.
+    CollectGarbageInternal(kGcTypeSticky, kGcCauseForAlloc, false);
+    self->TransitionFromSuspendedToRunnable();
+  }
 }
 
 void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
@@ -784,10 +806,10 @@
   return num_bytes_allocated_;
 }
 
-void Heap::MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, MarkStack* stack) {
-  const size_t count = stack->Size();
-  for (size_t i = 0; i < count; ++i) {
-    const Object* obj = stack->Get(i);
+void Heap::MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack) {
+  Object** limit = stack->End();
+  for (Object** it = stack->Begin(); it != limit; ++it) {
+    const Object* obj = *it;
     DCHECK(obj != NULL);
     if (LIKELY(bitmap->HasAddress(obj))) {
       bitmap->Set(obj);
@@ -797,10 +819,10 @@
   }
 }
 
-void Heap::UnMarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, MarkStack* stack) {
-  size_t count = stack->Size();
-  for (size_t i = 0; i < count; ++i) {
-    const Object* obj = stack->Get(i);
+void Heap::UnMarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack) {
+  Object** limit = stack->End();
+  for (Object** it = stack->Begin(); it != limit; ++it) {
+    const Object* obj = *it;
     DCHECK(obj != NULL);
     if (LIKELY(bitmap->HasAddress(obj))) {
       bitmap->Clear(obj);
@@ -970,7 +992,7 @@
     timings.AddSplit("MarkRoots");
 
     // Roots are marked on the bitmap and the mark_stack is empty.
-    DCHECK(mark_sweep.IsMarkStackEmpty());
+    DCHECK(mark_stack_->IsEmpty());
 
     UpdateAndMarkModUnion(timings, gc_type);
 
@@ -1121,8 +1143,8 @@
     // Verify that the reference is live.
     if (ref != NULL && !IsLive(ref)) {
       CardTable* card_table = heap_->GetCardTable();
-      MarkStack* alloc_stack = heap_->allocation_stack_.get();
-      MarkStack* live_stack = heap_->live_stack_.get();
+      ObjectStack* alloc_stack = heap_->allocation_stack_.get();
+      ObjectStack* live_stack = heap_->live_stack_.get();
 
       byte* card_addr = card_table->CardFromAddr(obj);
       LOG(ERROR) << "Object " << obj << " references dead object " << ref << " on IsDirty = "
@@ -1161,7 +1183,7 @@
           IdentityFunctor());
 
       // Try and see if a mark sweep collector scans the reference.
-      MarkStack* mark_stack = heap_->mark_stack_.get();
+      ObjectStack* mark_stack = heap_->mark_stack_.get();
       MarkSweep ms(mark_stack);
       ms.Init();
       mark_stack->Reset();
@@ -1198,7 +1220,7 @@
       heap_->DumpSpaces();
       LOG(ERROR) << "Object " << obj << " not found in any spaces";
     }
-    MarkStack* alloc_stack = heap_->allocation_stack_.get();
+    ObjectStack* alloc_stack = heap_->allocation_stack_.get();
     // At this point we need to search the allocation since things in the live stack may get swept.
     if (std::binary_search(alloc_stack->Begin(), alloc_stack->End(), const_cast<Object*>(obj))) {
       return true;
@@ -1271,7 +1293,7 @@
       // If the object is not dirty and it is referencing something in the live stack other than
       // class, then it must be on a dirty card.
       if (!card_table->IsDirty(obj)) {
-        MarkStack* live_stack = heap_->live_stack_.get();
+        ObjectStack* live_stack = heap_->live_stack_.get();
         if (std::binary_search(live_stack->Begin(), live_stack->End(), ref) && !ref->IsClass()) {
           if (std::binary_search(live_stack->Begin(), live_stack->End(), obj)) {
             LOG(ERROR) << "Object " << obj << " found in live stack";
@@ -1379,7 +1401,7 @@
 }
 
 void Heap::SwapStacks() {
-  MarkStack* temp = allocation_stack_.release();
+  ObjectStack* temp = allocation_stack_.release();
   allocation_stack_.reset(live_stack_.release());
   live_stack_.reset(temp);
 
@@ -1464,7 +1486,7 @@
       WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
 
       for (Object** it = live_stack_->Begin(); it != live_stack_->End(); ++it) {
-        CHECK(!GetLiveBitmap()->Test(*it));
+        DCHECK(!GetLiveBitmap()->Test(*it));
       }
 
       if (gc_type == kGcTypePartial) {
@@ -1507,7 +1529,7 @@
     }
 
     // Roots are marked on the bitmap and the mark_stack is empty.
-    DCHECK(mark_sweep.IsMarkStackEmpty());
+    DCHECK(mark_stack_->IsEmpty());
 
     // Allow mutators to go again, acquire share on mutator_lock_ to continue.
     thread_list->ResumeAll();
diff --git a/src/heap.h b/src/heap.h
index 7a96fd6..b4bc22f 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -21,6 +21,7 @@
 #include <string>
 #include <vector>
 
+#include "atomic_stack.h"
 #include "atomic_integer.h"
 #include "card_table.h"
 #include "globals.h"
@@ -44,7 +45,6 @@
 class HeapBitmap;
 class ImageSpace;
 class LargeObjectSpace;
-class MarkStack;
 class ModUnionTable;
 class Mutex;
 class Object;
@@ -53,6 +53,7 @@
 class Thread;
 class TimingLogger;
 
+typedef AtomicStack<Object*> ObjectStack;
 typedef std::vector<ContinuousSpace*> Spaces;
 
 // The ordering of the enum matters, it is used to determine which GCs are run first.
@@ -273,11 +274,11 @@
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Mark all the objects in the allocation stack in the specified bitmap.
-  void MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, MarkStack* stack)
+  void MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Unmark all the objects in the allocation stack in the specified bitmap.
-  void UnMarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, MarkStack* stack)
+  void UnMarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   // Update and mark mod union table based on gc type.
@@ -293,6 +294,9 @@
   }
   void DumpSpaces();
 
+  // Un-reserves the address range where the oat file will be placed.
+  void UnReserveOatFileAddressRange();
+
  private:
   // Allocates uninitialized storage. Passing in a null space tries to place the object in the
   // large object space.
@@ -314,8 +318,9 @@
   // Swap bitmaps (if we are a full Gc then we swap the zygote bitmap too).
   void SwapBitmaps(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_);
 
-  void RecordAllocation(size_t size, const Object* object)
-      LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_);
+  void RecordAllocation(size_t size, Object* object)
+      LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
   // which type of Gc was actually ran.
@@ -355,6 +360,9 @@
 
   Spaces spaces_;
 
+  // A map that we use to temporarily reserve the address range for the oat file.
+  UniquePtr<MemMap> oat_file_map_;
+
   // The alloc space which we are currently allocating into.
   AllocSpace* alloc_space_;
 
@@ -447,14 +455,15 @@
   volatile bool requesting_gc_;
 
   // Mark stack that we reuse to avoid re-allocating the mark stack.
-  UniquePtr<MarkStack> mark_stack_;
+  UniquePtr<ObjectStack> mark_stack_;
 
   // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
   // to use the live bitmap as the old mark bitmap.
-  UniquePtr<MarkStack> allocation_stack_;
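+  // Maximum number of elements in the allocation stack before we force a GC.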
+  const size_t max_allocation_stack_size_;
+  UniquePtr<ObjectStack> allocation_stack_;
 
   // Second allocation stack so that we can process allocation with the heap unlocked.
-  UniquePtr<MarkStack> live_stack_;
+  UniquePtr<ObjectStack> live_stack_;
 
   // offset of java.lang.ref.Reference.referent
   MemberOffset reference_referent_offset_;
diff --git a/src/mark_stack.cc b/src/mark_stack.cc
deleted file mode 100644
index 2f18898..0000000
--- a/src/mark_stack.cc
+++ /dev/null
@@ -1,65 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "mark_stack.h"
-
-#include "globals.h"
-#include "logging.h"
-#include "UniquePtr.h"
-#include "utils.h"
-
-namespace art {
-
-MarkStack* MarkStack::Create(const std::string& name, size_t length) {
-  UniquePtr<MarkStack> mark_stack(new MarkStack(name));
-  mark_stack->Init(length);
-  return mark_stack.release();
-}
-
-void MarkStack::Init(size_t length) {
-  mem_map_.reset(MemMap::MapAnonymous(name_.c_str(), NULL, length, PROT_READ | PROT_WRITE));
-  if (mem_map_.get() == NULL) {
-    std::string maps;
-    ReadFileToString("/proc/self/maps", &maps);
-    LOG(FATAL) << "couldn't allocate mark stack\n" << maps;
-  }
-  byte* addr = mem_map_->Begin();
-  CHECK(addr != NULL);
-
-  begin_ = reinterpret_cast<const Object**>(addr);
-  limit_ = reinterpret_cast<const Object**>(addr + length);
-
-  Reset();
-}
-
-void MarkStack::Reset() {
-  DCHECK(mem_map_.get() != NULL);
-  DCHECK(begin_ != NULL);
-  DCHECK(limit_ != NULL);
-  byte* addr = const_cast<byte*>(reinterpret_cast<const byte*>(begin_));
-  const size_t length = limit_ - begin_;
-  ptr_ = reinterpret_cast<const Object**>(addr);
-  int result = madvise(addr, length, MADV_DONTNEED);
-  if (result == -1) {
-    PLOG(WARNING) << "madvise failed";
-  }
-}
-
-MarkStack::~MarkStack() {
-  CHECK(IsEmpty());
-}
-
-}  // namespace art
diff --git a/src/mark_stack.h b/src/mark_stack.h
deleted file mode 100644
index 7e4cec0..0000000
--- a/src/mark_stack.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_SRC_MARK_STACK_H_
-#define ART_SRC_MARK_STACK_H_
-
-#include <string>
-
-#include "atomic.h"
-#include "UniquePtr.h"
-#include "logging.h"
-#include "macros.h"
-#include "mem_map.h"
-
-namespace art {
-
-class Object;
-
-class MarkStack {
- public:
-  // Length is in bytes.
-  static MarkStack* Create(const std::string& name, size_t length);
-
-  ~MarkStack();
-
-  void Push(const Object* obj) {
-    DCHECK(obj != NULL);
-    DCHECK_NE(ptr_, limit_);
-    *ptr_ = obj;
-    ++ptr_;
-  }
-
-  // Beware: Atomic pushes and pops don't mix well.
-  void AtomicPush(const Object* obj) {
-    DCHECK(obj != NULL);
-    DCHECK_NE(ptr_, limit_);
-    DCHECK_EQ(sizeof(ptr_), sizeof(int32_t));
-    int32_t* ptr = reinterpret_cast<int32_t*>(reinterpret_cast<size_t>(&ptr_));
-    *reinterpret_cast<const Object**>(android_atomic_add(sizeof(*ptr_), ptr)) = obj;
-  }
-
-  const Object* Pop() {
-    DCHECK_NE(ptr_, begin_);
-    --ptr_;
-    DCHECK(*ptr_ != NULL);
-    return *ptr_;
-  }
-
-  bool IsEmpty() const {
-    return ptr_ == begin_;
-  }
-
-  size_t Size() const {
-    DCHECK_GE(ptr_, begin_);
-    return ptr_ - begin_;
-  }
-
-  const Object* Get(size_t index) const {
-    DCHECK_LT(index, Size());
-    return begin_[index];
-  }
-
-  Object** Begin() {
-    return const_cast<Object**>(begin_);
-  }
-
-  Object** End() {
-    return const_cast<Object**>(ptr_);
-  }
-
-  void Reset();
-
- private:
-  MarkStack(const std::string& name) : name_(name), begin_(NULL), limit_(NULL), ptr_(NULL) {}
-
-  void Init(size_t length);
-
-  // Name of the mark stack.
-  const std::string& name_;
-
-  // Memory mapping of the mark stack.
-  UniquePtr<MemMap> mem_map_;
-
-  // Base of the mark stack.
-  const Object* const* begin_;
-
-  // Exclusive limit of the mark stack.
-  const Object* const* limit_;
-
-  // Pointer to the top of the mark stack.
-  const Object** ptr_;
-
-  DISALLOW_COPY_AND_ASSIGN(MarkStack);
-};
-
-}  // namespace art
-
-#endif  // ART_SRC_MARK_STACK_H_
diff --git a/src/mark_sweep.cc b/src/mark_sweep.cc
index 96223a9..34e5e2c 100644
--- a/src/mark_sweep.cc
+++ b/src/mark_sweep.cc
@@ -28,7 +28,6 @@
 #include "jni_internal.h"
 #include "logging.h"
 #include "macros.h"
-#include "mark_stack.h"
 #include "monitor.h"
 #include "mutex.h"
 #include "object.h"
@@ -37,7 +36,7 @@
 #include "timing_logger.h"
 #include "thread.h"
 
-#define MARK_STACK_PREFETCH 1
+static const bool kUseMarkStackPrefetch = true;
 
 namespace art {
 
@@ -55,7 +54,7 @@
   MarkSweep* const mark_sweep_;
 };
 
-MarkSweep::MarkSweep(MarkStack* mark_stack)
+MarkSweep::MarkSweep(ObjectStack* mark_stack)
     : current_mark_bitmap_(NULL),
       mark_stack_(mark_stack),
       heap_(NULL),
@@ -123,8 +122,17 @@
   if (!current_mark_bitmap_->Test(obj)) {
     current_mark_bitmap_->Set(obj);
     if (check_finger && obj < finger_) {
+      // Do we need to expand the mark stack?
+      if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+        // Resize() clears the stack, so save the current entries and push them back afterwards.
+        std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
+        mark_stack_->Resize(mark_stack_->Capacity() * 2);
+        for (size_t i = 0; i < temp.size(); ++i) {
+          mark_stack_->PushBack(temp[i]);
+        }
+      }
       // The object must be pushed on to the mark stack.
-      mark_stack_->Push(obj);
+      mark_stack_->PushBack(const_cast<Object*>(obj));
     }
   }
 }
@@ -489,7 +497,7 @@
   }
 }
 
-void MarkSweep::SweepArray(TimingLogger& logger, MarkStack* allocations, bool swap_bitmaps) {
+void MarkSweep::SweepArray(TimingLogger& logger, ObjectStack* allocations, bool swap_bitmaps) {
   size_t freed_bytes = 0;
   AllocSpace* space = heap_->GetAllocSpace();
 
@@ -512,7 +520,7 @@
 
   size_t freed_large_objects = 0;
   size_t count = allocations->Size();
   Object** objects = allocations->Begin();
   Object** out = objects;
 
   // Empty the allocation stack.
@@ -796,44 +804,44 @@
 
 // Scan anything that's on the mark stack.
 void MarkSweep::ProcessMarkStack() {
-#if MARK_STACK_PREFETCH
-  const size_t fifo_size = 4;
-  const size_t fifo_mask = fifo_size - 1;
-  const Object* fifo[fifo_size];
-  for (size_t i = 0;i < fifo_size;++i) {
-    fifo[i] = NULL;
-  }
-  size_t fifo_pos = 0;
-  size_t fifo_count = 0;
-  for (;;) {
-    const Object* obj = fifo[fifo_pos & fifo_mask];
-    if (obj != NULL) {
-      ScanObject(obj);
-      fifo[fifo_pos & fifo_mask] = NULL;
-      --fifo_count;
+  if (kUseMarkStackPrefetch) {
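+    // Feed the mark stack through a small FIFO so each object is prefetched before it is scanned.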
+    const size_t fifo_size = 4;
+    const size_t fifo_mask = fifo_size - 1;
+    const Object* fifo[fifo_size];
+    for (size_t i = 0; i < fifo_size; ++i) {
+      fifo[i] = NULL;
     }
+    size_t fifo_pos = 0;
+    size_t fifo_count = 0;
+    for (;;) {
+      const Object* obj = fifo[fifo_pos & fifo_mask];
+      if (obj != NULL) {
+        ScanObject(obj);
+        fifo[fifo_pos & fifo_mask] = NULL;
+        --fifo_count;
+      }
 
-    if (!mark_stack_->IsEmpty()) {
-      const Object* obj = mark_stack_->Pop();
+      if (!mark_stack_->IsEmpty()) {
+        const Object* obj = mark_stack_->PopBack();
+        DCHECK(obj != NULL);
+        fifo[fifo_pos & fifo_mask] = obj;
+        __builtin_prefetch(obj);
+        fifo_count++;
+      }
+      fifo_pos++;
+
+      if (!fifo_count) {
+        CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
+        break;
+      }
+    }
+  } else {
+    while (!mark_stack_->IsEmpty()) {
+      const Object* obj = mark_stack_->PopBack();
       DCHECK(obj != NULL);
-      fifo[fifo_pos & fifo_mask] = obj;
-      __builtin_prefetch(obj);
-      fifo_count++;
-    }
-    fifo_pos++;
-
-    if (!fifo_count) {
-      CHECK(mark_stack_->IsEmpty()) << mark_stack_->Size();
-      break;
+      ScanObject(obj);
     }
   }
-#else
-  while (!mark_stack_->IsEmpty()) {
-    const Object* obj = mark_stack_->Pop();
-    DCHECK(obj != NULL);
-    ScanObject(obj);
-  }
-#endif
 }
 
 // Walks the reference list marking any references subject to the
diff --git a/src/mark_sweep.h b/src/mark_sweep.h
index 7ef6817..7b4ff6e 100644
--- a/src/mark_sweep.h
+++ b/src/mark_sweep.h
@@ -17,8 +17,8 @@
 #ifndef ART_SRC_MARK_SWEEP_H_
 #define ART_SRC_MARK_SWEEP_H_
 
+#include "atomic_stack.h"
 #include "macros.h"
-#include "mark_stack.h"
 #include "heap_bitmap.h"
 #include "object.h"
 #include "offsets.h"
@@ -37,7 +37,7 @@
 
 class MarkSweep {
  public:
-  explicit MarkSweep(MarkStack* mark_stack);
+  explicit MarkSweep(ObjectStack* mark_stack);
 
   ~MarkSweep();
 
@@ -52,10 +52,6 @@
   // Verify that image roots point to only marked objects within the alloc space.
   void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
-  bool IsMarkStackEmpty() const {
-    return mark_stack_->IsEmpty();
-  }
-
   // Builds a mark stack and recursively mark until it empties.
   void RecursiveMark(bool partial, TimingLogger& timings)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
@@ -103,7 +99,7 @@
       EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_);
 
   // Sweep only pointers within an array. WARNING: Trashes objects.
-  void SweepArray(TimingLogger& logger, MarkStack* allocation_stack_, bool swap_bitmaps)
+  void SweepArray(TimingLogger& logger, ObjectStack* allocation_stack_, bool swap_bitmaps)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
 
   Object* GetClearedReferences() {
@@ -373,7 +369,7 @@
   // Current space, we check this space first to avoid searching for the appropriate space for an object.
   SpaceBitmap* current_mark_bitmap_;
 
-  MarkStack* mark_stack_;
+  ObjectStack* mark_stack_;
 
   Heap* heap_;