Merge "More space refactoring." into dalvik-dev
diff --git a/build/Android.common.mk b/build/Android.common.mk
index 00ef7d3..0373b58 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -183,6 +183,7 @@
src/file_linux.cc \
src/gc/card_table.cc \
src/gc/heap_bitmap.cc \
+ src/gc/large_object_space.cc \
src/gc/mark_sweep.cc \
src/gc/mod_union_table.cc \
src/gc/space.cc \
diff --git a/src/debugger.cc b/src/debugger.cc
index 1b89d9b..1e1e7b6 100644
--- a/src/debugger.cc
+++ b/src/debugger.cc
@@ -23,6 +23,8 @@
#include "class_linker.h"
#include "class_loader.h"
#include "dex_instruction.h"
+#include "gc/large_object_space.h"
+#include "gc/space.h"
#if !defined(ART_USE_LLVM_COMPILER)
#include "oat/runtime/context.h" // For VmapTable
#endif
@@ -32,7 +34,6 @@
#include "ScopedPrimitiveArray.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
-#include "gc/space.h"
#include "stack_indirect_reference_table.h"
#include "thread_list.h"
#include "well_known_classes.h"
diff --git a/src/gc/large_object_space.cc b/src/gc/large_object_space.cc
new file mode 100644
index 0000000..72c6c73
--- /dev/null
+++ b/src/gc/large_object_space.cc
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "large_object_space.h"
+#include "UniquePtr.h"
+#include "dlmalloc.h"
+#include "file.h"
+#include "image.h"
+#include "logging.h"
+#include "os.h"
+#include "space_bitmap.h"
+#include "stl_util.h"
+#include "utils.h"
+
+namespace art {
+
+void LargeObjectSpace::SwapBitmaps() {
+ SpaceSetMap* temp_live_objects = live_objects_.release();
+ live_objects_.reset(mark_objects_.release());
+ mark_objects_.reset(temp_live_objects);
+ // Swap names to get more descriptive diagnostics.
+ std::string temp_name = live_objects_->GetName();
+ live_objects_->SetName(mark_objects_->GetName());
+ mark_objects_->SetName(temp_name);
+}
+
+LargeObjectSpace::LargeObjectSpace(const std::string& name)
+ : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
+ num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
+ total_objects_allocated_(0) {
+ live_objects_.reset(new SpaceSetMap("large live objects"));
+ mark_objects_.reset(new SpaceSetMap("large marked objects"));
+}
+
+void LargeObjectSpace::CopyLiveToMarked() {
+ mark_objects_->CopyFrom(*live_objects_.get());
+}
+
+LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
+ : LargeObjectSpace(name),
+ lock_("large object space lock", kAllocSpaceLock)
+{
+
+}
+
+LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
+ return new LargeObjectMapSpace(name);
+}
+
+Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
+ MemMap* mem_map = MemMap::MapAnonymous("allocation", NULL, num_bytes, PROT_READ | PROT_WRITE);
+ if (mem_map == NULL) {
+ return NULL;
+ }
+ MutexLock mu(self, lock_);
+ Object* obj = reinterpret_cast<Object*>(mem_map->Begin());
+ large_objects_.push_back(obj);
+ mem_maps_.Put(obj, mem_map);
+ size_t allocation_size = mem_map->Size();
+ num_bytes_allocated_ += allocation_size;
+ total_bytes_allocated_ += allocation_size;
+ ++num_objects_allocated_;
+ ++total_objects_allocated_;
+ return obj;
+}
+
+size_t LargeObjectMapSpace::Free(Thread* self, Object* ptr) {
+ MutexLock mu(self, lock_);
+ MemMaps::iterator found = mem_maps_.find(ptr);
+ CHECK(found != mem_maps_.end()) << "Attempted to free a large object which was not live";
+ DCHECK_GE(num_bytes_allocated_, found->second->Size());
+ size_t allocation_size = found->second->Size();
+ num_bytes_allocated_ -= allocation_size;
+ --num_objects_allocated_;
+ delete found->second;
+ mem_maps_.erase(found);
+ return allocation_size;
+}
+
+size_t LargeObjectMapSpace::AllocationSize(const Object* obj) {
+ MutexLock mu(Thread::Current(), lock_);
+ MemMaps::iterator found = mem_maps_.find(const_cast<Object*>(obj));
+ CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
+ return found->second->Size();
+}
+
+size_t LargeObjectSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
+ size_t total = 0;
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ if (kDebugSpaces) {
+ CHECK(Contains(ptrs[i]));
+ }
+ total += Free(self, ptrs[i]);
+ }
+ return total;
+}
+
+void LargeObjectMapSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
+ MutexLock mu(Thread::Current(), lock_);
+ for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
+ MemMap* mem_map = it->second;
+ callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
+ callback(NULL, NULL, 0, arg);
+ }
+}
+
+bool LargeObjectMapSpace::Contains(const Object* obj) const {
+ MutexLock mu(Thread::Current(), lock_);
+ return mem_maps_.find(const_cast<Object*>(obj)) != mem_maps_.end();
+}
+
+FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
+ CHECK(size % kAlignment == 0);
+ MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
+ PROT_READ | PROT_WRITE);
+ CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
+ return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
+}
+
+FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
+ : LargeObjectSpace(name),
+ begin_(begin),
+ end_(end),
+ mem_map_(mem_map),
+ lock_("free list space lock", kAllocSpaceLock) {
+ chunks_.resize(Size() / kAlignment + 1);
+ // Add a dummy chunk so we don't need to handle chunks having no next chunk.
+ chunks_.back().SetSize(kAlignment, false);
+ // Start out with one large free chunk.
+ AddFreeChunk(begin_, end_ - begin_, NULL);
+}
+
+FreeListSpace::~FreeListSpace() {
+}
+
+void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
+ Chunk* chunk = ChunkFromAddr(address);
+ chunk->SetSize(size, true);
+ chunk->SetPrevious(previous);
+ Chunk* next_chunk = GetNextChunk(chunk);
+ next_chunk->SetPrevious(chunk);
+ free_chunks_.insert(chunk);
+}
+
+FreeListSpace::Chunk* FreeListSpace::ChunkFromAddr(void* address) {
+ size_t offset = reinterpret_cast<byte*>(address) - Begin();
+ DCHECK(IsAligned<kAlignment>(offset));
+ DCHECK_LT(offset, Size());
+ return &chunks_[offset / kAlignment];
+}
+
+void* FreeListSpace::AddrFromChunk(Chunk* chunk) {
+ return reinterpret_cast<void*>(Begin() + (chunk - &chunks_.front()) * kAlignment);
+}
+
+void FreeListSpace::RemoveFreeChunk(Chunk* chunk) {
+ // TODO: C++0x
+ // TODO: Improve performance, this might be slow.
+ std::pair<FreeChunks::iterator, FreeChunks::iterator> range = free_chunks_.equal_range(chunk);
+ for (FreeChunks::iterator it = range.first; it != range.second; ++it) {
+ if (*it == chunk) {
+ free_chunks_.erase(it);
+ return;
+ }
+ }
+}
+
+void FreeListSpace::Walk(DlMallocSpace::WalkCallback callback, void* arg) {
+ MutexLock mu(Thread::Current(), lock_);
+ for (Chunk* chunk = &chunks_.front(); chunk < &chunks_.back(); ) {
+ if (!chunk->IsFree()) {
+ size_t size = chunk->GetSize();
+ void* begin = AddrFromChunk(chunk);
+ void* end = reinterpret_cast<void*>(reinterpret_cast<byte*>(begin) + size);
+ callback(begin, end, size, arg);
+ callback(NULL, NULL, 0, arg);
+ }
+ chunk = GetNextChunk(chunk);
+ }
+}
+
+size_t FreeListSpace::Free(Thread* self, Object* obj) {
+ MutexLock mu(self, lock_);
+ CHECK(Contains(obj));
+ // Check adjacent chunks to see if we need to combine.
+ Chunk* chunk = ChunkFromAddr(obj);
+ CHECK(!chunk->IsFree());
+
+ size_t allocation_size = chunk->GetSize();
+ madvise(obj, allocation_size, MADV_DONTNEED);
+ num_objects_allocated_--;
+ num_bytes_allocated_ -= allocation_size;
+ Chunk* prev = chunk->GetPrevious();
+ Chunk* next = GetNextChunk(chunk);
+
+ // Combine any adjacent free chunks
+ size_t extra_size = chunk->GetSize();
+ if (next->IsFree()) {
+ extra_size += next->GetSize();
+ RemoveFreeChunk(next);
+ }
+ if (prev != NULL && prev->IsFree()) {
+ RemoveFreeChunk(prev);
+ AddFreeChunk(AddrFromChunk(prev), prev->GetSize() + extra_size, prev->GetPrevious());
+ } else {
+ AddFreeChunk(AddrFromChunk(chunk), extra_size, prev);
+ }
+ return allocation_size;
+}
+
+bool FreeListSpace::Contains(const Object* obj) const {
+ return mem_map_->HasAddress(obj);
+}
+
+FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) {
+ return chunk + chunk->GetSize() / kAlignment;
+}
+
+size_t FreeListSpace::AllocationSize(const Object* obj) {
+ Chunk* chunk = ChunkFromAddr(const_cast<Object*>(obj));
+ CHECK(!chunk->IsFree());
+ return chunk->GetSize();
+}
+
+Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
+ MutexLock mu(self, lock_);
+ num_bytes = RoundUp(num_bytes, kAlignment);
+ Chunk temp;
+ temp.SetSize(num_bytes);
+ // Find the smallest chunk at least num_bytes in size.
+ FreeChunks::iterator found = free_chunks_.lower_bound(&temp);
+ if (found == free_chunks_.end()) {
+ // Out of memory, or too much fragmentation.
+ return NULL;
+ }
+ Chunk* chunk = *found;
+ free_chunks_.erase(found);
+ CHECK(chunk->IsFree());
+ void* addr = AddrFromChunk(chunk);
+ size_t chunk_size = chunk->GetSize();
+ chunk->SetSize(num_bytes);
+ if (chunk_size > num_bytes) {
+ // Split the chunk into two chunks.
+ Chunk* new_chunk = GetNextChunk(chunk);
+ AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
+ }
+
+ num_objects_allocated_++;
+ total_objects_allocated_++;
+ num_bytes_allocated_ += num_bytes;
+ total_bytes_allocated_ += num_bytes;
+ return reinterpret_cast<Object*>(addr);
+}
+
+} // namespace art
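For context on the allocator introduced above: FreeListSpace::Alloc performs a
best-fit search over a multiset of free chunks ordered by size, then splits off
any remainder as a new free chunk. A minimal standalone sketch of that strategy
follows; the Chunk type and BestFitAlloc helper here are illustrative
inventions, not part of this change.

    #include <cstddef>
    #include <set>

    struct Chunk {
      size_t size;  // Chunk size in bytes; a real chunk also tracks free/previous state.
    };

    struct BySize {
      bool operator()(const Chunk* a, const Chunk* b) const {
        return a->size < b->size;
      }
    };

    // Best fit: take the smallest free chunk that can hold num_bytes and
    // return any surplus to the free set, as FreeListSpace::Alloc does via
    // AddFreeChunk after its lower_bound lookup.
    Chunk* BestFitAlloc(std::multiset<Chunk*, BySize>& free_chunks, size_t num_bytes) {
      Chunk key;
      key.size = num_bytes;
      std::multiset<Chunk*, BySize>::iterator it = free_chunks.lower_bound(&key);
      if (it == free_chunks.end()) {
        return NULL;  // Out of memory, or too much fragmentation.
      }
      Chunk* chunk = *it;
      free_chunks.erase(it);
      if (chunk->size > num_bytes) {
        Chunk* tail = new Chunk;  // Surplus becomes a new free chunk.
        tail->size = chunk->size - num_bytes;
        free_chunks.insert(tail);
        chunk->size = num_bytes;
      }
      return chunk;
    }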
diff --git a/src/gc/large_object_space.h b/src/gc/large_object_space.h
new file mode 100644
index 0000000..2bf6abf
--- /dev/null
+++ b/src/gc/large_object_space.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_GC_LARGE_OBJECT_SPACE_H_
+#define ART_SRC_GC_LARGE_OBJECT_SPACE_H_
+
+#include "space.h"
+
+namespace art {
+
+class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace {
+ public:
+ virtual bool CanAllocateInto() const {
+ return true;
+ }
+
+ virtual bool IsCompactible() const {
+ return true;
+ }
+
+ virtual SpaceType GetType() const {
+ return kSpaceTypeLargeObjectSpace;
+ }
+
+ virtual SpaceSetMap* GetLiveObjects() const {
+ return live_objects_.get();
+ }
+
+ virtual SpaceSetMap* GetMarkObjects() const {
+ return mark_objects_.get();
+ }
+
+ virtual void SwapBitmaps();
+ virtual void CopyLiveToMarked();
+ virtual void Walk(DlMallocSpace::WalkCallback, void* arg) = 0;
+ virtual ~LargeObjectSpace() {}
+
+ uint64_t GetNumBytesAllocated() const {
+ return num_bytes_allocated_;
+ }
+
+ uint64_t GetNumObjectsAllocated() const {
+ return num_objects_allocated_;
+ }
+
+ uint64_t GetTotalBytesAllocated() const {
+ return total_bytes_allocated_;
+ }
+
+ uint64_t GetTotalObjectsAllocated() const {
+ return total_objects_allocated_;
+ }
+
+ size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs);
+
+ protected:
+ LargeObjectSpace(const std::string& name);
+
+ // Approximate number of bytes which have been allocated into the space.
+ size_t num_bytes_allocated_;
+ size_t num_objects_allocated_;
+ size_t total_bytes_allocated_;
+ size_t total_objects_allocated_;
+
+ UniquePtr<SpaceSetMap> live_objects_;
+ UniquePtr<SpaceSetMap> mark_objects_;
+
+ friend class Space;
+};
+
+class LargeObjectMapSpace : public LargeObjectSpace {
+ public:
+ // Creates a large object space. Allocations into the large object space use memory maps instead
+ // of malloc.
+ static LargeObjectMapSpace* Create(const std::string& name);
+
+ // Return the storage space required by obj.
+ virtual size_t AllocationSize(const Object* obj);
+ virtual Object* Alloc(Thread* self, size_t num_bytes);
+ size_t Free(Thread* self, Object* ptr);
+ virtual void Walk(DlMallocSpace::WalkCallback, void* arg);
+ virtual bool Contains(const Object* obj) const;
+ private:
+ LargeObjectMapSpace(const std::string& name);
+ virtual ~LargeObjectMapSpace() {}
+
+ // Used to ensure mutual exclusion when the allocation space's data structures are being modified.
+ mutable Mutex lock_;
+ std::vector<Object*> large_objects_;
+ typedef SafeMap<Object*, MemMap*> MemMaps;
+ MemMaps mem_maps_;
+};
+
+class FreeListSpace : public LargeObjectSpace {
+ public:
+ virtual ~FreeListSpace();
+ static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
+
+ size_t AllocationSize(const Object* obj);
+ Object* Alloc(Thread* self, size_t num_bytes);
+ size_t Free(Thread* self, Object* obj);
+ bool Contains(const Object* obj) const;
+ void Walk(DlMallocSpace::WalkCallback callback, void* arg);
+
+ // Address at which the space begins
+ byte* Begin() const {
+ return begin_;
+ }
+
+ // Address at which the space ends, which may vary as the space is filled.
+ byte* End() const {
+ return end_;
+ }
+
+ // Current size of space
+ size_t Size() const {
+ return End() - Begin();
+ }
+ private:
+ static const size_t kAlignment = kPageSize;
+
+ class Chunk {
+ public:
+ static const size_t kFreeFlag = 0x80000000;
+
+ struct SortBySize {
+ bool operator()(const Chunk* a, const Chunk* b) const {
+ return a->GetSize() < b->GetSize();
+ }
+ };
+
+ bool IsFree() const {
+ return (m_size & kFreeFlag) != 0;
+ }
+
+ void SetSize(size_t size, bool is_free = false) {
+ m_size = size | (is_free ? kFreeFlag : 0);
+ }
+
+ size_t GetSize() const {
+ return m_size & (kFreeFlag - 1);
+ }
+
+ Chunk* GetPrevious() {
+ return m_previous;
+ }
+
+ void SetPrevious(Chunk* previous) {
+ m_previous = previous;
+ DCHECK(m_previous == NULL ||
+ (m_previous != NULL && m_previous + m_previous->GetSize() / kAlignment == this));
+ }
+ private:
+ size_t m_size;
+ Chunk* m_previous;
+ };
+
+ FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
+ void AddFreeChunk(void* address, size_t size, Chunk* previous);
+ Chunk* ChunkFromAddr(void* address);
+ void* AddrFromChunk(Chunk* chunk);
+ void RemoveFreeChunk(Chunk* chunk);
+ Chunk* GetNextChunk(Chunk* chunk);
+
+ typedef std::multiset<Chunk*, Chunk::SortBySize> FreeChunks;
+ byte* begin_;
+ byte* end_;
+ UniquePtr<MemMap> mem_map_;
+ Mutex lock_;
+ std::vector<Chunk> chunks_;
+ FreeChunks free_chunks_;
+};
+
+} // namespace art
+
+#endif // ART_SRC_GC_LARGE_OBJECT_SPACE_H_
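The Chunk header in this file packs the free bit into the top of the size word
(kFreeFlag). A small self-contained illustration of that encoding, assuming
32-bit sizes as the 0x80000000 constant implies; the names here are
hypothetical, not ART code.

    #include <cassert>

    typedef unsigned int uint32;  // Assumed 32-bit, matching kFreeFlag = 0x80000000.
    static const uint32 kFreeFlag = 0x80000000U;

    struct PackedSize {
      uint32 size_and_flag;

      void SetSize(uint32 size, bool is_free) {
        assert((size & kFreeFlag) == 0);  // Size must stay below the flag bit.
        size_and_flag = size | (is_free ? kFreeFlag : 0);
      }
      bool IsFree() const { return (size_and_flag & kFreeFlag) != 0; }
      uint32 GetSize() const { return size_and_flag & (kFreeFlag - 1); }
    };

    int main() {
      PackedSize chunk;
      chunk.SetSize(4096, true);
      assert(chunk.IsFree());
      assert(chunk.GetSize() == 4096);
      return 0;
    }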
diff --git a/src/gc/mark_sweep.cc b/src/gc/mark_sweep.cc
index b82bc6e..e19e2c4 100644
--- a/src/gc/mark_sweep.cc
+++ b/src/gc/mark_sweep.cc
@@ -26,6 +26,7 @@
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
+#include "large_object_space.h"
#include "logging.h"
#include "macros.h"
#include "monitor.h"
@@ -208,7 +209,7 @@
void MarkSweep::BindLiveToMarkBitmap(ContinuousSpace* space) {
CHECK(space->IsAllocSpace());
- AllocSpace* alloc_space = space->AsAllocSpace();
+ DlMallocSpace* alloc_space = space->AsAllocSpace();
SpaceBitmap* live_bitmap = space->GetLiveBitmap();
SpaceBitmap* mark_bitmap = alloc_space->mark_bitmap_.release();
GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
@@ -499,17 +500,12 @@
// TODO: investigate performance
static const bool kUseFreeList = true;
if (kUseFreeList) {
- for (size_t i = 0; i < num_ptrs; ++i) {
- Object* obj = static_cast<Object*>(ptrs[i]);
- freed_bytes += space->AllocationSize(obj);
- }
// AllocSpace::FreeList clears the value in ptrs, so perform after clearing the live bit
- space->FreeList(self, num_ptrs, ptrs);
+ freed_bytes += space->FreeList(self, num_ptrs, ptrs);
} else {
for (size_t i = 0; i < num_ptrs; ++i) {
Object* obj = static_cast<Object*>(ptrs[i]);
- freed_bytes += space->AllocationSize(obj);
- space->Free(self, obj);
+ freed_bytes += space->Free(self, obj);
}
}
@@ -532,7 +528,7 @@
void MarkSweep::SweepArray(TimingLogger& logger, ObjectStack* allocations, bool swap_bitmaps) {
size_t freed_bytes = 0;
- AllocSpace* space = heap_->GetAllocSpace();
+ DlMallocSpace* space = heap_->GetAllocSpace();
// If we don't swap bitmaps then newly allocated Weaks go into the live bitmap but not mark
// bitmap, resulting in occasional frees of Weaks which are still in use.
@@ -566,22 +562,18 @@
if (!mark_bitmap->Test(obj)) {
// Don't bother un-marking since we clear the mark bitmap anyways.
*(out++) = obj;
- size_t size = space->AllocationSize(obj);
- freed_bytes += size;
}
} else if (!large_mark_objects->Test(obj)) {
++freed_large_objects;
- size_t size = large_object_space->AllocationSize(obj);
- freed_bytes += size;
- large_object_space->Free(self, obj);
+ freed_bytes += large_object_space->Free(self, obj);
}
}
logger.AddSplit("Process allocation stack");
size_t freed_objects = out - objects;
+ freed_bytes += space->FreeList(self, freed_objects, objects);
VLOG(heap) << "Freed " << freed_objects << "/" << count
<< " objects with size " << PrettySize(freed_bytes);
- space->FreeList(self, freed_objects, objects);
heap_->RecordFree(freed_objects + freed_large_objects, freed_bytes);
freed_objects_ += freed_objects;
freed_bytes_ += freed_bytes;
@@ -645,8 +637,7 @@
Thread* self = Thread::Current();
for (SpaceSetMap::Objects::iterator it = live_objects.begin(); it != live_objects.end(); ++it) {
if (!large_mark_objects->Test(*it)) {
- freed_bytes += large_object_space->AllocationSize(*it);
- large_object_space->Free(self, const_cast<Object*>(*it));
+ freed_bytes += large_object_space->Free(self, const_cast<Object*>(*it));
++freed_objects;
}
}
@@ -1017,7 +1008,7 @@
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
Space* space = *it;
if (space->IsAllocSpace()) {
- AllocSpace* alloc_space = space->AsAllocSpace();
+ DlMallocSpace* alloc_space = space->AsAllocSpace();
if (alloc_space->temp_bitmap_.get() != NULL) {
// At this point, the temp_bitmap holds our old mark bitmap.
SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release();
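The mark_sweep.cc changes above all follow one pattern: Free and FreeList now
report how many bytes they released, so the sweep loop accumulates freed_bytes
directly instead of querying AllocationSize per object first. A schematic of
the caller side, with a hypothetical minimal interface standing in for the real
spaces:

    #include <cstddef>

    class Object;
    class Thread;

    // Hypothetical stand-in for the new allocation contract.
    class SweepableSpace {
     public:
      virtual ~SweepableSpace() {}
      virtual size_t Free(Thread* self, Object* ptr) = 0;                  // Bytes freed.
      virtual size_t FreeList(Thread* self, size_t n, Object** ptrs) = 0;  // Bytes freed.
    };

    // One pass, no separate AllocationSize calls; mirrors the kUseFreeList
    // branches in MarkSweep's sweep callback.
    size_t SweepDeadObjects(SweepableSpace* space, Thread* self,
                            Object** dead, size_t num_dead, bool use_free_list) {
      size_t freed_bytes = 0;
      if (use_free_list) {
        freed_bytes += space->FreeList(self, num_dead, dead);  // Bulk free.
      } else {
        for (size_t i = 0; i < num_dead; ++i) {
          freed_bytes += space->Free(self, dead[i]);  // Per-object free.
        }
      }
      return freed_bytes;
    }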
diff --git a/src/gc/space.cc b/src/gc/space.cc
index 274d777..4d5ce93 100644
--- a/src/gc/space.cc
+++ b/src/gc/space.cc
@@ -28,11 +28,6 @@
namespace art {
-#ifndef NDEBUG
-static const bool kDebugSpaces = true;
-#else
-static const bool kDebugSpaces = false;
-#endif
// Magic padding value that we use to check for buffer overruns.
static const word kPaddingValue = 0xBAC0BAC0;
@@ -46,17 +41,35 @@
} \
} while (false)
-Space::Space(const std::string& name, GcRetentionPolicy gc_retention_policy)
- : name_(name),
- gc_retention_policy_(gc_retention_policy) {
+ImageSpace* Space::AsImageSpace() {
+ DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
+ return down_cast<ImageSpace*>(down_cast<MemMapSpace*>(this));
+}
+DlMallocSpace* Space::AsAllocSpace() {
+ DCHECK_EQ(GetType(), kSpaceTypeAllocSpace);
+ return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
+}
+
+DlMallocSpace* Space::AsZygoteSpace() {
+ DCHECK_EQ(GetType(), kSpaceTypeZygoteSpace);
+ return down_cast<DlMallocSpace*>(down_cast<MemMapSpace*>(this));
+}
+
+LargeObjectSpace* Space::AsLargeObjectSpace() {
+ DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
+ return reinterpret_cast<LargeObjectSpace*>(this);
}
ContinuousSpace::ContinuousSpace(const std::string& name, byte* begin, byte* end,
GcRetentionPolicy gc_retention_policy)
- : Space(name, gc_retention_policy),
- begin_(begin),
- end_(end) {
+ : name_(name), gc_retention_policy_(gc_retention_policy), begin_(begin), end_(end) {
+}
+
+DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
+ GcRetentionPolicy gc_retention_policy)
+ : name_(name), gc_retention_policy_(gc_retention_policy) {
}
@@ -68,9 +81,9 @@
}
-size_t AllocSpace::bitmap_index_ = 0;
+size_t DlMallocSpace::bitmap_index_ = 0;
-AllocSpace::AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
+DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
byte* end, size_t growth_limit)
: MemMapSpace(name, mem_map, end - begin, kGcRetentionPolicyAlwaysCollect),
num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
@@ -94,8 +107,8 @@
DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
}
-AllocSpace* AllocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
- size_t capacity, byte* requested_begin) {
+DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size,
+ size_t growth_limit, size_t capacity, byte* requested_begin) {
// Memory we promise to dlmalloc before it asks for morecore.
// Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
@@ -140,7 +153,7 @@
return NULL;
}
- void* mspace = AllocSpace::CreateMallocSpace(mem_map->Begin(), starting_size, initial_size);
+ void* mspace = CreateMallocSpace(mem_map->Begin(), starting_size, initial_size);
if (mspace == NULL) {
LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
return NULL;
@@ -154,8 +167,8 @@
// Everything is set so record in immutable structure and leave
MemMap* mem_map_ptr = mem_map.release();
- AllocSpace* space = new AllocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
- growth_limit);
+ DlMallocSpace* space = new DlMallocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
+ growth_limit);
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
<< " ) " << *space;
@@ -163,7 +176,7 @@
return space;
}
-void* AllocSpace::CreateMallocSpace(void* begin, size_t morecore_start, size_t initial_size) {
+void* DlMallocSpace::CreateMallocSpace(void* begin, size_t morecore_start, size_t initial_size) {
// clear errno to allow PLOG on error
errno = 0;
// create mspace using our backing storage starting at begin and with a footprint of
@@ -179,7 +192,7 @@
return msp;
}
-void AllocSpace::SwapBitmaps() {
+void DlMallocSpace::SwapBitmaps() {
SpaceBitmap* temp_live_bitmap = live_bitmap_.release();
live_bitmap_.reset(mark_bitmap_.release());
mark_bitmap_.reset(temp_live_bitmap);
@@ -189,7 +202,7 @@
mark_bitmap_->SetName(temp_name);
}
-Object* AllocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
+Object* DlMallocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
if (kDebugSpaces) {
num_bytes += sizeof(word);
}
@@ -210,28 +223,27 @@
return result;
}
-Object* AllocSpace::AllocWithoutGrowth(Thread* self, size_t num_bytes) {
+Object* DlMallocSpace::Alloc(Thread* self, size_t num_bytes) {
MutexLock mu(self, lock_);
return AllocWithoutGrowthLocked(num_bytes);
}
-Object* AllocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) {
+Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes) {
MutexLock mu(self, lock_);
// Grow as much as possible within the mspace.
size_t max_allowed = Capacity();
mspace_set_footprint_limit(mspace_, max_allowed);
// Try the allocation.
- void* ptr = AllocWithoutGrowthLocked(num_bytes);
+ Object* result = AllocWithoutGrowthLocked(num_bytes);
// Shrink back down as small as possible.
size_t footprint = mspace_footprint(mspace_);
mspace_set_footprint_limit(mspace_, footprint);
// Return the new allocation or NULL.
- Object* result = reinterpret_cast<Object*>(ptr);
CHECK(!kDebugSpaces || result == NULL || Contains(result));
return result;
}
-void AllocSpace::SetGrowthLimit(size_t growth_limit) {
+void DlMallocSpace::SetGrowthLimit(size_t growth_limit) {
growth_limit = RoundUp(growth_limit, kPageSize);
growth_limit_ = growth_limit;
if (Size() > growth_limit_) {
@@ -239,7 +251,7 @@
}
}
-AllocSpace* AllocSpace::CreateZygoteSpace() {
+DlMallocSpace* DlMallocSpace::CreateZygoteSpace() {
end_ = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(end_), kPageSize));
DCHECK(IsAligned<CardTable::kCardSize>(begin_));
DCHECK(IsAligned<CardTable::kCardSize>(end_));
@@ -276,7 +288,8 @@
if (capacity - initial_size > 0) {
CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name_.c_str());
}
- AllocSpace* alloc_space = new AllocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit);
+ DlMallocSpace* alloc_space =
+ new DlMallocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit);
live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
@@ -286,7 +299,7 @@
return alloc_space;
}
-void AllocSpace::Free(Thread* self, Object* ptr) {
+size_t DlMallocSpace::Free(Thread* self, Object* ptr) {
MutexLock mu(self, lock_);
if (kDebugSpaces) {
CHECK(ptr != NULL);
@@ -295,12 +308,20 @@
*reinterpret_cast<word*>(reinterpret_cast<byte*>(ptr) + AllocationSize(ptr) -
sizeof(word) - kChunkOverhead), kPaddingValue);
}
- num_bytes_allocated_ -= AllocationSize(ptr);
+ const size_t bytes_freed = AllocationSize(ptr);
+ num_bytes_allocated_ -= bytes_freed;
--num_objects_allocated_;
mspace_free(mspace_, ptr);
+ return bytes_freed;
}
-void AllocSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
+size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
+ // Don't need the lock to calculate the size of the freed pointers.
+ size_t bytes_freed = 0;
+ for (size_t i = 0; i < num_ptrs; i++) {
+ bytes_freed += AllocationSize(ptrs[i]);
+ }
+
MutexLock mu(self, lock_);
if (kDebugSpaces) {
CHECK(ptrs != NULL);
@@ -316,11 +337,10 @@
}
CHECK_EQ(num_broken_ptrs, 0u);
}
- for (size_t i = 0; i < num_ptrs; i++) {
- num_bytes_allocated_ -= AllocationSize(ptrs[i]);
- }
+ num_bytes_allocated_ -= bytes_freed;
num_objects_allocated_ -= num_ptrs;
mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
+ return bytes_freed;
}
// Callback from dlmalloc when it needs to increase the footprint
@@ -330,7 +350,7 @@
return heap->GetAllocSpace()->MoreCore(increment);
}
-void* AllocSpace::MoreCore(intptr_t increment) {
+void* DlMallocSpace::MoreCore(intptr_t increment) {
lock_.AssertHeld(Thread::Current());
byte* original_end = end_;
if (increment != 0) {
@@ -364,7 +384,7 @@
return original_end;
}
-size_t AllocSpace::AllocationSize(const Object* obj) {
+size_t DlMallocSpace::AllocationSize(const Object* obj) {
return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) +
kChunkOverhead;
}
@@ -383,7 +403,7 @@
}
}
-void AllocSpace::Trim() {
+void DlMallocSpace::Trim() {
MutexLock mu(Thread::Current(), lock_);
// Trim to release memory at the end of the space.
mspace_trim(mspace_, 0);
@@ -391,21 +411,21 @@
mspace_inspect_all(mspace_, MspaceMadviseCallback, NULL);
}
-void AllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
+void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
void* arg) {
MutexLock mu(Thread::Current(), lock_);
mspace_inspect_all(mspace_, callback, arg);
callback(NULL, NULL, 0, arg); // Indicate end of a space.
}
-size_t AllocSpace::GetFootprintLimit() {
+size_t DlMallocSpace::GetFootprintLimit() {
MutexLock mu(Thread::Current(), lock_);
return mspace_footprint_limit(mspace_);
}
-void AllocSpace::SetFootprintLimit(size_t new_size) {
+void DlMallocSpace::SetFootprintLimit(size_t new_size) {
MutexLock mu(Thread::Current(), lock_);
- VLOG(heap) << "AllocSpace::SetFootprintLimit " << PrettySize(new_size);
+ VLOG(heap) << "DLMallocSpace::SetFootprintLimit " << PrettySize(new_size);
// Compare against the actual footprint, rather than the Size(), because the heap may not have
// grown all the way to the allowed size yet.
size_t current_space_size = mspace_footprint(mspace_);
@@ -414,6 +434,7 @@
new_size = current_space_size;
}
mspace_set_footprint_limit(mspace_, new_size);
+ LOG(INFO) << "Setting footprint limit to " << new_size;
}
size_t ImageSpace::bitmap_index_ = 0;
@@ -521,7 +542,7 @@
return os;
}
-void AllocSpace::Dump(std::ostream& os) const {
+void DlMallocSpace::Dump(std::ostream& os) const {
os << GetType()
<< "begin=" << reinterpret_cast<void*>(Begin())
<< ",end=" << reinterpret_cast<void*>(End())
@@ -537,242 +558,4 @@
<< ",name=\"" << GetName() << "\"]";
}
-void LargeObjectSpace::SwapBitmaps() {
- SpaceSetMap* temp_live_objects = live_objects_.release();
- live_objects_.reset(mark_objects_.release());
- mark_objects_.reset(temp_live_objects);
- // Swap names to get more descriptive diagnostics.
- std::string temp_name = live_objects_->GetName();
- live_objects_->SetName(mark_objects_->GetName());
- mark_objects_->SetName(temp_name);
-}
-
-DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
- GcRetentionPolicy gc_retention_policy)
- : Space(name, gc_retention_policy) {
-
-}
-
-LargeObjectSpace::LargeObjectSpace(const std::string& name)
- : DiscontinuousSpace(name, kGcRetentionPolicyAlwaysCollect),
- num_bytes_allocated_(0), num_objects_allocated_(0), total_bytes_allocated_(0),
- total_objects_allocated_(0) {
- live_objects_.reset(new SpaceSetMap("large live objects"));
- mark_objects_.reset(new SpaceSetMap("large marked objects"));
-}
-
-
-void LargeObjectSpace::CopyLiveToMarked() {
- mark_objects_->CopyFrom(*live_objects_.get());
-}
-
-LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
- : LargeObjectSpace(name),
- lock_("large object space lock", kAllocSpaceLock)
-{
-
-}
-
-LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
- return new LargeObjectMapSpace(name);
-}
-
-Object* LargeObjectMapSpace::Alloc(Thread* self, size_t num_bytes) {
- MemMap* mem_map = MemMap::MapAnonymous("allocation", NULL, num_bytes, PROT_READ | PROT_WRITE);
- if (mem_map == NULL) {
- return NULL;
- }
- MutexLock mu(self, lock_);
- Object* obj = reinterpret_cast<Object*>(mem_map->Begin());
- large_objects_.push_back(obj);
- mem_maps_.Put(obj, mem_map);
- size_t allocation_size = mem_map->Size();
- num_bytes_allocated_ += allocation_size;
- total_bytes_allocated_ += allocation_size;
- ++num_objects_allocated_;
- ++total_objects_allocated_;
- return obj;
-}
-
-void LargeObjectMapSpace::Free(Thread* self, Object* ptr) {
- MutexLock mu(self, lock_);
- MemMaps::iterator found = mem_maps_.find(ptr);
- CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
- DCHECK_GE(num_bytes_allocated_, found->second->Size());
- num_bytes_allocated_ -= found->second->Size();
- --num_objects_allocated_;
- delete found->second;
- mem_maps_.erase(found);
-}
-
-size_t LargeObjectMapSpace::AllocationSize(const Object* obj) {
- MutexLock mu(Thread::Current(), lock_);
- MemMaps::iterator found = mem_maps_.find(const_cast<Object*>(obj));
- CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
- return found->second->Size();
-}
-
-void LargeObjectMapSpace::Walk(AllocSpace::WalkCallback callback, void* arg) {
- MutexLock mu(Thread::Current(), lock_);
- for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
- MemMap* mem_map = it->second;
- callback(mem_map->Begin(), mem_map->End(), mem_map->Size(), arg);
- callback(NULL, NULL, 0, arg);
- }
-}
-
-bool LargeObjectMapSpace::Contains(const Object* obj) const {
- MutexLock mu(Thread::Current(), lock_);
- return mem_maps_.find(const_cast<Object*>(obj)) != mem_maps_.end();
-}
-
-FreeListSpace* FreeListSpace::Create(const std::string& name, byte* requested_begin, size_t size) {
- CHECK(size % kAlignment == 0);
- MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), requested_begin, size,
- PROT_READ | PROT_WRITE);
- CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
- return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
-}
-
-FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
- : LargeObjectSpace(name),
- begin_(begin),
- end_(end),
- mem_map_(mem_map),
- lock_("free list space lock", kAllocSpaceLock) {
- chunks_.resize(Size() / kAlignment + 1);
- // Add a dummy chunk so we don't need to handle chunks having no next chunk.
- chunks_.back().SetSize(kAlignment, false);
- // Start out with one large free chunk.
- AddFreeChunk(begin_, end_ - begin_, NULL);
-}
-
-FreeListSpace::~FreeListSpace() {
-
-}
-
-void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
- Chunk* chunk = ChunkFromAddr(address);
- chunk->SetSize(size, true);
- chunk->SetPrevious(previous);
- Chunk* next_chunk = GetNextChunk(chunk);
- next_chunk->SetPrevious(chunk);
- free_chunks_.insert(chunk);
-}
-
-FreeListSpace::Chunk* FreeListSpace::ChunkFromAddr(void* address) {
- size_t offset = reinterpret_cast<byte*>(address) - Begin();
- DCHECK(IsAligned<kAlignment>(offset));
- DCHECK_LT(offset, Size());
- return &chunks_[offset / kAlignment];
-}
-
-void* FreeListSpace::AddrFromChunk(Chunk* chunk) {
- return reinterpret_cast<void*>(Begin() + (chunk - &chunks_.front()) * kAlignment);
-}
-
-void FreeListSpace::RemoveFreeChunk(Chunk* chunk) {
- // TODO: C++0x
- // TODO: Improve performance, this might be slow.
- std::pair<FreeChunks::iterator, FreeChunks::iterator> range = free_chunks_.equal_range(chunk);
- for (FreeChunks::iterator it = range.first; it != range.second; ++it) {
- if (*it == chunk) {
- free_chunks_.erase(it);
- return;
- }
- }
-}
-
-void FreeListSpace::Walk(AllocSpace::WalkCallback callback, void* arg) {
- MutexLock mu(Thread::Current(), lock_);
- for (Chunk* chunk = &chunks_.front(); chunk < &chunks_.back(); ) {
- if (!chunk->IsFree()) {
- size_t size = chunk->GetSize();
- void* begin = AddrFromChunk(chunk);
- void* end = reinterpret_cast<void*>(reinterpret_cast<byte*>(begin) + size);
- callback(begin, end, size, arg);
- callback(NULL, NULL, 0, arg);
- }
- chunk = GetNextChunk(chunk);
- }
-}
-
-void FreeListSpace::Free(Thread* self, Object* obj) {
- MutexLock mu(self, lock_);
- CHECK(Contains(obj));
- // Check adjacent chunks to see if we need to combine.
- Chunk* chunk = ChunkFromAddr(obj);
- CHECK(!chunk->IsFree());
-
- size_t allocation_size = chunk->GetSize();
- madvise(obj, allocation_size, MADV_DONTNEED);
- num_objects_allocated_--;
- num_bytes_allocated_ -= allocation_size;
- Chunk* prev = chunk->GetPrevious();
- Chunk* next = GetNextChunk(chunk);
-
- // Combine any adjacent free chunks
- size_t extra_size = chunk->GetSize();
- if (next->IsFree()) {
- extra_size += next->GetSize();
- RemoveFreeChunk(next);
- }
- if (prev != NULL && prev->IsFree()) {
- RemoveFreeChunk(prev);
- AddFreeChunk(AddrFromChunk(prev), prev->GetSize() + extra_size, prev->GetPrevious());
- } else {
- AddFreeChunk(AddrFromChunk(chunk), extra_size, prev);
- }
-}
-
-bool FreeListSpace::Contains(const Object* obj) const {
- return mem_map_->HasAddress(obj);
-}
-
-FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) {
- return chunk + chunk->GetSize() / kAlignment;
-}
-
-size_t FreeListSpace::AllocationSize(const Object* obj) {
- Chunk* chunk = ChunkFromAddr(const_cast<Object*>(obj));
- CHECK(!chunk->IsFree());
- return chunk->GetSize();
-}
-
-Object* FreeListSpace::Alloc(Thread* self, size_t num_bytes) {
- MutexLock mu(self, lock_);
- num_bytes = RoundUp(num_bytes, kAlignment);
- Chunk temp;
- temp.SetSize(num_bytes);
- // Find the smallest chunk at least num_bytes in size.
- FreeChunks::iterator found = free_chunks_.lower_bound(&temp);
- if (found == free_chunks_.end()) {
- // Out of memory, or too much fragmentation.
- return NULL;
- }
- Chunk* chunk = *found;
- free_chunks_.erase(found);
- CHECK(chunk->IsFree());
- void* addr = AddrFromChunk(chunk);
- size_t chunk_size = chunk->GetSize();
- chunk->SetSize(num_bytes);
- if (chunk_size > num_bytes) {
- // Split the chunk into two chunks.
- Chunk* new_chunk = GetNextChunk(chunk);
- AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
- }
-
- num_objects_allocated_++;
- total_objects_allocated_++;
- num_bytes_allocated_ += num_bytes;
- total_bytes_allocated_ += num_bytes;
- return reinterpret_cast<Object*>(addr);
-}
-
-void FreeListSpace::FreeList(Thread* self, size_t num_ptrs, Object** ptrs) {
- for (size_t i = 0; i < num_ptrs; ++i) {
- Free(self, ptrs[i]);
- }
-}
-
} // namespace art
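Both SwapBitmaps implementations in this file rotate the live/mark holders
through release()/reset() because they are held by UniquePtr, then swap the
names so each bitmap keeps a label matching its role. A sketch of that pattern;
std::unique_ptr stands in for ART's UniquePtr and the Bitmap type is
illustrative only.

    #include <memory>
    #include <string>
    #include <utility>

    struct Bitmap {
      std::string name;
    };

    // release()/reset() rotation, as in SwapBitmaps.
    void SwapBitmaps(std::unique_ptr<Bitmap>& live, std::unique_ptr<Bitmap>& mark) {
      Bitmap* temp = live.release();
      live.reset(mark.release());
      mark.reset(temp);
      // Swap the names back so diagnostics keep describing the role, not the object.
      std::swap(live->name, mark->name);
    }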
diff --git a/src/gc/space.h b/src/gc/space.h
index ab29582..81b03fa 100644
--- a/src/gc/space.h
+++ b/src/gc/space.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_SRC_SPACE_H_
-#define ART_SRC_SPACE_H_
+#ifndef ART_SRC_GC_SPACE_H_
+#define ART_SRC_GC_SPACE_H_
#include <string>
@@ -29,7 +29,13 @@
namespace art {
-class AllocSpace;
+#ifndef NDEBUG
+static const bool kDebugSpaces = true;
+#else
+static const bool kDebugSpaces = false;
+#endif
+
+class DlMallocSpace;
class ImageSpace;
class LargeObjectSpace;
class Object;
@@ -57,38 +63,13 @@
virtual bool IsCompactible() const = 0;
virtual bool Contains(const Object* obj) const = 0;
virtual SpaceType GetType() const = 0;
+ virtual GcRetentionPolicy GetGcRetentionPolicy() const = 0;
+ virtual std::string GetName() const = 0;
- const std::string& GetName() const {
- return name_;
- }
-
- GcRetentionPolicy GetGcRetentionPolicy() const {
- return gc_retention_policy_;
- }
-
- void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
- gc_retention_policy_ = gc_retention_policy;
- }
-
- ImageSpace* AsImageSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
- return down_cast<ImageSpace*>(this);
- }
-
- AllocSpace* AsAllocSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeAllocSpace);
- return down_cast<AllocSpace*>(this);
- }
-
- AllocSpace* AsZygoteSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeZygoteSpace);
- return down_cast<AllocSpace*>(this);
- }
-
- LargeObjectSpace* AsLargeObjectSpace() {
- DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
- return down_cast<LargeObjectSpace*>(this);
- }
+ ImageSpace* AsImageSpace();
+ DlMallocSpace* AsAllocSpace();
+ DlMallocSpace* AsZygoteSpace();
+ LargeObjectSpace* AsLargeObjectSpace();
bool IsImageSpace() const {
return GetType() == kSpaceTypeImageSpace;
@@ -111,18 +92,45 @@
virtual ~Space() {}
protected:
- Space(const std::string& name, GcRetentionPolicy gc_retention_policy);
-
- // Name of the space.
- std::string name_;
-
- // Garbage collection retention policy, used to figure out when we should sweep over this space.
- GcRetentionPolicy gc_retention_policy_;
+ Space() { }
private:
DISALLOW_COPY_AND_ASSIGN(Space);
};
+// AllocSpace interface.
+class AllocSpace {
+ public:
+ virtual bool CanAllocateInto() const {
+ return true;
+ }
+
+ // General statistics
+ virtual uint64_t GetNumBytesAllocated() const = 0;
+ virtual uint64_t GetNumObjectsAllocated() const = 0;
+ virtual uint64_t GetTotalBytesAllocated() const = 0;
+ virtual uint64_t GetTotalObjectsAllocated() const = 0;
+
+ // Allocate num_bytes without allowing growth.
+ virtual Object* Alloc(Thread* self, size_t num_bytes) = 0;
+
+ // Return the storage space required by obj.
+ virtual size_t AllocationSize(const Object* obj) = 0;
+
+ // Returns how many bytes were freed.
+ virtual size_t Free(Thread* self, Object* ptr) = 0;
+
+ // Returns how many bytes were freed.
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs) = 0;
+
+ protected:
+ AllocSpace() {}
+ virtual ~AllocSpace() {}
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(AllocSpace);
+};
+
// Continuous spaces have bitmaps, and an address range.
class ContinuousSpace : public Space {
public:
@@ -156,10 +164,21 @@
virtual ~ContinuousSpace() {}
+ virtual std::string GetName() const {
+ return name_;
+ }
+
+ virtual GcRetentionPolicy GetGcRetentionPolicy() const {
+ return gc_retention_policy_;
+ }
+
protected:
ContinuousSpace(const std::string& name, byte* begin, byte* end,
GcRetentionPolicy gc_retention_policy);
+ std::string name_;
+ GcRetentionPolicy gc_retention_policy_;
+
// The beginning of the storage for fast access.
byte* begin_;
@@ -170,15 +189,26 @@
DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
};
-class DiscontinuousSpace : public Space {
+class DiscontinuousSpace : public virtual Space {
public:
// Is object within this space?
virtual bool Contains(const Object* obj) const = 0;
+ virtual std::string GetName() const {
+ return name_;
+ }
+
+ virtual GcRetentionPolicy GetGcRetentionPolicy() const {
+ return gc_retention_policy_;
+ }
+
protected:
DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
private:
+ std::string name_;
+ GcRetentionPolicy gc_retention_policy_;
+
DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
};
@@ -217,7 +247,7 @@
};
// An alloc space is a space where objects may be allocated and garbage collected.
-class AllocSpace : public MemMapSpace {
+class DlMallocSpace : public MemMapSpace, public AllocSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
@@ -237,19 +267,19 @@
// base address is not guaranteed to be granted, if it is required,
// the caller should call Begin on the returned space to confirm
// the request was granted.
- static AllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
+ static DlMallocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
size_t capacity, byte* requested_begin);
// Allocate num_bytes, allowing the underlying mspace to grow.
virtual Object* AllocWithGrowth(Thread* self, size_t num_bytes);
// Allocate num_bytes without allowing the underlying mspace to grow.
- virtual Object* AllocWithoutGrowth(Thread* self, size_t num_bytes);
+ virtual Object* Alloc(Thread* self, size_t num_bytes);
// Return the storage space required by obj.
virtual size_t AllocationSize(const Object* obj);
- virtual void Free(Thread* self, Object* ptr);
- virtual void FreeList(Thread* self, size_t num_ptrs, Object** ptrs);
+ virtual size_t Free(Thread* self, Object* ptr);
+ virtual size_t FreeList(Thread* self, size_t num_ptrs, Object** ptrs);
void* MoreCore(intptr_t increment);
@@ -304,21 +334,25 @@
virtual void SwapBitmaps();
// Turn ourself into a zygote space and return a new alloc space which has our unused memory.
- AllocSpace* CreateZygoteSpace();
+ DlMallocSpace* CreateZygoteSpace();
- size_t GetNumBytesAllocated() const {
+ void SetGcRetentionPolicy(GcRetentionPolicy gc_retention_policy) {
+ gc_retention_policy_ = gc_retention_policy;
+ }
+
+ virtual uint64_t GetNumBytesAllocated() const {
return num_bytes_allocated_;
}
- size_t GetNumObjectsAllocated() const {
+ virtual uint64_t GetNumObjectsAllocated() const {
return num_objects_allocated_;
}
- size_t GetTotalBytesAllocated() const {
+ virtual uint64_t GetTotalBytesAllocated() const {
return total_bytes_allocated_;
}
- size_t GetTotalObjectsAllocated() const {
+ virtual uint64_t GetTotalObjectsAllocated() const {
return total_objects_allocated_;
}
@@ -337,7 +371,7 @@
static size_t bitmap_index_;
- AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
+ DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
size_t growth_limit);
bool Init(size_t initial_size, size_t maximum_size, size_t growth_size, byte* requested_base);
@@ -364,7 +398,7 @@
friend class MarkSweep;
- DISALLOW_COPY_AND_ASSIGN(AllocSpace);
+ DISALLOW_COPY_AND_ASSIGN(DlMallocSpace);
};
// An image space is a space backed with a memory mapped image
@@ -390,7 +424,7 @@
return *reinterpret_cast<ImageHeader*>(Begin());
}
- const std::string& GetImageFilename() const {
+ const std::string GetImageFilename() const {
return GetName();
}
@@ -421,181 +455,10 @@
DISALLOW_COPY_AND_ASSIGN(ImageSpace);
};
-class LargeObjectSpace : public DiscontinuousSpace {
- public:
- virtual bool CanAllocateInto() const {
- return true;
- }
-
- virtual bool IsCompactible() const {
- return true;
- }
-
- virtual SpaceType GetType() const {
- return kSpaceTypeLargeObjectSpace;
- }
-
- virtual SpaceSetMap* GetLiveObjects() const {
- return live_objects_.get();
- }
-
- virtual SpaceSetMap* GetMarkObjects() const {
- return mark_objects_.get();
- }
-
- virtual void SwapBitmaps();
- virtual void CopyLiveToMarked();
-
- // Return the storage space required by obj.
- virtual size_t AllocationSize(const Object* obj) = 0;
- virtual Object* Alloc(Thread* self, size_t num_bytes) = 0;
- virtual void Free(Thread* self, Object* ptr) = 0;
- virtual void Walk(AllocSpace::WalkCallback, void* arg) = 0;
-
- virtual ~LargeObjectSpace() {}
-
-
- size_t GetNumBytesAllocated() const {
- return num_bytes_allocated_;
- }
-
- size_t GetNumObjectsAllocated() const {
- return num_objects_allocated_;
- }
-
- size_t GetTotalBytesAllocated() const {
- return total_bytes_allocated_;
- }
-
- size_t GetTotalObjectsAllocated() const {
- return total_objects_allocated_;
- }
-
- protected:
-
- LargeObjectSpace(const std::string& name);
-
- // Approximate number of bytes which have been allocated into the space.
- size_t num_bytes_allocated_;
- size_t num_objects_allocated_;
- size_t total_bytes_allocated_;
- size_t total_objects_allocated_;
-
- UniquePtr<SpaceSetMap> live_objects_;
- UniquePtr<SpaceSetMap> mark_objects_;
-
- friend class Space;
-};
-
-class LargeObjectMapSpace : public LargeObjectSpace {
- public:
-
- // Creates a large object space. Allocations into the large object space use memory maps instead
- // of malloc.
- static LargeObjectMapSpace* Create(const std::string& name);
-
- // Return the storage space required by obj.
- virtual size_t AllocationSize(const Object* obj);
- virtual Object* Alloc(Thread* self, size_t num_bytes);
- virtual void Free(Thread* self, Object* ptr);
- virtual void Walk(AllocSpace::WalkCallback, void* arg);
- virtual bool Contains(const Object* obj) const;
-private:
- LargeObjectMapSpace(const std::string& name);
- virtual ~LargeObjectMapSpace() {}
-
- // Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
- mutable Mutex lock_;
- std::vector<Object*> large_objects_;
- typedef SafeMap<Object*, MemMap*> MemMaps;
- MemMaps mem_maps_;
-};
-
-class FreeListSpace : public LargeObjectSpace {
- public:
- virtual ~FreeListSpace();
- static FreeListSpace* Create(const std::string& name, byte* requested_begin, size_t capacity);
-
- virtual size_t AllocationSize(const Object* obj);
- virtual Object* Alloc(Thread* self, size_t num_bytes);
- virtual void Free(Thread* self, Object* obj);
- virtual void FreeList(Thread* self, size_t num_ptrs, Object** ptrs);
- virtual bool Contains(const Object* obj) const;
- virtual void Walk(AllocSpace::WalkCallback callback, void* arg);
-
- // Address at which the space begins
- byte* Begin() const {
- return begin_;
- }
-
- // Address at which the space ends, which may vary as the space is filled.
- byte* End() const {
- return end_;
- }
-
- // Current size of space
- size_t Size() const {
- return End() - Begin();
- }
- private:
- static const size_t kAlignment = kPageSize;
-
- class Chunk {
- public:
- static const size_t kFreeFlag = 0x80000000;
-
- struct SortBySize {
- bool operator()(const Chunk* a, const Chunk* b) const {
- return a->GetSize() < b->GetSize();
- }
- };
-
- bool IsFree() const {
- return (m_size & kFreeFlag) != 0;
- }
-
- void SetSize(size_t size, bool is_free = false) {
- m_size = size | (is_free ? kFreeFlag : 0);
- }
-
- size_t GetSize() const {
- return m_size & (kFreeFlag - 1);
- }
-
- Chunk* GetPrevious() {
- return m_previous;
- }
-
- void SetPrevious(Chunk* previous) {
- m_previous = previous;
- DCHECK(m_previous == NULL ||
- (m_previous != NULL && m_previous + m_previous->GetSize() / kAlignment == this));
- }
- private:
- size_t m_size;
- Chunk* m_previous;
- };
-
- FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
- void AddFreeChunk(void* address, size_t size, Chunk* previous);
- Chunk* ChunkFromAddr(void* address);
- void* AddrFromChunk(Chunk* chunk);
- void RemoveFreeChunk(Chunk* chunk);
- Chunk* GetNextChunk(Chunk* chunk);
-
- typedef std::multiset<Chunk*, Chunk::SortBySize> FreeChunks;
- byte* begin_;
- byte* end_;
- UniquePtr<MemMap> mem_map_;
- Mutex lock_;
- std::vector<Chunk> chunks_;
- FreeChunks free_chunks_;
-};
-
// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
// pages back to the kernel.
void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* /*arg*/);
} // namespace art
-#endif // ART_SRC_SPACE_H_
+#endif // ART_SRC_GC_SPACE_H_
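The net effect of the space.h changes: allocation behaviour moves out of the
Space class hierarchy into a standalone AllocSpace interface, which both the
continuous dlmalloc-backed space and the discontinuous large object spaces mix
back in. A compressed sketch of the resulting shape (bodies elided,
intermediate classes like MemMapSpace abbreviated away):

    #include <cstddef>

    class Object;
    class Thread;

    class Space {  // Identity only: name, type, GC retention policy.
     public:
      virtual ~Space() {}
    };

    class AllocSpace {  // Behaviour only: allocation and byte accounting.
     public:
      virtual Object* Alloc(Thread* self, size_t num_bytes) = 0;
      virtual size_t AllocationSize(const Object* obj) = 0;
      virtual size_t Free(Thread* self, Object* ptr) = 0;               // Returns bytes freed.
      virtual size_t FreeList(Thread* self, size_t n, Object** p) = 0;  // Returns bytes freed.
     protected:
      virtual ~AllocSpace() {}
    };

    class ContinuousSpace : public Space {};             // Has a begin/end range.
    class DiscontinuousSpace : public virtual Space {};  // No single range.

    class DlMallocSpace : public ContinuousSpace, public AllocSpace { /* ... */ };
    class LargeObjectSpace : public DiscontinuousSpace, public AllocSpace { /* ... */ };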
diff --git a/src/gc/space_test.cc b/src/gc/space_test.cc
index 0319147..2e03eae 100644
--- a/src/gc/space_test.cc
+++ b/src/gc/space_test.cc
@@ -27,7 +27,7 @@
class SpaceTest : public CommonTest {
public:
- void SizeFootPrintGrowthLimitAndTrimBody(AllocSpace* space, intptr_t object_size,
+ void SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr_t object_size,
int round, size_t growth_limit);
void SizeFootPrintGrowthLimitAndTrimDriver(size_t object_size);
};
@@ -35,37 +35,37 @@
TEST_F(SpaceTest, Init) {
{
// Init < max == growth
- UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 32 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 32 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Init == max == growth
- UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 16 * MB, 16 * MB, NULL));
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 16 * MB, 16 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Init > max == growth
- UniquePtr<Space> space(AllocSpace::Create("test", 32 * MB, 16 * MB, 16 * MB, NULL));
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 32 * MB, 16 * MB, 16 * MB, NULL));
EXPECT_TRUE(space.get() == NULL);
}
{
// Growth == init < max
- UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 16 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 16 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Growth < init < max
- UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 8 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 16 * MB, 8 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() == NULL);
}
{
// Init < growth < max
- UniquePtr<Space> space(AllocSpace::Create("test", 8 * MB, 16 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 8 * MB, 16 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Init < max < growth
- UniquePtr<Space> space(AllocSpace::Create("test", 8 * MB, 32 * MB, 16 * MB, NULL));
+ UniquePtr<Space> space(DlMallocSpace::Create("test", 8 * MB, 32 * MB, 16 * MB, NULL));
EXPECT_TRUE(space.get() == NULL);
}
}
@@ -75,7 +75,7 @@
// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
// the GC works with the ZygoteSpace.
TEST_F(SpaceTest, ZygoteSpace) {
- AllocSpace* space(AllocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ DlMallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
@@ -83,11 +83,11 @@
Thread* self = Thread::Current();
// Succeeds, fits without adjusting the footprint limit.
- Object* ptr1 = space->AllocWithoutGrowth(self, 1 * MB);
+ Object* ptr1 = space->Alloc(self, 1 * MB);
EXPECT_TRUE(ptr1 != NULL);
// Fails, requires a higher footprint limit.
- Object* ptr2 = space->AllocWithoutGrowth(self, 8 * MB);
+ Object* ptr2 = space->Alloc(self, 8 * MB);
EXPECT_TRUE(ptr2 == NULL);
// Succeeds, adjusts the footprint.
@@ -95,7 +95,7 @@
EXPECT_TRUE(ptr3 != NULL);
// Fails, requires a higher footprint limit.
- Object* ptr4 = space->AllocWithoutGrowth(self, 8 * MB);
+ Object* ptr4 = space->Alloc(self, 8 * MB);
EXPECT_TRUE(ptr4 == NULL);
// Also fails, requires a higher allowed footprint.
@@ -104,7 +104,7 @@
// Release some memory.
size_t free3 = space->AllocationSize(ptr3);
- space->Free(self, ptr3);
+ EXPECT_EQ(free3, space->Free(self, ptr3));
EXPECT_LE(8U * MB, free3);
// Succeeds, now that memory has been freed.
@@ -117,18 +117,18 @@
EXPECT_LE(1U * MB, free1);
// Make sure that the zygote space isn't directly at the start of the space.
- space->AllocWithoutGrowth(self, 1U * MB);
+ space->Alloc(self, 1U * MB);
space = space->CreateZygoteSpace();
// Make space findable to the heap, will also delete space when runtime is cleaned up
Runtime::Current()->GetHeap()->AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
- ptr1 = space->AllocWithoutGrowth(self, 1 * MB);
+ ptr1 = space->Alloc(self, 1 * MB);
EXPECT_TRUE(ptr1 != NULL);
// Fails, requires a higher footprint limit.
- ptr2 = space->AllocWithoutGrowth(self, 8 * MB);
+ ptr2 = space->Alloc(self, 8 * MB);
EXPECT_TRUE(ptr2 == NULL);
// Succeeds, adjusts the footprint.
@@ -143,7 +143,7 @@
}
TEST_F(SpaceTest, AllocAndFree) {
- AllocSpace* space(AllocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ DlMallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
ASSERT_TRUE(space != NULL);
Thread* self = Thread::Current();
@@ -151,11 +151,11 @@
Runtime::Current()->GetHeap()->AddSpace(space);
// Succeeds, fits without adjusting the footprint limit.
- Object* ptr1 = space->AllocWithoutGrowth(self, 1 * MB);
+ Object* ptr1 = space->Alloc(self, 1 * MB);
EXPECT_TRUE(ptr1 != NULL);
// Fails, requires a higher footprint limit.
- Object* ptr2 = space->AllocWithoutGrowth(self, 8 * MB);
+ Object* ptr2 = space->Alloc(self, 8 * MB);
EXPECT_TRUE(ptr2 == NULL);
// Succeeds, adjusts the footprint.
@@ -163,7 +163,7 @@
EXPECT_TRUE(ptr3 != NULL);
// Fails, requires a higher footprint limit.
- Object* ptr4 = space->AllocWithoutGrowth(self, 8 * MB);
+ Object* ptr4 = space->Alloc(self, 8 * MB);
EXPECT_TRUE(ptr4 == NULL);
// Also fails, requires a higher allowed footprint.
@@ -186,7 +186,7 @@
}
TEST_F(SpaceTest, AllocAndFreeList) {
- AllocSpace* space(AllocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ DlMallocSpace* space(DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
@@ -196,7 +196,7 @@
// Succeeds, fits without adjusting the max allowed footprint.
Object* lots_of_objects[1024];
for (size_t i = 0; i < arraysize(lots_of_objects); i++) {
- lots_of_objects[i] = space->AllocWithoutGrowth(self, 16);
+ lots_of_objects[i] = space->Alloc(self, 16);
EXPECT_TRUE(lots_of_objects[i] != NULL);
}
@@ -224,7 +224,7 @@
return rand();
}
-void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(AllocSpace* space, intptr_t object_size,
+void SpaceTest::SizeFootPrintGrowthLimitAndTrimBody(DlMallocSpace* space, intptr_t object_size,
int round, size_t growth_limit) {
if (((object_size > 0 && object_size >= static_cast<intptr_t>(growth_limit))) ||
((object_size < 0 && -object_size >= static_cast<intptr_t>(growth_limit)))) {
@@ -271,7 +271,7 @@
}
Object* object;
if (round <= 1) {
- object = space->AllocWithoutGrowth(self, alloc_size);
+ object = space->Alloc(self, alloc_size);
} else {
object = space->AllocWithGrowth(self, alloc_size);
}
@@ -350,7 +350,7 @@
Object* large_object;
size_t three_quarters_space = (growth_limit / 2) + (growth_limit / 4);
if (round <= 1) {
- large_object = space->AllocWithoutGrowth(self, three_quarters_space);
+ large_object = space->Alloc(self, three_quarters_space);
} else {
large_object = space->AllocWithGrowth(self, three_quarters_space);
}
@@ -376,7 +376,7 @@
size_t initial_size = 4 * MB;
size_t growth_limit = 8 * MB;
size_t capacity = 16 * MB;
- AllocSpace* space(AllocSpace::Create("test", initial_size, growth_limit, capacity, NULL));
+ DlMallocSpace* space(DlMallocSpace::Create("test", initial_size, growth_limit, capacity, NULL));
ASSERT_TRUE(space != NULL);
// Basic sanity
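One invariant the updated tests lean on is visible in the
EXPECT_EQ(free3, space->Free(self, ptr3)) check: Free must report exactly the
storage that AllocationSize charged for the object. A hypothetical extra test
in the same style (not part of this change) would look like:

    TEST_F(SpaceTest, FreeReturnsAllocationSize) {
      DlMallocSpace* space = DlMallocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL);
      ASSERT_TRUE(space != NULL);
      // Make space findable to the heap, will also delete space when runtime is cleaned up
      Runtime::Current()->GetHeap()->AddSpace(space);
      Thread* self = Thread::Current();

      Object* obj = space->Alloc(self, 1 * MB);
      ASSERT_TRUE(obj != NULL);

      // The mspace may round the request up (chunk overhead), so compare against
      // AllocationSize rather than the requested 1 MB.
      size_t allocation_size = space->AllocationSize(obj);
      EXPECT_LE(1U * MB, allocation_size);
      EXPECT_EQ(allocation_size, space->Free(self, obj));
    }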
diff --git a/src/heap.cc b/src/heap.cc
index d3de603..6c13563 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -26,6 +26,7 @@
#include "gc/atomic_stack.h"
#include "gc/card_table.h"
#include "gc/heap_bitmap.h"
+#include "gc/large_object_space.h"
#include "gc/mark_sweep.h"
#include "gc/mod_union_table.h"
#include "gc/space.h"
@@ -135,7 +136,9 @@
card_marking_disabled_(false),
is_gc_running_(false),
last_gc_type_(kGcTypeNone),
+ enforce_heap_growth_rate_(false),
growth_limit_(growth_limit),
+ max_allowed_footprint_(kInitialSize),
concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
concurrent_start_size_(128 * KB),
concurrent_min_free_(256 * KB),
@@ -221,9 +224,11 @@
live_bitmap_->SetLargeObjects(large_object_space_->GetLiveObjects());
mark_bitmap_->SetLargeObjects(large_object_space_->GetMarkObjects());
- UniquePtr<AllocSpace> alloc_space(AllocSpace::Create("alloc space", initial_size, growth_limit,
- capacity, requested_begin));
+ UniquePtr<DlMallocSpace> alloc_space(DlMallocSpace::Create("alloc space", initial_size,
+ growth_limit, capacity,
+ requested_begin));
alloc_space_ = alloc_space.release();
+ alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
AddSpace(alloc_space_);
@@ -393,7 +398,7 @@
return NULL;
}
-AllocSpace* Heap::GetAllocSpace() {
+DlMallocSpace* Heap::GetAllocSpace() {
return alloc_space_;
}
@@ -425,7 +430,7 @@
// range. This also means that we rely on SetClass not dirtying the object's card.
if (byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray()) {
size = RoundUp(byte_count, kPageSize);
- obj = Allocate(self, NULL, size);
+ obj = Allocate(self, large_object_space_.get(), size);
// Make sure that our large object didn't get placed anywhere within the space interval or else
// it breaks the immune range.
DCHECK(obj == NULL ||
@@ -624,17 +629,23 @@
Object* Heap::TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow) {
// Should we try to use a CAS here and fix up num_bytes_allocated_ later with AllocationSize?
+ if (enforce_heap_growth_rate_ && num_bytes_allocated_ + alloc_size > max_allowed_footprint_) {
+ if (grow) {
+ // Grow the heap by alloc_size extra bytes.
+ max_allowed_footprint_ = std::min(max_allowed_footprint_ + alloc_size, growth_limit_);
+ VLOG(gc) << "Grow heap to " << PrettySize(max_allowed_footprint_)
+ << " for a " << PrettySize(alloc_size) << " allocation";
+ } else {
+ return NULL;
+ }
+ }
+
if (num_bytes_allocated_ + alloc_size > growth_limit_) {
+ // Completely out of memory.
return NULL;
}
- if (UNLIKELY(space == NULL)) {
- return large_object_space_->Alloc(self, alloc_size);
- } else if (grow) {
- return space->AllocWithGrowth(self, alloc_size);
- } else {
- return space->AllocWithoutGrowth(self, alloc_size);
- }
+ return space->Alloc(self, alloc_size);
}
Object* Heap::Allocate(Thread* self, AllocSpace* space, size_t alloc_size) {
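The rewritten TryToAllocate folds the old per-space growth into a single soft limit: when enforce_heap_growth_rate_ is set and the request would cross max_allowed_footprint_, the allocation either fails (grow == false, so the caller can GC and retry) or raises the soft limit, capped by growth_limit_. A self-contained model of that gate (plain integers, an invented function name, and the enforcement flag elided):

  // Hypothetical standalone model of the gate above. Returns true if the
  // allocation may proceed; grows the soft footprint only when asked to.
  #include <algorithm>
  #include <cstddef>

  bool MayAllocate(size_t allocated, size_t alloc_size, bool grow,
                   size_t* max_allowed_footprint, size_t growth_limit) {
    if (allocated + alloc_size > *max_allowed_footprint) {
      if (!grow) {
        return false;  // caller GCs, then retries with grow == true
      }
      // Grow the soft limit by alloc_size, capped at the hard growth limit.
      *max_allowed_footprint = std::min(*max_allowed_footprint + alloc_size, growth_limit);
    }
    // Past the hard limit we are completely out of memory.
    return allocated + alloc_size <= growth_limit;
  }

For example, with a 4 MB soft limit, an 8 MB hard limit, and 3.5 MB already allocated, a 1 MB request fails when grow is false and lifts the soft limit to 5 MB when grow is true.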
@@ -701,14 +712,6 @@
// Try harder, growing the heap if necessary.
ptr = TryToAllocate(self, space, alloc_size, true);
if (ptr != NULL) {
- if (space != NULL) {
- size_t new_footprint = space->GetFootprintLimit();
- // OLD-TODO: may want to grow a little bit more so that the amount of
- // free space is equal to the old free space + the
- // utilization slop for the new allocation.
- VLOG(gc) << "Grow alloc space (frag case) to " << PrettySize(new_footprint)
- << " for a " << PrettySize(alloc_size) << " allocation";
- }
return ptr;
}
@@ -846,11 +849,12 @@
// TODO: C++0x auto
for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
if ((*it)->IsAllocSpace()) {
- AllocSpace* zygote_space = (*it)->AsAllocSpace();
+ DlMallocSpace* zygote_space = (*it)->AsAllocSpace();
// Turns the current alloc space into a Zygote space and obtains the new alloc space, composed
// of the remaining available heap memory.
alloc_space_ = zygote_space->CreateZygoteSpace();
+ alloc_space_->SetFootprintLimit(alloc_space_->Capacity());
// Change the GC retention policy of the zygote space to only collect when full.
zygote_space->SetGcRetentionPolicy(kGcRetentionPolicyFullCollect);
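A hedged sketch of the fork-time handoff above (CreateZygoteSpace's internals are not in this patch; its contract is inferred from the call site):

  // Sketch: at zygote fork, the current alloc space is frozen as the zygote
  // space and a fresh DlMallocSpace takes over the remaining capacity.
  alloc_space_ = zygote_space->CreateZygoteSpace();           // new space for post-fork allocations
  alloc_space_->SetFootprintLimit(alloc_space_->Capacity());  // start with the full capacity available
  zygote_space->SetGcRetentionPolicy(kGcRetentionPolicyFullCollect);  // old space: collect only when full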
@@ -1821,23 +1825,12 @@
}
void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
- AllocSpace* alloc_space = alloc_space_;
if (max_allowed_footprint > GetMaxMemory()) {
VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
<< PrettySize(GetMaxMemory());
max_allowed_footprint = GetMaxMemory();
}
- // We want to update the footprint for just the alloc space.
- max_allowed_footprint -= large_object_space_->GetNumBytesAllocated();
- for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- if ((*it)->IsAllocSpace()) {
- AllocSpace* alloc_space = (*it)->AsAllocSpace();
- if (alloc_space != alloc_space_) {
- max_allowed_footprint -= alloc_space->GetNumBytesAllocated();
- }
- }
- }
- alloc_space->SetFootprintLimit(max_allowed_footprint);
+ max_allowed_footprint_ = max_allowed_footprint;
}
// kHeapIdealFree is the ideal maximum free size, when we grow the heap for utilization.
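The simplified SetIdealFootprint above no longer walks the spaces; it clamps the request against GetMaxMemory() and records it in max_allowed_footprint_, which TryToAllocate then enforces. A small worked sketch (the 16 MB maximum is illustrative, not taken from this patch):

  // Sketch: behavior of the simplified SetIdealFootprint,
  // assuming GetMaxMemory() == 16 * MB (illustrative value).
  heap->SetIdealFootprint(20 * MB);  // clamped: max_allowed_footprint_ == 16 * MB
  heap->SetIdealFootprint(6 * MB);   // stored:  max_allowed_footprint_ == 6 * MB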
diff --git a/src/heap.h b/src/heap.h
index 3d49e30..d5f1ba7 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -42,6 +42,7 @@
class AllocSpace;
class Class;
class ConditionVariable;
+class DlMallocSpace;
class HeapBitmap;
class ImageSpace;
class LargeObjectSpace;
@@ -297,7 +298,7 @@
// DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
// Assumes there is only one image space.
ImageSpace* GetImageSpace();
- AllocSpace* GetAllocSpace();
+ DlMallocSpace* GetAllocSpace();
LargeObjectSpace* GetLargeObjectsSpace() {
return large_object_space_.get();
}
@@ -379,7 +380,7 @@
UniquePtr<MemMap> oat_file_map_;
// The alloc space which we are currently allocating into.
- AllocSpace* alloc_space_;
+ DlMallocSpace* alloc_space_;
// One cumulative logger for each type of Gc.
typedef SafeMap<GcType, CumulativeLogger*> CumulativeTimings;
@@ -415,13 +416,19 @@
// Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
volatile GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
+ // If enabled, causes a GC for alloc whenever the heap size reaches the current footprint limit
+ // before the GC has updated it.
+ const bool enforce_heap_growth_rate_;
+
// Maximum size that the heap can reach.
size_t growth_limit_;
+ size_t max_allowed_footprint_;
// Bytes until concurrent GC starts.
volatile size_t concurrent_start_bytes_;
size_t concurrent_start_size_;
size_t concurrent_min_free_;
+
// Number of bytes allocated since the last Gc; used to help decide when to schedule concurrent GCs.
size_t bytes_since_last_gc_;
size_t sticky_gc_count_;
diff --git a/src/image_writer.cc b/src/image_writer.cc
index 13d9374..56dfa5e 100644
--- a/src/image_writer.cc
+++ b/src/image_writer.cc
@@ -26,6 +26,8 @@
#include "compiler.h"
#include "dex_cache.h"
#include "file.h"
+#include "gc/large_object_space.h"
+#include "gc/space.h"
#include "globals.h"
#include "heap.h"
#include "image.h"
@@ -37,7 +39,6 @@
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "sirt_ref.h"
-#include "gc/space.h"
#include "UniquePtr.h"
#include "utils.h"
diff --git a/src/mem_map.cc b/src/mem_map.cc
index 9e86772..653eb3f 100644
--- a/src/mem_map.cc
+++ b/src/mem_map.cc
@@ -91,7 +91,7 @@
<< ", " << prot << ", " << flags << ", " << fd.get() << ", 0) failed for " << name;
return NULL;
}
- return new MemMap(actual, byte_count, actual, page_aligned_byte_count, prot);
+ return new MemMap(name, actual, byte_count, actual, page_aligned_byte_count, prot);
}
MemMap* MemMap::MapFileAtAddress(byte* addr, size_t byte_count, int prot, int flags, int fd, off_t start) {
@@ -113,7 +113,8 @@
PLOG(ERROR) << "mmap failed";
return NULL;
}
- return new MemMap(actual + page_offset, byte_count, actual, page_aligned_byte_count, prot);
+ return new MemMap("file", actual + page_offset, byte_count, actual, page_aligned_byte_count,
+ prot);
}
MemMap::~MemMap() {
@@ -126,8 +127,10 @@
}
}
-MemMap::MemMap(byte* begin, size_t size, void* base_begin, size_t base_size, int prot)
- : begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size), prot_(prot) {
+MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
+ size_t base_size, int prot)
+ : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
+ prot_(prot) {
CHECK(begin_ != NULL);
CHECK_NE(size_, 0U);
CHECK(base_begin_ != NULL);
diff --git a/src/mem_map.h b/src/mem_map.h
index 1748a0e..3327a37 100644
--- a/src/mem_map.h
+++ b/src/mem_map.h
@@ -17,6 +17,8 @@
#ifndef ART_SRC_MEM_MAP_H_
#define ART_SRC_MEM_MAP_H_
+#include <string>
+
#include <stddef.h>
#include <sys/mman.h> // For the PROT_* and MAP_* constants.
#include <sys/types.h>
@@ -83,8 +85,10 @@
void UnMapAtEnd(byte* new_end);
private:
- MemMap(byte* begin, size_t size, void* base_begin, size_t base_size, int prot);
+ MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
+ int prot);
+ std::string name_;
byte* const begin_; // Start of data.
size_t size_; // Length of data.
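MemMap instances now carry the name passed at creation, so diagnostics can identify which mapping is involved. A usage sketch (the label and size are illustrative; MapAnonymous itself appears earlier in this patch):

  // Sketch: creating a named anonymous mapping with the updated API.
  size_t num_bytes = 64 * 4096;  // 64 pages, illustrative
  MemMap* map = MemMap::MapAnonymous("scratch buffer", NULL, num_bytes,
                                     PROT_READ | PROT_WRITE);
  // The name ends up in name_ via the new constructor parameter.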
diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc
index 93f6d5b..b98f974 100644
--- a/src/native/dalvik_system_VMRuntime.cc
+++ b/src/native/dalvik_system_VMRuntime.cc
@@ -160,7 +160,7 @@
// Trim the managed heap.
Heap* heap = Runtime::Current()->GetHeap();
uint64_t start_ns = NanoTime();
- AllocSpace* alloc_space = heap->GetAllocSpace();
+ DlMallocSpace* alloc_space = heap->GetAllocSpace();
size_t alloc_space_size = alloc_space->Size();
float utilization = static_cast<float>(alloc_space->GetNumBytesAllocated()) / alloc_space_size;
Thread* self = static_cast<JNIEnvExt*>(env)->self;
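The trim path computes how full the alloc space is, presumably to decide whether releasing pages is worthwhile. A worked sketch of that input (sizes invented for illustration):

  // Sketch: the utilization figure feeding the trim decision.
  size_t alloc_space_size = 8 * MB;  // illustrative
  size_t bytes_allocated = 6 * MB;   // illustrative
  float utilization = static_cast<float>(bytes_allocated) / alloc_space_size;  // 0.75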
diff --git a/src/oatdump.cc b/src/oatdump.cc
index a592b9f..b4ba3df 100644
--- a/src/oatdump.cc
+++ b/src/oatdump.cc
@@ -26,6 +26,8 @@
#include "dex_instruction.h"
#include "disassembler.h"
#include "file.h"
+#include "gc/large_object_space.h"
+#include "gc/space.h"
#include "image.h"
#include "oat/runtime/context.h" // For VmapTable
#include "object_utils.h"
@@ -33,7 +35,6 @@
#include "runtime.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
-#include "gc/space.h"
#include "stringpiece.h"
#include "gc_map.h"