Refactor spaces and add free list large object space
Added more abstraction for spaces; we now have ContinuousSpaces and DiscontinuousSpaces.
Added a free list version of large object space.
Performance should be better than the memory map version, since we avoid creating a new
memory map for each large allocation.
Added a cause for Gc which is printed with the Gc message; Dalvik has this as well.
Change-Id: Ie4aa6b204fbde7193e8305eb246158fae0444cc1
diff --git a/src/atomic_integer.h b/src/atomic_integer.h
new file mode 100644
index 0000000..61b93cc
--- /dev/null
+++ b/src/atomic_integer.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_ATOMIC_INTEGER_H_
+#define ART_SRC_ATOMIC_INTEGER_H_
+
+#include "atomic.h"
+
+namespace art {
+
+class AtomicInteger {
+ public:
+ AtomicInteger(int32_t value) : value_(value) { }
+
+ operator int32_t () const {
+ return get();
+ }
+
+ int32_t get() const {
+ return value_;
+ }
+
+ // Note: the android_atomic_* primitives return the old value, so each
+ // operator adjusts the result to return the new value, as C++ expects.
+ int32_t operator += (const int32_t value) {
+ return android_atomic_add(value, &value_) + value;
+ }
+
+ int32_t operator -= (const int32_t value) {
+ return android_atomic_add(-value, &value_) - value;
+ }
+
+ int32_t operator |= (const int32_t value) {
+ return android_atomic_or(value, &value_) | value;
+ }
+
+ int32_t operator &= (const int32_t value) {
+ return android_atomic_and(value, &value_) & value;
+ }
+
+ int32_t operator ++ () {
+ return android_atomic_inc(&value_) + 1;
+ }
+
+ int32_t operator -- () {
+ return android_atomic_dec(&value_) - 1;
+ }
+ private:
+ int32_t value_;
+};
+
+}  // namespace art
+
+#endif // ART_SRC_ATOMIC_INTEGER_H_
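
A minimal usage sketch of the new wrapper (the counter variable and values below are
illustrative only, not part of this change). The operators delegate to the libcutils
atomics, so concurrent updates from several threads need no explicit lock:

  AtomicInteger counter(0);    // e.g. a shared bytes-allocated counter
  counter += 128;              // atomic add
  ++counter;                   // atomic increment
  counter -= 28;               // atomic subtract
  int32_t snapshot = counter;  // plain read via operator int32_t()
  // snapshot is now 101

Heap uses exactly this pattern below for num_bytes_allocated_, replacing the hand-rolled
android_atomic_add calls.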
diff --git a/src/card_table.cc b/src/card_table.cc
index 5bfdfaf..8d0ddd1 100644
--- a/src/card_table.cc
+++ b/src/card_table.cc
@@ -88,7 +88,7 @@
ANNOTATE_BENIGN_RACE_SIZED(begin, (end - begin), "writes to GC card table");
}
-void CardTable::ClearSpaceCards(Space* space) {
+void CardTable::ClearSpaceCards(ContinuousSpace* space) {
// TODO: clear just the range of the table that has been modified
byte* card_start = CardFromAddr(space->Begin());
byte* card_end = CardFromAddr(space->End()); // Make sure to round up.
@@ -100,7 +100,7 @@
memset(mem_map_->Begin(), GC_CARD_CLEAN, mem_map_->Size());
}
-void CardTable::PreClearCards(Space* space, std::vector<byte*>& out_cards) {
+void CardTable::PreClearCards(ContinuousSpace* space, std::vector<byte*>& out_cards) {
byte* card_end = CardFromAddr(space->End());
for (byte* card_cur = CardFromAddr(space->Begin()); card_cur < card_end; ++card_cur) {
if (*card_cur == GC_CARD_DIRTY) {
@@ -110,7 +110,7 @@
}
}
-void CardTable::GetDirtyCards(Space* space, std::vector<byte*>& out_cards) const {
+void CardTable::GetDirtyCards(ContinuousSpace* space, std::vector<byte*>& out_cards) const {
byte* card_end = CardFromAddr(space->End());
for (byte* card_cur = CardFromAddr(space->Begin()); card_cur < card_end; ++card_cur) {
if (*card_cur == GC_CARD_DIRTY) {
diff --git a/src/card_table.h b/src/card_table.h
index 9dc7201..f19be7d 100644
--- a/src/card_table.h
+++ b/src/card_table.h
@@ -26,7 +26,7 @@
namespace art {
class Heap;
-class Space;
+class ContinuousSpace;
class SpaceBitmap;
class Object;
@@ -108,13 +108,13 @@
void ClearCardTable();
// Resets all of the bytes in the card table which do not map to the image space.
- void ClearSpaceCards(Space* space);
+ void ClearSpaceCards(ContinuousSpace* space);
// Clean all the cards which map to a space.
- void PreClearCards(Space* space, std::vector<byte*>& out_cards);
+ void PreClearCards(ContinuousSpace* space, std::vector<byte*>& out_cards);
// Returns all of the dirty cards which map to a space.
- void GetDirtyCards(Space* space, std::vector<byte*>& out_cards) const;
+ void GetDirtyCards(ContinuousSpace* space, std::vector<byte*>& out_cards) const;
// Returns the first address in the heap which maps to this card.
void* AddrFromCard(const byte *card_addr) const {
diff --git a/src/heap.cc b/src/heap.cc
index 5c6a606..eafe272 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -22,7 +22,6 @@
#include <limits>
#include <vector>
-#include "atomic.h"
#include "card_table.h"
#include "debugger.h"
#include "heap_bitmap.h"
@@ -131,15 +130,14 @@
card_marking_disabled_(false),
is_gc_running_(false),
last_gc_type_(kGcTypeNone),
+ growth_limit_(growth_limit),
concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
concurrent_start_size_(128 * KB),
concurrent_min_free_(256 * KB),
- bytes_since_last_gc_(0),
concurrent_gc_start_rate_(3 * MB / 2),
sticky_gc_count_(0),
- large_object_threshold_(12 * KB),
+ large_object_threshold_(3 * kPageSize),
num_bytes_allocated_(0),
- num_objects_allocated_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
verify_pre_gc_heap_(false),
@@ -169,30 +167,27 @@
byte* requested_begin = NULL;
std::string image_file_name(original_image_file_name);
if (!image_file_name.empty()) {
- Space* image_space = NULL;
+ ImageSpace* image_space = NULL;
if (OS::FileExists(image_file_name.c_str())) {
// If the /system file exists, it should be up-to-date, don't try to generate
- image_space = Space::CreateImageSpace(image_file_name);
+ image_space = ImageSpace::Create(image_file_name);
} else {
// If the /system file didn't exist, we need to use one from the art-cache.
// If the cache file exists, try to open, but if it fails, regenerate.
// If it does not exist, generate.
image_file_name = GetArtCacheFilenameOrDie(image_file_name);
if (OS::FileExists(image_file_name.c_str())) {
- image_space = Space::CreateImageSpace(image_file_name);
+ image_space = ImageSpace::Create(image_file_name);
}
if (image_space == NULL) {
if (!GenerateImage(image_file_name)) {
LOG(FATAL) << "Failed to generate image: " << image_file_name;
}
- image_space = Space::CreateImageSpace(image_file_name);
+ image_space = ImageSpace::Create(image_file_name);
}
}
- if (image_space == NULL) {
- LOG(FATAL) << "Failed to create space from " << image_file_name;
- }
-
+ CHECK(image_space != NULL) << "Failed to create space from " << image_file_name;
AddSpace(image_space);
// Oat files referenced by image files immediately follow them in memory; ensure the alloc
// space isn't going to end up in the middle.
@@ -204,26 +199,31 @@
}
}
- UniquePtr<AllocSpace> alloc_space(Space::CreateAllocSpace(
- "alloc space", initial_size, growth_limit, capacity, requested_begin));
+ UniquePtr<AllocSpace> alloc_space(AllocSpace::Create("alloc space", initial_size, growth_limit,
+ capacity, requested_begin));
alloc_space_ = alloc_space.release();
CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
AddSpace(alloc_space_);
// Spaces are sorted in order of Begin().
byte* heap_begin = spaces_.front()->Begin();
- size_t heap_capacity = (spaces_.back()->Begin() - spaces_.front()->Begin()) + spaces_.back()->NonGrowthLimitCapacity();
+ size_t heap_capacity = spaces_.back()->End() - spaces_.front()->Begin();
+ if (spaces_.back()->IsAllocSpace()) {
+ heap_capacity += spaces_.back()->AsAllocSpace()->NonGrowthLimitCapacity();
+ }
// Mark image objects in the live bitmap
- for (size_t i = 0; i < spaces_.size(); ++i) {
- Space* space = spaces_[i];
+ // TODO: C++0x
+ for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+ Space* space = *it;
if (space->IsImageSpace()) {
- space->AsImageSpace()->RecordImageAllocations(space->GetLiveBitmap());
+ ImageSpace* image_space = space->AsImageSpace();
+ image_space->RecordImageAllocations(image_space->GetLiveBitmap());
}
}
// Allocate the large object space.
- large_object_space_.reset(Space::CreateLargeObjectSpace("large object space"));
+ large_object_space_.reset(FreeListSpace::Create("large object space", 64 * MB));
live_bitmap_->SetLargeObjects(large_object_space_->GetLiveObjects());
mark_bitmap_->SetLargeObjects(large_object_space_->GetMarkObjects());
@@ -239,7 +239,6 @@
// TODO: Count objects in the image space here.
num_bytes_allocated_ = 0;
- num_objects_allocated_ = 0;
// Max stack size in bytes.
static const size_t max_stack_size = capacity / SpaceBitmap::kAlignment * kWordSize;
@@ -270,14 +269,13 @@
}
// Sort spaces based on begin address
-class SpaceSorter {
- public:
- bool operator ()(const Space* a, const Space* b) const {
+struct SpaceSorter {
+ bool operator ()(const ContinuousSpace* a, const ContinuousSpace* b) const {
return a->Begin() < b->Begin();
}
};
-void Heap::AddSpace(Space* space) {
+void Heap::AddSpace(ContinuousSpace* space) {
WriterMutexLock mu(*Locks::heap_bitmap_lock_);
DCHECK(space != NULL);
DCHECK(space->GetLiveBitmap() != NULL);
@@ -324,7 +322,7 @@
STLDeleteValues(&cumulative_timings_);
}
-Space* Heap::FindSpaceFromObject(const Object* obj) const {
+ContinuousSpace* Heap::FindSpaceFromObject(const Object* obj) const {
// TODO: C++0x auto
for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
if ((*it)->Contains(obj)) {
@@ -350,22 +348,12 @@
}
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
- size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
-
- size_t chunk_size = static_cast<size_t>(reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start));
- size_t chunk_free_bytes = 0;
+ size_t chunk_size = reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start);
if (used_bytes < chunk_size) {
- chunk_free_bytes = chunk_size - used_bytes;
+ size_t chunk_free_bytes = chunk_size - used_bytes;
+ size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);
+ max_contiguous_allocation = std::max(max_contiguous_allocation, chunk_free_bytes);
}
-
- if (chunk_free_bytes > max_contiguous_allocation) {
- max_contiguous_allocation = chunk_free_bytes;
- }
-}
-
-bool Heap::CanAllocateBytes(size_t bytes) const {
- return num_bytes_allocated_ + large_object_space_->GetNumBytesAllocated() + bytes <=
- alloc_space_->Capacity();
}
Object* Heap::AllocObject(Class* c, size_t byte_count) {
@@ -382,8 +370,8 @@
// We can only do this for primitive objects since large objects will not be within the card table
// range. This also means that we rely on SetClass not dirtying the object's card.
if (byte_count >= large_object_threshold_ && have_zygote_space_ && c->IsPrimitiveArray()) {
- obj = Allocate(NULL, RoundUp(byte_count, kPageSize));
- size = 0;
+ size = RoundUp(byte_count, kPageSize);
+ obj = Allocate(NULL, size);
if (obj != NULL) {
// Make sure that our large object didn't get placed anywhere within the space interval or else
@@ -402,9 +390,6 @@
}
if (LIKELY(obj != NULL)) {
- android_atomic_add(byte_count, reinterpret_cast<volatile int32_t*>(
- reinterpret_cast<size_t>(&bytes_since_last_gc_)));
-
obj->SetClass(c);
// Record allocation after since we want to use the atomic add for the atomic fence to guard
@@ -414,12 +399,10 @@
if (Dbg::IsAllocTrackingEnabled()) {
Dbg::RecordAllocation(c, byte_count);
}
- if (bytes_since_last_gc_ >= concurrent_gc_start_rate_ ||
- num_bytes_allocated_ >= concurrent_start_bytes_) {
+ if (static_cast<size_t>(num_bytes_allocated_) >= concurrent_start_bytes_) {
// We already have a request pending, no reason to start more until we update
// concurrent_start_bytes_.
concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
- bytes_since_last_gc_ = 0;
// The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
Thread* self = Thread::Current();
SirtRef<Object> ref(self, obj);
@@ -481,7 +464,7 @@
void Heap::DumpSpaces() {
// TODO: C++0x auto
for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- Space* space = *it;
+ ContinuousSpace* space = *it;
LOG(INFO) << *space << "\n"
<< *space->GetLiveBitmap() << "\n"
<< *space->GetMarkBitmap();
@@ -508,7 +491,7 @@
}
// Ignore early dawn of the universe verifications
- if (!VERIFY_OBJECT_FAST && num_objects_allocated_ > 10) {
+ if (!VERIFY_OBJECT_FAST && GetObjectsAllocated() > 10) {
const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
Object::ClassOffset().Int32Value();
const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
@@ -543,20 +526,17 @@
void Heap::RecordAllocation(size_t size, const Object* obj) {
DCHECK(obj != NULL);
DCHECK_GT(size, 0u);
- COMPILE_ASSERT(sizeof(size_t) == sizeof(int32_t),
- int32_t_must_be_same_size_as_size_t_for_used_atomic_operations);
- android_atomic_add(size, reinterpret_cast<volatile int32_t*>(
- reinterpret_cast<size_t>(&num_bytes_allocated_)));
- android_atomic_add(1, reinterpret_cast<volatile int32_t*>(
- reinterpret_cast<size_t>(&num_objects_allocated_)));
+ num_bytes_allocated_ += size;
if (Runtime::Current()->HasStatsEnabled()) {
- RuntimeStats* global_stats = Runtime::Current()->GetStats();
RuntimeStats* thread_stats = Thread::Current()->GetStats();
- ++global_stats->allocated_objects;
++thread_stats->allocated_objects;
- global_stats->allocated_bytes += size;
thread_stats->allocated_bytes += size;
+
+ // TODO: Update these atomically.
+ RuntimeStats* global_stats = Runtime::Current()->GetStats();
+ ++global_stats->allocated_objects;
+ global_stats->allocated_bytes += size;
}
allocation_stack_->AtomicPush(obj);
@@ -565,35 +545,29 @@
void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
COMPILE_ASSERT(sizeof(size_t) == sizeof(int32_t),
int32_t_must_be_same_size_as_size_t_for_used_atomic_operations);
- DCHECK_LE(freed_objects, num_objects_allocated_);
- android_atomic_add(-static_cast<int32_t>(freed_objects),
- reinterpret_cast<volatile int32_t*>(
- reinterpret_cast<size_t>(&num_objects_allocated_)));
-
- DCHECK_LE(freed_bytes, num_bytes_allocated_);
- android_atomic_add(-static_cast<int32_t>(freed_bytes),
- reinterpret_cast<volatile int32_t*>(
- reinterpret_cast<size_t>(&num_bytes_allocated_)));
+ DCHECK_LE(freed_bytes, static_cast<size_t>(num_bytes_allocated_));
+ num_bytes_allocated_ -= freed_bytes;
if (Runtime::Current()->HasStatsEnabled()) {
- RuntimeStats* global_stats = Runtime::Current()->GetStats();
RuntimeStats* thread_stats = Thread::Current()->GetStats();
- global_stats->freed_objects += freed_objects;
thread_stats->freed_objects += freed_objects;
- global_stats->freed_bytes += freed_bytes;
thread_stats->freed_bytes += freed_bytes;
+
+ // TODO: Do this concurrently.
+ RuntimeStats* global_stats = Runtime::Current()->GetStats();
+ global_stats->freed_objects += freed_objects;
+ global_stats->freed_bytes += freed_bytes;
}
}
Object* Heap::TryToAllocate(AllocSpace* space, size_t alloc_size, bool grow) {
+ // Should we try to use a CAS here and fix up num_bytes_allocated_ later with AllocationSize?
+ if (num_bytes_allocated_ + alloc_size > growth_limit_) {
+ return NULL;
+ }
+
if (UNLIKELY(space == NULL)) {
- if (CanAllocateBytes(alloc_size)) {
- // TODO: This is racy, but is it worth fixing?
- return large_object_space_->Alloc(alloc_size);
- } else {
- // Application ran out of heap space.
- return NULL;
- }
+ return large_object_space_->Alloc(alloc_size);
} else if (grow) {
return space->AllocWithGrowth(alloc_size);
} else {
@@ -651,14 +625,10 @@
}
if (run_gc) {
- if (Runtime::Current()->HasStatsEnabled()) {
- ++Runtime::Current()->GetStats()->gc_for_alloc_count;
- ++Thread::Current()->GetStats()->gc_for_alloc_count;
- }
self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
// If we actually ran a different type of Gc than requested, we can skip the index forwards.
- GcType gc_type_ran = CollectGarbageInternal(gc_type, false);
+ GcType gc_type_ran = CollectGarbageInternal(gc_type, kGcCauseForAlloc, false);
DCHECK(static_cast<size_t>(gc_type_ran) >= i);
i = static_cast<size_t>(gc_type_ran);
self->TransitionFromSuspendedToRunnable();
@@ -694,27 +664,15 @@
VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
<< " allocation";
- if (Runtime::Current()->HasStatsEnabled()) {
- ++Runtime::Current()->GetStats()->gc_for_alloc_count;
- ++Thread::Current()->GetStats()->gc_for_alloc_count;
- }
// We don't need a WaitForConcurrentGcToComplete here either.
self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
- CollectGarbageInternal(kGcTypeFull, true);
+ CollectGarbageInternal(kGcTypeFull, kGcCauseForAlloc, true);
self->TransitionFromSuspendedToRunnable();
return TryToAllocate(space, alloc_size, true);
}
int64_t Heap::GetMaxMemory() {
- size_t total = 0;
- // TODO: C++0x auto
- for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- Space* space = *it;
- if (space->IsAllocSpace()) {
- total += space->AsAllocSpace()->Capacity();
- }
- }
- return total;
+ return growth_limit_;
}
int64_t Heap::GetTotalMemory() {
@@ -774,7 +732,8 @@
Thread* self = Thread::Current();
WaitForConcurrentGcToComplete(self);
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
- CollectGarbageInternal(have_zygote_space_ ? kGcTypePartial : kGcTypeFull, clear_soft_references);
+ // CollectGarbageInternal(have_zygote_space_ ? kGcTypePartial : kGcTypeFull, clear_soft_references);
+ CollectGarbageInternal(kGcTypeFull, kGcCauseExplicit, clear_soft_references);
}
void Heap::PreZygoteFork() {
@@ -819,9 +778,6 @@
it != cumulative_timings_.end(); ++it) {
it->second->Reset();
}
-
- // Reset this since we now count the ZygoteSpace in the total heap size.
- num_bytes_allocated_ = 0;
}
void Heap::FlushAllocStack() {
@@ -831,13 +787,7 @@
}
size_t Heap::GetUsedMemorySize() const {
- size_t total = num_bytes_allocated_ + large_object_space_->GetNumBytesAllocated();
- for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- if ((*it)->IsZygoteSpace()) {
- total += (*it)->AsAllocSpace()->Size();
- }
- }
- return total;
+ return num_bytes_allocated_;
}
void Heap::MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, MarkStack* stack) {
@@ -866,7 +816,7 @@
}
}
-GcType Heap::CollectGarbageInternal(GcType gc_type, bool clear_soft_references) {
+GcType Heap::CollectGarbageInternal(GcType gc_type, GcCause gc_cause, bool clear_soft_references) {
Thread* self = Thread::Current();
Locks::mutator_lock_->AssertNotHeld(self);
DCHECK_EQ(self->GetState(), kWaitingPerformingGc);
@@ -893,6 +843,11 @@
}
gc_complete_lock_->AssertNotHeld(self);
+ if (gc_cause == kGcCauseForAlloc && Runtime::Current()->HasStatsEnabled()) {
+ ++Runtime::Current()->GetStats()->gc_for_alloc_count;
+ ++Thread::Current()->GetStats()->gc_for_alloc_count;
+ }
+
// We need to do partial GCs every now and then to avoid the heap growing too much and
// fragmenting.
if (gc_type == kGcTypeSticky && ++sticky_gc_count_ > partial_gc_frequency_) {
@@ -903,9 +858,9 @@
}
if (concurrent_gc_) {
- CollectGarbageConcurrentMarkSweepPlan(self, gc_type, clear_soft_references);
+ CollectGarbageConcurrentMarkSweepPlan(self, gc_type, gc_cause, clear_soft_references);
} else {
- CollectGarbageMarkSweepPlan(self, gc_type, clear_soft_references);
+ CollectGarbageMarkSweepPlan(self, gc_type, gc_cause, clear_soft_references);
}
- bytes_since_last_gc_ = 0;
@@ -921,7 +876,8 @@
return gc_type;
}
-void Heap::CollectGarbageMarkSweepPlan(Thread* self, GcType gc_type, bool clear_soft_references) {
+void Heap::CollectGarbageMarkSweepPlan(Thread* self, GcType gc_type, GcCause gc_cause,
+ bool clear_soft_references) {
TimingLogger timings("CollectGarbageInternal", true);
std::stringstream gc_type_str;
@@ -968,7 +924,7 @@
// Clear image space cards and keep track of cards we cleared in the mod-union table.
for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- Space* space = *it;
+ ContinuousSpace* space = *it;
if (space->IsImageSpace()) {
mod_union_table_->ClearCards(*it);
timings.AddSplit("ClearModUnionCards");
@@ -1101,7 +1057,7 @@
const size_t percent_free = GetPercentFree();
const size_t current_heap_size = GetUsedMemorySize();
const size_t total_memory = GetTotalMemory();
- LOG(INFO) << gc_type_str.str() << " "
+ LOG(INFO) << gc_cause << " " << gc_type_str.str() << " "
<< "GC freed " << PrettySize(bytes_freed) << ", " << percent_free << "% free, "
<< PrettySize(current_heap_size) << "/" << PrettySize(total_memory) << ", "
<< "paused " << PrettyDuration(duration);
@@ -1414,7 +1370,7 @@
// instead, resulting in no new allocated objects being incorrectly freed by sweep.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- Space* space = *it;
+ ContinuousSpace* space = *it;
// We never allocate into zygote spaces.
if (space->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
@@ -1439,7 +1395,8 @@
}
}
-void Heap::CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_type, bool clear_soft_references) {
+void Heap::CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_type, GcCause gc_cause,
+ bool clear_soft_references) {
TimingLogger timings("ConcurrentCollectGarbageInternal", true);
uint64_t root_begin = NanoTime(), root_end = 0, dirty_begin = 0, dirty_end = 0;
std::stringstream gc_type_str;
@@ -1496,7 +1453,7 @@
// Clear image space cards and keep track of cards we cleared in the mod-union table.
for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
- Space* space = *it;
+ ContinuousSpace* space = *it;
if (space->IsImageSpace()) {
mod_union_table_->ClearCards(*it);
timings.AddSplit("ModUnionClearCards");
@@ -1661,12 +1618,13 @@
{
// TODO: this lock shouldn't be necessary (it's why we did the bitmap flip above).
- WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
if (gc_type != kGcTypeSticky) {
+ WriterMutexLock mu(*Locks::heap_bitmap_lock_);
mark_sweep.SweepLargeObjects(swap);
timings.AddSplit("SweepLargeObjects");
mark_sweep.Sweep(gc_type == kGcTypePartial, swap);
} else {
+ WriterMutexLock mu(*Locks::heap_bitmap_lock_);
mark_sweep.SweepArray(timings, live_stack_.get(), swap);
timings.AddSplit("SweepArray");
}
@@ -1699,7 +1657,7 @@
const size_t percent_free = GetPercentFree();
const size_t current_heap_size = GetUsedMemorySize();
const size_t total_memory = GetTotalMemory();
- LOG(INFO) << gc_type_str.str()
+ LOG(INFO) << gc_cause << " " << gc_type_str.str()
<< "Concurrent GC freed " << PrettySize(bytes_freed) << ", " << percent_free
<< "% free, " << PrettySize(current_heap_size) << "/"
<< PrettySize(total_memory) << ", " << "paused " << PrettyDuration(pause_roots)
@@ -1745,8 +1703,8 @@
}
void Heap::DumpForSigQuit(std::ostream& os) {
- os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(num_bytes_allocated_) << "/"
- << PrettySize(GetTotalMemory()) << "; " << num_objects_allocated_ << " objects\n";
+ os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(GetUsedMemorySize()) << "/"
+ << PrettySize(GetTotalMemory()) << "; " << GetObjectsAllocated() << " objects\n";
// Dump cumulative timings.
LOG(INFO) << "Dumping cumulative Gc timings";
for (CumulativeTimings::iterator it = cumulative_timings_.begin();
@@ -1756,18 +1714,25 @@
}
size_t Heap::GetPercentFree() {
- size_t total = GetTotalMemory();
- return 100 - static_cast<size_t>(100.0f * static_cast<float>(num_bytes_allocated_) / total);
+ return static_cast<size_t>(100.0f * static_cast<float>(GetFreeMemory()) / GetTotalMemory());
}
void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
AllocSpace* alloc_space = alloc_space_;
- size_t alloc_space_capacity = alloc_space->Capacity();
- alloc_space_capacity -= large_object_space_->GetNumBytesAllocated();
- if (max_allowed_footprint > alloc_space_capacity) {
+ if (max_allowed_footprint > GetMaxMemory()) {
VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
- << PrettySize(alloc_space_capacity);
- max_allowed_footprint = alloc_space_capacity;
+ << PrettySize(GetMaxMemory());
+ max_allowed_footprint = GetMaxMemory();
+ }
+ // We want to update the footprint for just the alloc space.
+ max_allowed_footprint -= large_object_space_->GetNumBytesAllocated();
+ for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+ if ((*it)->IsAllocSpace()) {
+ AllocSpace* other_space = (*it)->AsAllocSpace();
+ if (other_space != alloc_space_) {
+ max_allowed_footprint -= other_space->GetNumBytesAllocated();
+ }
+ }
}
alloc_space->SetFootprintLimit(max_allowed_footprint);
}
@@ -1779,33 +1744,24 @@
static const size_t kHeapMinFree = kHeapIdealFree / 4;
void Heap::GrowForUtilization() {
- size_t target_size;
- bool use_footprint_limit = false;
- {
- // We know what our utilization is at this moment.
- // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
- target_size = num_bytes_allocated_ / Heap::GetTargetHeapUtilization();
-
- if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
- target_size = num_bytes_allocated_ + kHeapIdealFree;
- } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
- target_size = num_bytes_allocated_ + kHeapMinFree;
- }
-
- // Calculate when to perform the next ConcurrentGC.
- if (GetTotalMemory() - GetUsedMemorySize() < concurrent_min_free_) {
- // Not enough free memory to perform concurrent GC.
- concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
- } else {
- // Compute below to avoid holding both the statistics and the alloc space lock
- use_footprint_limit = true;
- }
+ // We know what our utilization is at this moment.
+ // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
+ size_t target_size = num_bytes_allocated_ / Heap::GetTargetHeapUtilization();
+ if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
+ target_size = num_bytes_allocated_ + kHeapIdealFree;
+ } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
+ target_size = num_bytes_allocated_ + kHeapMinFree;
}
- if (use_footprint_limit) {
- size_t foot_print_limit = alloc_space_->GetFootprintLimit();
- concurrent_start_bytes_ = foot_print_limit - concurrent_start_size_;
+ // Calculate when to perform the next ConcurrentGC.
+ if (GetFreeMemory() < concurrent_min_free_) {
+ // Not enough free memory to perform concurrent GC.
+ concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
+ } else {
+ // Start a concurrent Gc when we get close to the target size.
+ concurrent_start_bytes_ = target_size - concurrent_start_size_;
}
+
SetIdealFootprint(target_size);
}
@@ -1902,7 +1858,15 @@
}
size_t Heap::GetObjectsAllocated() const {
- return num_objects_allocated_;
+ size_t total = 0;
+ // TODO: C++0x
+ for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
+ Space* space = *it;
+ if (space->IsAllocSpace()) {
+ total += space->AsAllocSpace()->GetNumObjectsAllocated();
+ }
+ }
+ return total;
}
size_t Heap::GetConcurrentStartSize() const {
@@ -1960,16 +1924,13 @@
}
}
- // TODO: We shouldn't need a WaitForConcurrentGcToComplete here since only
- // concurrent GC resumes threads before the GC is completed and this function
- // is only called within the GC daemon thread.
if (WaitForConcurrentGcToComplete(self) == kGcTypeNone) {
// Start a concurrent GC as one wasn't in progress
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
if (alloc_space_->Size() > min_alloc_space_size_for_sticky_gc_) {
- CollectGarbageInternal(kGcTypeSticky, false);
+ CollectGarbageInternal(kGcTypeSticky, kGcCauseBackground, false);
} else {
- CollectGarbageInternal(kGcTypePartial, false);
+ CollectGarbageInternal(kGcTypePartial, kGcCauseBackground, false);
}
}
}
@@ -1987,13 +1948,12 @@
// We could try mincore(2) but that's only a measure of how many pages we haven't given away,
// not how much use we're making of those pages.
uint64_t ms_time = NsToMs(NanoTime());
- {
- float utilization = static_cast<float>(num_bytes_allocated_) / alloc_space_->Size();
- if ((utilization > 0.75f) || ((ms_time - last_trim_time_) < 2 * 1000)) {
- // Don't bother trimming the heap if it's more than 75% utilized, or if a
- // heap trim occurred in the last two seconds.
- return;
- }
+ float utilization =
+ static_cast<float>(alloc_space_->GetNumBytesAllocated()) / alloc_space_->Size();
+ if ((utilization > 0.75f) || ((ms_time - last_trim_time_) < 2 * 1000)) {
+ // Don't bother trimming the alloc space if it's more than 75% utilized, or if a
+ // heap trim occurred in the last two seconds.
+ return;
}
Thread* self = Thread::Current();
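
For reference, the clamp that GrowForUtilization now applies can be restated as a
self-contained sketch (ComputeTargetSize is a hypothetical helper name; the real code
operates on Heap members and the kHeapIdealFree/kHeapMinFree constants in heap.cc):

  // Grow toward the target utilization, but keep the free headroom
  // between min_free and ideal_free bytes of the current allocation.
  size_t ComputeTargetSize(size_t allocated, float target_utilization,
                           size_t ideal_free, size_t min_free) {
    size_t target = static_cast<size_t>(allocated / target_utilization);
    if (target > allocated + ideal_free) {
      target = allocated + ideal_free;
    } else if (target < allocated + min_free) {
      target = allocated + min_free;
    }
    return target;
  }

With a target utilization of 0.5 and 10 MB allocated, the raw target of 20 MB is clamped
to 10 MB plus the ideal free headroom; the concurrent GC is then scheduled to start
concurrent_start_size_ bytes before that target is hit.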
diff --git a/src/heap.h b/src/heap.h
index 76206c4..5fe491f 100644
--- a/src/heap.h
+++ b/src/heap.h
@@ -21,6 +21,7 @@
#include <string>
#include <vector>
+#include "atomic_integer.h"
#include "card_table.h"
#include "globals.h"
#include "gtest/gtest.h"
@@ -52,7 +53,7 @@
class Thread;
class TimingLogger;
-typedef std::vector<Space*> Spaces;
+typedef std::vector<ContinuousSpace*> Spaces;
// The ordering of the enum matters, it is used to determine which GCs are run first.
enum GcType {
@@ -69,6 +70,13 @@
};
std::ostream& operator<<(std::ostream& os, const GcType& policy);
+enum GcCause {
+ kGcCauseForAlloc,
+ kGcCauseBackground,
+ kGcCauseExplicit,
+};
+std::ostream& operator<<(std::ostream& os, const GcCause& policy);
+
class LOCKABLE Heap {
public:
static const size_t kInitialSize = 2 * MB;
@@ -244,7 +252,7 @@
// Functions for getting the bitmap which corresponds to an object's address.
// This is probably slow. TODO: use a better data structure, such as a binary tree.
- Space* FindSpaceFromObject(const Object*) const;
+ ContinuousSpace* FindSpaceFromObject(const Object*) const;
void DumpForSigQuit(std::ostream& os);
@@ -311,15 +319,16 @@
// Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
// which type of Gc was actually run.
- GcType CollectGarbageInternal(GcType gc_plan, bool clear_soft_references)
+ GcType CollectGarbageInternal(GcType gc_plan, GcCause gc_cause, bool clear_soft_references)
LOCKS_EXCLUDED(gc_complete_lock_,
Locks::heap_bitmap_lock_,
Locks::mutator_lock_,
Locks::thread_suspend_count_lock_);
- void CollectGarbageMarkSweepPlan(Thread* self, GcType gc_plan, bool clear_soft_references)
+ void CollectGarbageMarkSweepPlan(Thread* self, GcType gc_plan, GcCause gc_cause,
+ bool clear_soft_references)
LOCKS_EXCLUDED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
- void CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_plan,
+ void CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_plan, GcCause gc_cause,
bool clear_soft_references)
LOCKS_EXCLUDED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
@@ -331,10 +340,7 @@
size_t GetPercentFree();
- void AddSpace(Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
-
- // Returns true if we "should" be able to allocate a number of bytes.
- bool CanAllocateBytes(size_t bytes) const;
+ void AddSpace(ContinuousSpace* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
// No thread safety analysis since we call this everywhere and it is impossible to find a proper
// lock ordering for it.
@@ -386,6 +392,9 @@
// Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
volatile GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
+ // Maximum size that the heap can reach.
+ size_t growth_limit_;
+
// Bytes until concurrent GC starts.
volatile size_t concurrent_start_bytes_;
size_t concurrent_start_size_;
@@ -403,10 +412,7 @@
UniquePtr<LargeObjectSpace> large_object_space_;
// Number of bytes allocated. Adjusted after each allocation and free.
- volatile size_t num_bytes_allocated_;
-
- // Number of objects allocated. Adjusted after each allocation and free.
- volatile size_t num_objects_allocated_;
+ AtomicInteger num_bytes_allocated_;
// Heap verification flags.
const bool verify_missing_card_marks_;
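
The stream operator for GcCause is declared in this header, but its definition does not
appear in the hunks above; a plausible implementation (the exact strings are an
assumption) is:

  std::ostream& operator<<(std::ostream& os, const GcCause& cause) {
    switch (cause) {
      case kGcCauseForAlloc:   os << "Alloc";      break;  // allocation failed, GC to free memory
      case kGcCauseBackground: os << "Background"; break;  // concurrent GC from the daemon thread
      case kGcCauseExplicit:   os << "Explicit";   break;  // explicit System.gc() request
    }
    return os;
  }

This is what produces the cause prefix in the LOG(INFO) << gc_cause << ... GC messages
added in heap.cc.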
diff --git a/src/image_test.cc b/src/image_test.cc
index 82b4760..c569b79 100644
--- a/src/image_test.cc
+++ b/src/image_test.cc
@@ -65,7 +65,7 @@
Heap* heap = Runtime::Current()->GetHeap();
ASSERT_EQ(1U, heap->GetSpaces().size());
- Space* space = heap->GetSpaces()[0];
+ ContinuousSpace* space = heap->GetSpaces().front();
ASSERT_FALSE(space->IsImageSpace());
ASSERT_TRUE(space != NULL);
ASSERT_TRUE(space->IsAllocSpace());
diff --git a/src/image_writer.cc b/src/image_writer.cc
index 33d37c7..c876329 100644
--- a/src/image_writer.cc
+++ b/src/image_writer.cc
@@ -136,12 +136,11 @@
}
bool ImageWriter::AllocMemory() {
- typedef std::vector<Space*> SpaceVec;
- const SpaceVec& spaces = Runtime::Current()->GetHeap()->GetSpaces();
+ const Spaces& spaces = Runtime::Current()->GetHeap()->GetSpaces();
size_t size = 0;
- for (SpaceVec::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
- if ((*cur)->IsAllocSpace()) {
- size += (*cur)->Size();
+ for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ if ((*it)->IsAllocSpace()) {
+ size += (*it)->Size();
}
}
diff --git a/src/mark_sweep.cc b/src/mark_sweep.cc
index cdb73db..e9056f5 100644
--- a/src/mark_sweep.cc
+++ b/src/mark_sweep.cc
@@ -190,7 +190,7 @@
mark_sweep->CheckObject(root);
}
-void MarkSweep::CopyMarkBits(Space* space) {
+void MarkSweep::CopyMarkBits(ContinuousSpace* space) {
SpaceBitmap* live_bitmap = space->GetLiveBitmap();
SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
mark_bitmap->CopyFrom(live_bitmap);
@@ -212,21 +212,6 @@
MarkSweep* const mark_sweep_;
};
-// Marks all objects that are in images and have been touched by the mutator
-void MarkSweep::ScanDirtyImageRoots() {
- const Spaces& spaces = heap_->GetSpaces();
- CardTable* card_table = heap_->GetCardTable();
- ScanImageRootVisitor image_root_visitor(this);
- // TODO: C++ 0x auto
- for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- Space* space = *it;
- if (space->IsImageSpace()) {
- card_table->Scan(space->GetLiveBitmap(), space->Begin(), space->End(), image_root_visitor,
- IdentityFunctor());
- }
- }
-}
-
void MarkSweep::ScanGrayObjects(bool update_finger) {
const Spaces& spaces = heap_->GetSpaces();
CardTable* card_table = heap_->GetCardTable();
@@ -234,7 +219,7 @@
SetFingerVisitor finger_visitor(this);
// TODO: C++ 0x auto
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- Space* space = *it;
+ ContinuousSpace* space = *it;
byte* begin = space->Begin();
byte* end = space->End();
// Image spaces are handled properly since live == marked for them.
@@ -271,8 +256,8 @@
CheckBitmapVisitor visitor(this);
const Spaces& spaces = heap_->GetSpaces();
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- const Space* space = *it;
- if (space->IsImageSpace()) {
+ if ((*it)->IsImageSpace()) {
+ ImageSpace* space = (*it)->AsImageSpace();
uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
SpaceBitmap* live_bitmap = space->GetLiveBitmap();
@@ -314,7 +299,7 @@
SetFingerVisitor set_finger_visitor(this);
ScanObjectVisitor scan_visitor(this);
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- Space* space = *it;
+ ContinuousSpace* space = *it;
if (space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT ||
(!partial && space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT)
) {
@@ -544,7 +529,7 @@
} else if (!large_mark_objects->Test(obj)) {
++freed_large_objects;
size_t size = large_object_space->AllocationSize(obj);
- freed_bytes_ += size;
+ freed_bytes += size;
large_object_space->Free(obj);
}
}
@@ -574,7 +559,7 @@
scc.mark_sweep = this;
// TODO: C++0x auto
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- Space* space = *it;
+ ContinuousSpace* space = *it;
if (
space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT ||
(!partial && space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT)
@@ -603,7 +588,6 @@
void MarkSweep::SweepLargeObjects(bool swap_bitmaps) {
// Sweep large objects
LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
- MutexLock mu(large_object_space->GetLock());
SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
if (swap_bitmaps) {
@@ -612,17 +596,19 @@
SpaceSetMap::Objects& live_objects = large_live_objects->GetObjects();
// O(n*log(n)) but hopefully there are not too many large objects.
size_t freed_objects = 0;
+ size_t freed_bytes = 0;
// TODO: C++0x
for (SpaceSetMap::Objects::iterator it = live_objects.begin(); it != live_objects.end(); ++it) {
if (!large_mark_objects->Test(*it)) {
- freed_bytes_ += large_object_space->AllocationSize(*it);
+ freed_bytes += large_object_space->AllocationSize(*it);
large_object_space->Free(const_cast<Object*>(*it));
++freed_objects;
}
}
freed_objects_ += freed_objects;
+ freed_bytes_ += freed_bytes;
- // Large objects don't count towards bytes_allocated.
- GetHeap()->RecordFree(freed_objects, 0);
+ GetHeap()->RecordFree(freed_objects, freed_bytes);
}
// Scans instance fields.
@@ -986,7 +972,6 @@
// Reset the marked large objects.
LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
- MutexLock mu(large_objects->GetLock());
large_objects->GetMarkObjects()->Clear();
}
diff --git a/src/mark_sweep.h b/src/mark_sweep.h
index 0de75c5..7ef6817 100644
--- a/src/mark_sweep.h
+++ b/src/mark_sweep.h
@@ -49,9 +49,6 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- // Marks the roots in the image space on dirty cards.
- void ScanDirtyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
// Verify that image roots point to only marked objects within the alloc space.
void VerifyImageRoots() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -65,7 +62,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Copies mark bits from live bitmap of ZygoteSpace to mark bitmap for partial GCs.
- void CopyMarkBits(Space* space);
+ void CopyMarkBits(ContinuousSpace* space);
// Builds a mark stack with objects on dirty cards and recursively mark
// until it empties.
diff --git a/src/mem_map.h b/src/mem_map.h
index c7744bb..1748a0e 100644
--- a/src/mem_map.h
+++ b/src/mem_map.h
@@ -72,7 +72,11 @@
}
byte* End() const {
- return begin_ + size_;
+ return Begin() + Size();
+ }
+
+ bool HasAddress(const void* addr) const {
+ return Begin() <= addr && addr < End();
}
// Trim by unmapping pages at the end of the map.
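
A quick sanity sketch for the new HasAddress predicate (the map name and flags are
illustrative). The interval is half-open, so Begin() is inside the map while End() points
one past the last byte:

  UniquePtr<MemMap> map(MemMap::MapAnonymous("example", NULL, kPageSize,
                                             PROT_READ | PROT_WRITE));
  CHECK(map->HasAddress(map->Begin()));    // first byte: inside
  CHECK(map->HasAddress(map->End() - 1));  // last byte: inside
  CHECK(!map->HasAddress(map->End()));     // one past the end: outside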
diff --git a/src/mod_union_table.cc b/src/mod_union_table.cc
index 83f67db..ee31ef6 100644
--- a/src/mod_union_table.cc
+++ b/src/mod_union_table.cc
@@ -104,12 +104,12 @@
// Create one heap bitmap per image space.
// TODO: C++0x auto
for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
- Space* space = *it;
+ ContinuousSpace* space = *it;
if (space->IsImageSpace()) {
// The mod-union table is only needed when we have an image space since its purpose is to
// cache image roots.
UniquePtr<SpaceBitmap> bitmap(SpaceBitmap::Create("mod-union table bitmap", space->Begin(),
- space->Capacity()));
+ space->Size()));
CHECK(bitmap.get() != NULL) << "Failed to create mod-union bitmap";
bitmaps_.Put(space, bitmap.release());
}
@@ -120,7 +120,7 @@
STLDeleteValues(&bitmaps_);
}
-void ModUnionTableBitmap::ClearCards(Space* space) {
+void ModUnionTableBitmap::ClearCards(ContinuousSpace* space) {
CardTable* card_table = mark_sweep_->heap_->GetCardTable();
ModUnionClearCardVisitor visitor(&cleared_cards_);
// Clear dirty cards in this image space and update the corresponding mod-union bits.
@@ -135,7 +135,7 @@
uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card));
uintptr_t end = start + GC_CARD_SIZE;
- Space* space = heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start));
+ ContinuousSpace* space = heap_->FindSpaceFromObject(reinterpret_cast<Object*>(start));
SpaceBitmap* bitmap = space->GetLiveBitmap();
// Clear the mod-union bitmap range corresponding to this card so that we don't have any
@@ -169,7 +169,7 @@
// Some tests have no image space, and therefore no mod-union bitmap.
ModUnionScanImageRootVisitor image_root_scanner(GetMarkSweep());
for (BitmapMap::iterator cur = bitmaps_.begin(); cur != bitmaps_.end(); ++cur) {
- const Space* space = cur->first;
+ const ContinuousSpace* space = cur->first;
uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
cur->second->VisitMarkedRange(begin, end, image_root_scanner, IdentityFunctor());
@@ -186,7 +186,7 @@
}
-void ModUnionTableReferenceCache::ClearCards(Space* space) {
+void ModUnionTableReferenceCache::ClearCards(ContinuousSpace* space) {
CardTable* card_table = GetMarkSweep()->GetHeap()->GetCardTable();
ModUnionClearCardSetVisitor visitor(&cleared_cards_);
// Clear dirty cards in this space and update the corresponding mod-union bits.
@@ -257,8 +257,8 @@
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
Heap* heap = mod_union_table_->GetMarkSweep()->GetHeap();
if (mod_union_table_->AddReference(obj, ref) && references_.find(ref) == references_.end()) {
- Space* from_space = heap->FindSpaceFromObject(obj);
- Space* to_space = heap->FindSpaceFromObject(ref);
+ ContinuousSpace* from_space = heap->FindSpaceFromObject(obj);
+ ContinuousSpace* to_space = heap->FindSpaceFromObject(ref);
LOG(INFO) << "Object " << reinterpret_cast<const void*>(obj) << "(" << PrettyTypeOf(obj) << ")"
<< "References " << reinterpret_cast<const void*>(ref)
<< "(" << PrettyTypeOf(ref) << ") without being in mod-union table";
@@ -382,7 +382,7 @@
}
-void ModUnionTableCardCache::ClearCards(Space* space) {
+void ModUnionTableCardCache::ClearCards(ContinuousSpace* space) {
CardTable* card_table = GetMarkSweep()->GetHeap()->GetCardTable();
ModUnionClearCardSetVisitor visitor(&cleared_cards_);
// Clear dirty cards in this space and update the corresponding mod-union bits.
diff --git a/src/mod_union_table.h b/src/mod_union_table.h
index 2e8bd0e..7629665 100644
--- a/src/mod_union_table.h
+++ b/src/mod_union_table.h
@@ -42,7 +42,7 @@
}
// Clear cards which map to a memory range of a space.
- virtual void ClearCards(Space* space) = 0;
+ virtual void ClearCards(ContinuousSpace* space) = 0;
// Update the mod-union table.
virtual void Update() = 0;
@@ -82,7 +82,7 @@
virtual ~ModUnionTableBitmap();
// Clear space cards.
- void ClearCards(Space* space);
+ void ClearCards(ContinuousSpace* space);
// Update table based on cleared cards.
void Update()
@@ -98,7 +98,7 @@
// One bitmap per image space.
// TODO: Add support for Zygote spaces?
- typedef SafeMap<Space*, SpaceBitmap*> BitmapMap;
+ typedef SafeMap<ContinuousSpace*, SpaceBitmap*> BitmapMap;
BitmapMap bitmaps_;
};
@@ -111,7 +111,7 @@
virtual ~ModUnionTableReferenceCache();
// Clear and store cards for a space.
- void ClearCards(Space* space);
+ void ClearCards(ContinuousSpace* space);
// Update table based on cleared cards.
void Update()
@@ -145,7 +145,7 @@
virtual ~ModUnionTableCardCache();
// Clear and store cards for a space.
- void ClearCards(Space* space);
+ void ClearCards(ContinuousSpace* space);
// Nothing to update.
void Update() {}
diff --git a/src/native/dalvik_system_VMRuntime.cc b/src/native/dalvik_system_VMRuntime.cc
index f37b237..7184e48 100644
--- a/src/native/dalvik_system_VMRuntime.cc
+++ b/src/native/dalvik_system_VMRuntime.cc
@@ -160,7 +160,7 @@
uint64_t start_ns = NanoTime();
AllocSpace* alloc_space = heap->GetAllocSpace();
size_t alloc_space_size = alloc_space->Size();
- float utilization = static_cast<float>(heap->GetBytesAllocated()) / alloc_space_size;
+ float utilization = static_cast<float>(alloc_space->GetNumBytesAllocated()) / alloc_space_size;
Thread* self = static_cast<JNIEnvExt*>(env)->self;
heap->Trim(self);
// Trim the native heap.
diff --git a/src/oatdump.cc b/src/oatdump.cc
index ab7f277..7e14199 100644
--- a/src/oatdump.cc
+++ b/src/oatdump.cc
@@ -581,9 +581,13 @@
}
ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
// TODO: C++0x auto
- for (Spaces::const_iterator cur = spaces.begin(); cur != spaces.end(); ++cur) {
- (*cur)->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
- os_ << "\n";
+ for (Spaces::const_iterator it = spaces.begin(); it != spaces.end(); ++it) {
+ Space* space = *it;
+ if (space->IsImageSpace()) {
+ ImageSpace* image_space = space->AsImageSpace();
+ image_space->GetLiveBitmap()->Walk(ImageDumper::Callback, this);
+ os_ << "\n";
+ }
}
// Dump the large objects separately.
heap->GetLargeObjectsSpace()->GetLiveObjects()->Walk(ImageDumper::Callback, this);
diff --git a/src/space.cc b/src/space.cc
index 434c39e..74e719a 100644
--- a/src/space.cc
+++ b/src/space.cc
@@ -30,8 +30,11 @@
#ifndef NDEBUG
#define DEBUG_SPACES 1
+#else
+#define DEBUG_SPACES 0
#endif
+// TODO: Remove this #define macro.
#define CHECK_MEMORY_CALL(call, args, what) \
do { \
int rc = call args; \
@@ -41,19 +44,43 @@
} \
} while (false)
+Space::Space(const std::string& name, GcRetentionPolicy gc_retention_policy)
+ : name_(name),
+ gc_retention_policy_(gc_retention_policy) {
+}
+
+ContinuousSpace::ContinuousSpace(const std::string& name, byte* begin, byte* end,
+ GcRetentionPolicy gc_retention_policy)
+ : Space(name, gc_retention_policy),
+ begin_(begin),
+ end_(end) {
+}
+
+MemMapSpace::MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
+ GcRetentionPolicy gc_retention_policy)
+ : ContinuousSpace(name, mem_map->Begin(), mem_map->Begin() + initial_size, gc_retention_policy),
+ mem_map_(mem_map) {
+}
+
size_t AllocSpace::bitmap_index_ = 0;
-AllocSpace::AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
- size_t growth_limit)
- : Space(name, mem_map, begin, end, GCRP_ALWAYS_COLLECT),
+AllocSpace::AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
+ byte* end, size_t growth_limit)
+ : MemMapSpace(name, mem_map, end - begin, GCRP_ALWAYS_COLLECT),
+ num_bytes_allocated_(0), num_objects_allocated_(0),
lock_("allocation space lock", kAllocSpaceLock), mspace_(mspace),
growth_limit_(growth_limit) {
CHECK(mspace != NULL);
size_t bitmap_index = bitmap_index_++;
- DCHECK(reinterpret_cast<uintptr_t>(mem_map->Begin()) % static_cast<uintptr_t>GC_CARD_SIZE == 0);
- DCHECK(reinterpret_cast<uintptr_t>(mem_map->End()) % static_cast<uintptr_t>GC_CARD_SIZE == 0);
+ static const uintptr_t kGcCardSize = static_cast<uintptr_t>(GC_CARD_SIZE);
+ CHECK(reinterpret_cast<uintptr_t>(mem_map->Begin()) % kGcCardSize == 0);
+ CHECK(reinterpret_cast<uintptr_t>(mem_map->End()) % kGcCardSize == 0);
live_bitmap_.reset(SpaceBitmap::Create(
StringPrintf("allocspace-%s-live-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
@@ -66,9 +93,8 @@
DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index;
}
-AllocSpace* Space::CreateAllocSpace(const std::string& name, size_t initial_size,
- size_t growth_limit, size_t capacity,
- byte* requested_begin) {
+AllocSpace* AllocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
+ size_t capacity, byte* requested_begin) {
// Memory we promise to dlmalloc before it asks for morecore.
// Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
// will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
@@ -127,7 +153,8 @@
// Everything is set so record in immutable structure and leave
MemMap* mem_map_ptr = mem_map.release();
- AllocSpace* space = new AllocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end, growth_limit);
+ AllocSpace* space = new AllocSpace(name, mem_map_ptr, mspace, mem_map_ptr->Begin(), end,
+ growth_limit);
if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
<< " ) " << *space;
@@ -135,13 +162,6 @@
return space;
}
-LargeObjectSpace* Space::CreateLargeObjectSpace(const std::string& name) {
- if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
- VLOG(startup) << "Space::CreateLargeObjectSpace entering " << name;
- }
- return new LargeObjectSpace(name);
-}
-
void* AllocSpace::CreateMallocSpace(void* begin, size_t morecore_start, size_t initial_size) {
// clear errno to allow PLOG on error
errno = 0;
@@ -170,12 +190,12 @@
Object* AllocSpace::AllocWithoutGrowthLocked(size_t num_bytes) {
Object* result = reinterpret_cast<Object*>(mspace_calloc(mspace_, 1, num_bytes));
-#if DEBUG_SPACES
- if (result != NULL) {
+ if (DEBUG_SPACES && result != NULL) {
CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
<< ") not in bounds of allocation space " << *this;
}
-#endif
+ if (result != NULL) {
+ num_bytes_allocated_ += AllocationSize(result);
+ ++num_objects_allocated_;
+ }
return result;
}
@@ -196,9 +216,7 @@
mspace_set_footprint_limit(mspace_, footprint);
// Return the new allocation or NULL.
Object* result = reinterpret_cast<Object*>(ptr);
-#if DEBUG_SPACES
- CHECK(result == NULL || Contains(result));
-#endif
+ CHECK(!DEBUG_SPACES || result == NULL || Contains(result));
return result;
}
@@ -220,7 +238,7 @@
// Trim the heap so that we minimize the size of the Zygote space.
Trim();
// Trim our mem-map to free unused pages.
- mem_map_->UnMapAtEnd(end_);
+ GetMemMap()->UnMapAtEnd(end_);
// TODO: Not hardcode these in?
const size_t starting_size = kPageSize;
const size_t initial_size = 2 * MB;
@@ -237,10 +255,10 @@
// FIXME: Do we need reference counted pointers here?
// Make the two spaces share the same mark bitmaps since the bitmaps span both of the spaces.
VLOG(heap) << "Creating new AllocSpace: ";
- VLOG(heap) << "Size " << mem_map_->Size();
+ VLOG(heap) << "Size " << GetMemMap()->Size();
VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
VLOG(heap) << "Capacity " << PrettySize(capacity);
- UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name_.c_str(), end_, capacity, PROT_READ | PROT_WRITE));
+ UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(GetName().c_str(), End(), capacity, PROT_READ | PROT_WRITE));
void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
// Protect memory beyond the initial size.
byte* end = mem_map->Begin() + starting_size;
@@ -248,10 +266,10 @@
CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name_.c_str());
}
AllocSpace* alloc_space = new AllocSpace(name_, mem_map.release(), mspace, end_, end, growth_limit);
- live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(end_));
- CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(end_));
- mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(end_));
- CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(end_));
+ live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
+ CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
+ mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End()));
+ CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End()));
name_ += "-zygote-transformed";
VLOG(heap) << "zygote space creation done";
return alloc_space;
@@ -259,29 +277,35 @@
void AllocSpace::Free(Object* ptr) {
MutexLock mu(lock_);
-#if DEBUG_SPACES
- CHECK(ptr != NULL);
- CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
-#endif
+ if (DEBUG_SPACES) {
+ CHECK(ptr != NULL);
+ CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
+ }
+ num_bytes_allocated_ -= AllocationSize(ptr);
+ --num_objects_allocated_;
mspace_free(mspace_, ptr);
}
void AllocSpace::FreeList(size_t num_ptrs, Object** ptrs) {
MutexLock mu(lock_);
-#if DEBUG_SPACES
- CHECK(ptrs != NULL);
- size_t num_broken_ptrs = 0;
- for (size_t i = 0; i < num_ptrs; i++) {
- if (!Contains(ptrs[i])) {
- num_broken_ptrs++;
- LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
- } else {
- size_t size = mspace_usable_size(ptrs[i]);
- memset(ptrs[i], 0xEF, size);
+ if (DEBUG_SPACES) {
+ CHECK(ptrs != NULL);
+ size_t num_broken_ptrs = 0;
+ for (size_t i = 0; i < num_ptrs; i++) {
+ if (!Contains(ptrs[i])) {
+ num_broken_ptrs++;
+ LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
+ } else {
+ size_t size = mspace_usable_size(ptrs[i]);
+ memset(ptrs[i], 0xEF, size);
+ }
}
+ CHECK_EQ(num_broken_ptrs, 0u);
}
- CHECK_EQ(num_broken_ptrs, 0u);
-#endif
+ for (size_t i = 0; i < num_ptrs; i++) {
+ num_bytes_allocated_ -= AllocationSize(ptrs[i]);
+ }
+ num_objects_allocated_ -= num_ptrs;
mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
}
@@ -304,7 +328,7 @@
// by mspace_set_footprint_limit.
CHECK_LE(new_end, Begin() + Capacity());
#endif
- CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetSpaceName());
+ CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetName());
} else {
#if DEBUG_SPACES
// Should never be asked for negative footprint (ie before begin)
@@ -317,8 +341,8 @@
// removing ignoring the memory protection change here and in Space::CreateAllocSpace. It's
// likely just a useful debug feature.
size_t size = -increment;
- CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetSpaceName());
- CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetSpaceName());
+ CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetName());
+ CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetName());
}
// Update end_
end_ = new_end;
@@ -327,7 +351,8 @@
}
size_t AllocSpace::AllocationSize(const Object* obj) {
- return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) + kChunkOverhead;
+ return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) +
+ kChunkOverhead;
}
void MspaceMadviseCallback(void* start, void* end, size_t used_bytes, void* /* arg */) {
@@ -380,7 +405,7 @@
size_t ImageSpace::bitmap_index_ = 0;
ImageSpace::ImageSpace(const std::string& name, MemMap* mem_map)
- : Space(name, mem_map, mem_map->Begin(), mem_map->End(), GCRP_NEVER_COLLECT) {
+ : MemMapSpace(name, mem_map, mem_map->Size(), GCRP_NEVER_COLLECT) {
const size_t bitmap_index = bitmap_index_++;
live_bitmap_.reset(SpaceBitmap::Create(
StringPrintf("imagespace-%s-live-bitmap-%d", name.c_str(), static_cast<int>(bitmap_index)),
@@ -388,7 +413,7 @@
DCHECK(live_bitmap_.get() != NULL) << "could not create imagespace live bitmap #" << bitmap_index;
}
-ImageSpace* Space::CreateImageSpace(const std::string& image_file_name) {
+ImageSpace* ImageSpace::Create(const std::string& image_file_name) {
CHECK(!image_file_name.empty());
uint64_t start_time = 0;
@@ -478,24 +503,27 @@
}
std::ostream& operator<<(std::ostream& os, const Space& space) {
- os << (space.IsImageSpace() ? "Image" : "Alloc") << "Space["
- << "begin=" << reinterpret_cast<void*>(space.Begin())
- << ",end=" << reinterpret_cast<void*>(space.End())
- << ",size=" << PrettySize(space.Size()) << ",capacity=" << PrettySize(space.Capacity())
- << ",name=\"" << space.GetSpaceName() << "\"]";
+ space.Dump(os);
return os;
}
-LargeObjectSpace::LargeObjectSpace(const std::string& name)
- : Space(name, NULL, NULL, NULL, GCRP_ALWAYS_COLLECT),
- lock_("large object space lock", kAllocSpaceLock),
- num_bytes_allocated_(0) {
- live_objects_.reset(new SpaceSetMap("large live objects"));
- mark_objects_.reset(new SpaceSetMap("large marked objects"));
+void AllocSpace::Dump(std::ostream& os) const {
+ os << GetType() << "["
+ << "begin=" << reinterpret_cast<void*>(Begin())
+ << ",end=" << reinterpret_cast<void*>(End())
+ << ",size=" << PrettySize(Size()) << ",capacity=" << PrettySize(Capacity())
+ << ",name=\"" << GetName() << "\"]";
+}
+
+void ImageSpace::Dump(std::ostream& os) const {
+ os << GetType() << "["
+ << "begin=" << reinterpret_cast<void*>(Begin())
+ << ",end=" << reinterpret_cast<void*>(End())
+ << ",size=" << PrettySize(Size())
+ << ",name=\"" << GetName() << "\"]";
}
void LargeObjectSpace::SwapBitmaps() {
- MutexLock mu(lock_);
SpaceSetMap* temp_live_objects = live_objects_.release();
live_objects_.reset(mark_objects_.release());
mark_objects_.reset(temp_live_objects);
@@ -505,7 +533,37 @@
mark_objects_->SetName(temp_name);
}
-Object* LargeObjectSpace::Alloc(size_t num_bytes) {
+DiscontinuousSpace::DiscontinuousSpace(const std::string& name,
+ GcRetentionPolicy gc_retention_policy)
+ : Space(name, gc_retention_policy) {
+}
+
+LargeObjectSpace::LargeObjectSpace(const std::string& name)
+ : DiscontinuousSpace(name, GCRP_ALWAYS_COLLECT),
+ num_bytes_allocated_(0),
+ num_objects_allocated_(0) {
+ live_objects_.reset(new SpaceSetMap("large live objects"));
+ mark_objects_.reset(new SpaceSetMap("large marked objects"));
+}
+
+void LargeObjectSpace::CopyLiveToMarked() {
+ mark_objects_->CopyFrom(*live_objects_.get());
+}
+
+LargeObjectMapSpace::LargeObjectMapSpace(const std::string& name)
+ : LargeObjectSpace(name),
+ lock_("large object space lock", kAllocSpaceLock)
+{
+
+}
+
+LargeObjectMapSpace* LargeObjectMapSpace::Create(const std::string& name) {
+ return new LargeObjectMapSpace(name);
+}
+
+Object* LargeObjectMapSpace::Alloc(size_t num_bytes) {
MutexLock mu(lock_);
MemMap* mem_map = MemMap::MapAnonymous("allocation", NULL, num_bytes, PROT_READ | PROT_WRITE);
if (mem_map == NULL) {
@@ -515,31 +573,29 @@
large_objects_.push_back(obj);
mem_maps_.Put(obj, mem_map);
num_bytes_allocated_ += mem_map->Size();
+ ++num_objects_allocated_;
return obj;
}
-void LargeObjectSpace::CopyLiveToMarked() {
- mark_objects_->CopyFrom(*live_objects_.get());
-}
-
-void LargeObjectSpace::Free(Object* ptr) {
+void LargeObjectMapSpace::Free(Object* ptr) {
MutexLock mu(lock_);
MemMaps::iterator found = mem_maps_.find(ptr);
CHECK(found != mem_maps_.end()) << "Attempted to free large object which was not live";
DCHECK_GE(num_bytes_allocated_, found->second->Size());
num_bytes_allocated_ -= found->second->Size();
+ --num_objects_allocated_;
delete found->second;
mem_maps_.erase(found);
}
-size_t LargeObjectSpace::AllocationSize(const Object* obj) {
+size_t LargeObjectMapSpace::AllocationSize(const Object* obj) {
MutexLock mu(lock_);
MemMaps::iterator found = mem_maps_.find(const_cast<Object*>(obj));
CHECK(found != mem_maps_.end()) << "Attempted to get size of a large object which is not live";
return found->second->Size();
}
-void LargeObjectSpace::Walk(AllocSpace::WalkCallback callback, void* arg) {
+void LargeObjectMapSpace::Walk(AllocSpace::WalkCallback callback, void* arg) {
MutexLock mu(lock_);
for (MemMaps::iterator it = mem_maps_.begin(); it != mem_maps_.end(); ++it) {
MemMap* mem_map = it->second;
@@ -548,4 +604,155 @@
}
}
+bool LargeObjectMapSpace::Contains(const Object* obj) const {
+ MutexLock mu(const_cast<Mutex&>(lock_));
+ return mem_maps_.find(const_cast<Object*>(obj)) != mem_maps_.end();
+}
+
+FreeListSpace* FreeListSpace::Create(const std::string& name, size_t size) {
+ CHECK(size % kAlignment == 0);
+ MemMap* mem_map = MemMap::MapAnonymous(name.c_str(), NULL, size, PROT_READ | PROT_WRITE);
+ CHECK(mem_map != NULL) << "Failed to allocate large object space mem map";
+ return new FreeListSpace(name, mem_map, mem_map->Begin(), mem_map->End());
+}
+
+FreeListSpace::FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end)
+ : LargeObjectSpace(name),
+ begin_(begin),
+ end_(end),
+ mem_map_(mem_map),
+ lock_("free list space lock", kAllocSpaceLock) {
+ chunks_.resize(Size() / kAlignment + 1);
+ // Add a dummy chunk so we don't need to handle chunks having no next chunk.
+ chunks_.back().SetSize(kAlignment, false);
+ // Start out with one large free chunk.
+ AddFreeChunk(begin_, end_ - begin_, NULL);
+}
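+
+// Sizing sketch (illustrative): with kAlignment == kPageSize == 4 KB, a 16 MB
+// space gets 4096 metadata slots plus the dummy terminator, and the whole
+// range starts out as a single free chunk:
+//
+//   chunks_.resize(4096 + 1);
+//   AddFreeChunk(begin_, 16 * MB, NULL);  // slot 0: size 16 MB, free bit set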
+
+FreeListSpace::~FreeListSpace() {}
+
+void FreeListSpace::AddFreeChunk(void* address, size_t size, Chunk* previous) {
+ Chunk* chunk = ChunkFromAddr(address);
+ chunk->SetSize(size, true);
+ chunk->SetPrevious(previous);
+ Chunk* next_chunk = GetNextChunk(chunk);
+ next_chunk->SetPrevious(chunk);
+ free_chunks_.insert(chunk);
+}
+
+FreeListSpace::Chunk* FreeListSpace::ChunkFromAddr(void* address) {
+ size_t offset = reinterpret_cast<byte*>(address) - Begin();
+ DCHECK(IsAligned<kAlignment>(offset));
+ DCHECK_LT(offset, Size());
+ return &chunks_[offset / kAlignment];
+}
+
+void* FreeListSpace::AddrFromChunk(Chunk* chunk) {
+ return reinterpret_cast<void*>(Begin() + (chunk - &chunks_.front()) * kAlignment);
+}
+
+void FreeListSpace::RemoveFreeChunk(Chunk* chunk) {
+ // TODO: C++0x
+ // TODO: Improve performance, this might be slow.
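+ // SortBySize compares sizes only, so several distinct chunks can be
+ // equivalent; equal_range finds them all and we scan for pointer identity.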
+ std::pair<FreeChunks::iterator, FreeChunks::iterator> range = free_chunks_.equal_range(chunk);
+ for (FreeChunks::iterator it = range.first; it != range.second; ++it) {
+ if (*it == chunk) {
+ free_chunks_.erase(it);
+ return;
+ }
+ }
+}
+
+void FreeListSpace::Walk(AllocSpace::WalkCallback callback, void* arg) {
+ MutexLock mu(lock_);
+ for (Chunk* chunk = &chunks_.front(); chunk < &chunks_.back(); ) {
+ if (!chunk->IsFree()) {
+ size_t size = chunk->GetSize();
+ void* begin = AddrFromChunk(chunk);
+ void* end = reinterpret_cast<void*>(reinterpret_cast<byte*>(begin) + size);
+ callback(begin, end, size, arg);
+ callback(NULL, NULL, 0, arg);
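+ // The extra NULL callback marks the end of each allocated region; we assume
+ // callers follow the mspace_inspect_all callback convention used elsewhere.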
+ }
+ chunk = GetNextChunk(chunk);
+ }
+}
+
+void FreeListSpace::Free(Object* obj) {
+ MutexLock mu(lock_);
+ CHECK(Contains(obj));
+ // Check adjacent chunks to see if we need to combine.
+ Chunk* chunk = ChunkFromAddr(obj);
+ CHECK(!chunk->IsFree());
+
+ size_t allocation_size = chunk->GetSize();
+ madvise(obj, allocation_size, MADV_DONTNEED);
+ num_objects_allocated_--;
+ num_bytes_allocated_ -= allocation_size;
+ Chunk* prev = chunk->GetPrevious();
+ Chunk* next = GetNextChunk(chunk);
+
+ // Combine any adjacent free chunks
+ size_t extra_size = allocation_size;
+ if (next->IsFree()) {
+ extra_size += next->GetSize();
+ RemoveFreeChunk(next);
+ }
+ if (prev != NULL && prev->IsFree()) {
+ RemoveFreeChunk(prev);
+ AddFreeChunk(AddrFromChunk(prev), prev->GetSize() + extra_size, prev->GetPrevious());
+ } else {
+ AddFreeChunk(AddrFromChunk(chunk), extra_size, prev);
+ }
+}
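+
+// Coalescing sketch (illustrative): freeing B in the layout [A free][B][C free]
+// removes A and C from free_chunks_ and reinserts a single chunk at A's address
+// covering all three, so no two adjacent chunks are ever both free:
+//
+//   space->Free(b);  // free_chunks_: {A, C} -> {A spanning A+B+C}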
+
+bool FreeListSpace::Contains(const Object* obj) const {
+ return mem_map_->HasAddress(obj);
+}
+
+FreeListSpace::Chunk* FreeListSpace::GetNextChunk(Chunk* chunk) {
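+ // chunks_ has one slot per kAlignment bytes, so advancing by
+ // size / kAlignment yields the metadata for the next chunk in memory.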
+ return chunk + chunk->GetSize() / kAlignment;
+}
+
+size_t FreeListSpace::AllocationSize(const Object* obj) {
+ Chunk* chunk = ChunkFromAddr(const_cast<Object*>(obj));
+ CHECK(!chunk->IsFree());
+ return chunk->GetSize();
+}
+
+Object* FreeListSpace::Alloc(size_t num_bytes) {
+ MutexLock mu(lock_);
+ num_bytes = RoundUp(num_bytes, kAlignment);
+ Chunk temp;
+ temp.SetSize(num_bytes);
+ // Find the smallest chunk at least num_bytes in size.
+ FreeChunks::iterator found = free_chunks_.lower_bound(&temp);
+ if (found == free_chunks_.end()) {
+ // Out of memory, or too much fragmentation.
+ return NULL;
+ }
+ Chunk* chunk = *found;
+ free_chunks_.erase(found);
+ CHECK(chunk->IsFree());
+ void* addr = AddrFromChunk(chunk);
+ size_t chunk_size = chunk->GetSize();
+ chunk->SetSize(num_bytes);
+ if (chunk_size > num_bytes) {
+ // Split the chunk into two chunks.
+ Chunk* new_chunk = GetNextChunk(chunk);
+ AddFreeChunk(AddrFromChunk(new_chunk), chunk_size - num_bytes, chunk);
+ }
+
+ num_objects_allocated_++;
+ num_bytes_allocated_ += num_bytes;
+ return reinterpret_cast<Object*>(addr);
+}
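+
+// Allocation sketch (illustrative): with a single free 64 KB chunk, Alloc(5000)
+// rounds the request up to 8 KB (two pages), takes the best-fit chunk from the
+// multiset, and splits off the remaining 56 KB as a new free chunk:
+//
+//   Object* obj = space->Alloc(5000);  // 8 KB used, 56 KB back on the free list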
+
+void FreeListSpace::FreeList(size_t num_ptrs, Object** ptrs) {
+ for (size_t i = 0; i < num_ptrs; ++i) {
+ Free(ptrs[i]);
+ }
+}
+
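+// Usage sketch (illustrative; FreeListSpace mirrors the LargeObjectMapSpace
+// interface, so either can back the heap's large object space):
+//
+//   FreeListSpace* los = FreeListSpace::Create("large object space", 16 * MB);
+//   Object* obj = los->Alloc(128 * KB);
+//   CHECK(los->Contains(obj));
+//   los->Free(obj);
+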
} // namespace art
diff --git a/src/space.h b/src/space.h
index d6c7f98..9f03904 100644
--- a/src/space.h
+++ b/src/space.h
@@ -42,63 +42,26 @@
};
std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy);
+enum SpaceType {
+ kSpaceTypeImageSpace,
+ kSpaceTypeAllocSpace,
+ kSpaceTypeZygoteSpace,
+ kSpaceTypeLargeObjectSpace,
+};
+std::ostream& operator<<(std::ostream& os, const SpaceType& space_type);
+
// A space contains memory allocated for managed objects.
class Space {
public:
- // Create a AllocSpace with the requested sizes. The requested
- // base address is not guaranteed to be granted, if it is required,
- // the caller should call Begin on the returned space to confirm
- // the request was granted.
- static AllocSpace* CreateAllocSpace(const std::string& name, size_t initial_size,
- size_t growth_limit, size_t capacity,
- byte* requested_begin);
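+ // Capability and identity queries each concrete space must implement;
+ // GetType() drives the As*Space() casts and Is*Space() predicates below.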
+ virtual bool CanAllocateInto() const = 0;
+ virtual bool IsCompactible() const = 0;
+ virtual bool Contains(const Object* obj) const = 0;
+ virtual SpaceType GetType() const = 0;
- // Creates a large object space. Allocations into the large object space use mem maps instead of
- // malloc.
- static LargeObjectSpace* CreateLargeObjectSpace(const std::string& name);
-
- // create a Space from an image file. cannot be used for future allocation or collected.
- static ImageSpace* CreateImageSpace(const std::string& image)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
- virtual ~Space() {}
-
- const std::string& GetSpaceName() const {
+ const std::string& GetName() const {
return name_;
}
- // Address at which the space begins
- byte* Begin() const {
- return begin_;
- }
-
- // Address at which the space ends, which may vary as the space is filled
- byte* End() const {
- return end_;
- }
-
- // Is object within this space?
- bool Contains(const Object* obj) const {
- const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
- return Begin() <= byte_ptr && byte_ptr < End();
- }
-
- // Current size of space
- size_t Size() const {
- return End() - Begin();
- }
-
- // Maximum size of space
- virtual size_t Capacity() const {
- return mem_map_->Size();
- }
-
- // Size of the space without a limit on its growth. By default this is just the Capacity, but
- // for the allocation space we support starting with a small heap and then extending it.
- virtual size_t NonGrowthLimitCapacity() const {
- return Capacity();
- }
-
GcRetentionPolicy GetGcRetentionPolicy() const {
return gc_retention_policy_;
}
@@ -108,77 +71,185 @@
}
ImageSpace* AsImageSpace() {
- DCHECK(IsImageSpace());
+ DCHECK_EQ(GetType(), kSpaceTypeImageSpace);
return down_cast<ImageSpace*>(this);
}
AllocSpace* AsAllocSpace() {
- DCHECK(IsAllocSpace());
+ DCHECK_EQ(GetType(), kSpaceTypeAllocSpace);
+ return down_cast<AllocSpace*>(this);
+ }
+
+ AllocSpace* AsZygoteSpace() {
+ DCHECK_EQ(GetType(), kSpaceTypeZygoteSpace);
return down_cast<AllocSpace*>(this);
}
LargeObjectSpace* AsLargeObjectSpace() {
- DCHECK(IsLargeObjectSpace());
+ DCHECK_EQ(GetType(), kSpaceTypeLargeObjectSpace);
return down_cast<LargeObjectSpace*>(this);
}
- virtual bool IsAllocSpace() const = 0;
- virtual bool IsImageSpace() const = 0;
- virtual bool IsZygoteSpace() const = 0;
- virtual bool IsLargeObjectSpace() const = 0;
-
- virtual SpaceBitmap* GetLiveBitmap() const = 0;
- virtual SpaceBitmap* GetMarkBitmap() const = 0;
-
- const std::string GetName() const {
- return name_;
+ bool IsImageSpace() const {
+ return GetType() == kSpaceTypeImageSpace;
}
+ bool IsAllocSpace() const {
+ return GetType() == kSpaceTypeAllocSpace || GetType() == kSpaceTypeZygoteSpace;
+ }
+
+ bool IsZygoteSpace() const {
+ return GetType() == kSpaceTypeZygoteSpace;
+ }
+
+ bool IsLargeObjectSpace() const {
+ return GetType() == kSpaceTypeLargeObjectSpace;
+ }
+
+ virtual void Dump(std::ostream& /* os */) const { }
+
+ virtual ~Space() {}
+
protected:
- Space(const std::string& name, MemMap* mem_map, byte* begin, byte* end,
- GcRetentionPolicy gc_retention_policy)
- : name_(name),
- mem_map_(mem_map),
- begin_(begin),
- end_(end),
- gc_retention_policy_(gc_retention_policy) {}
+ Space(const std::string& name, GcRetentionPolicy gc_retention_policy);
+ // Name of the space.
std::string name_;
- // Underlying storage of the space
- UniquePtr<MemMap> mem_map_;
-
- // The beginning of the storage for fast access (always equals mem_map_->GetAddress())
- byte* const begin_;
-
- // Current end of the space.
- byte* end_;
-
// Garbage collection retention policy, used to figure out when we should sweep over this space.
GcRetentionPolicy gc_retention_policy_;
+ private:
DISALLOW_COPY_AND_ASSIGN(Space);
};
+// Continuous spaces have bitmaps and a contiguous address range.
+class ContinuousSpace : public Space {
+ public:
+ // Address at which the space begins
+ byte* Begin() const {
+ return begin_;
+ }
+
+ // Address at which the space ends, which may vary as the space is filled.
+ byte* End() const {
+ return end_;
+ }
+
+ // Current size of space
+ size_t Size() const {
+ return End() - Begin();
+ }
+
+ virtual SpaceBitmap* GetLiveBitmap() const = 0;
+ virtual SpaceBitmap* GetMarkBitmap() const = 0;
+
+ // Is object within this space?
+ bool HasAddress(const Object* obj) const {
+ const byte* byte_ptr = reinterpret_cast<const byte*>(obj);
+ return Begin() <= byte_ptr && byte_ptr < End();
+ }
+
+ virtual bool Contains(const Object* obj) const {
+ return HasAddress(obj);
+ }
+
+ virtual ~ContinuousSpace() {}
+
+ protected:
+ ContinuousSpace(const std::string& name, byte* begin, byte* end,
+ GcRetentionPolicy gc_retention_policy);
+
+ // The beginning of the storage for fast access.
+ byte* begin_;
+
+ // Current end of the space.
+ byte* end_;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(ContinuousSpace);
+};
+
+class DiscontinuousSpace : public Space {
+ public:
+ // Is object within this space?
+ virtual bool Contains(const Object* obj) const = 0;
+
+ protected:
+ DiscontinuousSpace(const std::string& name, GcRetentionPolicy gc_retention_policy);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(DiscontinuousSpace);
+};
+
std::ostream& operator<<(std::ostream& os, const Space& space);
+class MemMapSpace : public ContinuousSpace {
+ public:
+ // Maximum size to which the mapped space can grow.
+ virtual size_t Capacity() const {
+ return mem_map_->Size();
+ }
+
+ // Size of the space without a limit on its growth. By default this is just the Capacity, but
+ // for the allocation space we support starting with a small heap and then extending it.
+ virtual size_t NonGrowthLimitCapacity() const {
+ return Capacity();
+ }
+
+ protected:
+ MemMapSpace(const std::string& name, MemMap* mem_map, size_t initial_size,
+ GcRetentionPolicy gc_retention_policy);
+
+ MemMap* GetMemMap() {
+ return mem_map_.get();
+ }
+
+ const MemMap* GetMemMap() const {
+ return mem_map_.get();
+ }
+
+ private:
+ // Underlying storage of the space
+ UniquePtr<MemMap> mem_map_;
+
+ DISALLOW_COPY_AND_ASSIGN(MemMapSpace);
+};
+
// An alloc space is a space where objects may be allocated and garbage collected.
-class AllocSpace : public Space {
+class AllocSpace : public MemMapSpace {
public:
typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg);
+ virtual bool CanAllocateInto() const {
+ return true;
+ }
+
+ virtual bool IsCompactible() const {
+ return false;
+ }
+
+ virtual SpaceType GetType() const {
+ return kSpaceTypeAllocSpace;
+ }
+
+ // Creates an AllocSpace with the requested sizes. The requested base address
+ // is not guaranteed to be granted; if it is required, the caller should call
+ // Begin on the returned space to confirm the request was granted.
+ static AllocSpace* Create(const std::string& name, size_t initial_size, size_t growth_limit,
+ size_t capacity, byte* requested_begin);
+
// Allocate num_bytes, allowing the underlying mspace to grow beyond its current footprint limit.
- Object* AllocWithGrowth(size_t num_bytes);
+ virtual Object* AllocWithGrowth(size_t num_bytes);
// Allocate num_bytes without allowing the underlying mspace to grow.
- Object* AllocWithoutGrowth(size_t num_bytes);
+ virtual Object* AllocWithoutGrowth(size_t num_bytes);
// Return the storage space required by obj.
- size_t AllocationSize(const Object* obj);
-
- void Free(Object* ptr);
-
- void FreeList(size_t num_ptrs, Object** ptrs);
+ virtual size_t AllocationSize(const Object* obj);
+ virtual void Free(Object* ptr);
+ virtual void FreeList(size_t num_ptrs, Object** ptrs);
void* MoreCore(intptr_t increment);
@@ -214,23 +285,7 @@
// The total amount of memory reserved for the alloc space
virtual size_t NonGrowthLimitCapacity() const {
- return mem_map_->End() - mem_map_->Begin();
- }
-
- virtual bool IsAllocSpace() const {
- return gc_retention_policy_ != GCRP_NEVER_COLLECT;
- }
-
- virtual bool IsImageSpace() const {
- return false;
- }
-
- virtual bool IsLargeObjectSpace() const {
- return false;
- }
-
- virtual bool IsZygoteSpace() const {
- return gc_retention_policy_ == GCRP_FULL_COLLECT;
+ return GetMemMap()->Size();
}
virtual SpaceBitmap* GetLiveBitmap() const {
@@ -241,6 +296,8 @@
return mark_bitmap_.get();
}
+ virtual void Dump(std::ostream& os) const;
+
void SetGrowthLimit(size_t growth_limit);
// Swap the live and mark bitmaps of this space. This is used by the GC for concurrent sweeping.
@@ -249,13 +306,24 @@
// Turn ourself into a zygote space and return a new alloc space which has our unused memory.
AllocSpace* CreateZygoteSpace();
+ size_t GetNumBytesAllocated() const {
+ return num_bytes_allocated_;
+ }
+
+ size_t GetNumObjectsAllocated() const {
+ return num_objects_allocated_;
+ }
+
private:
Object* AllocWithoutGrowthLocked(size_t num_bytes) EXCLUSIVE_LOCKS_REQUIRED(lock_);
- friend class Space;
-
UniquePtr<SpaceBitmap> live_bitmap_;
UniquePtr<SpaceBitmap> mark_bitmap_;
+
+ // Approximate number of bytes which have been allocated into the space.
+ size_t num_bytes_allocated_;
+ size_t num_objects_allocated_;
+
static size_t bitmap_index_;
AllocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin, byte* end,
@@ -287,36 +355,36 @@
};
// An image space is a space backed with a memory mapped image
-class ImageSpace : public Space {
+class ImageSpace : public MemMapSpace {
public:
+ virtual bool CanAllocateInto() const {
+ return false;
+ }
+
+ virtual bool IsCompactible() const {
+ return false;
+ }
+
+ virtual SpaceType GetType() const {
+ return kSpaceTypeImageSpace;
+ }
+
+ // Creates a Space from an image file. The space cannot be allocated into and is never collected.
+ static ImageSpace* Create(const std::string& image)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
const ImageHeader& GetImageHeader() const {
return *reinterpret_cast<ImageHeader*>(Begin());
}
const std::string& GetImageFilename() const {
- return name_;
+ return GetName();
}
// Mark the objects defined in this space in the given live bitmap
void RecordImageAllocations(SpaceBitmap* live_bitmap) const
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- virtual bool IsAllocSpace() const {
- return false;
- }
-
- virtual bool IsImageSpace() const {
- return true;
- }
-
- virtual bool IsZygoteSpace() const {
- return false;
- }
-
- virtual bool IsLargeObjectSpace() const {
- return false;
- }
-
virtual SpaceBitmap* GetLiveBitmap() const {
return live_bitmap_.get();
}
@@ -327,6 +395,8 @@
return live_bitmap_.get();
}
+ virtual void Dump(std::ostream& os) const;
+
private:
friend class Space;
@@ -338,32 +408,18 @@
DISALLOW_COPY_AND_ASSIGN(ImageSpace);
};
-class LargeObjectSpace : public Space {
+class LargeObjectSpace : public DiscontinuousSpace {
public:
- virtual bool IsAllocSpace() const {
- return false;
- }
-
- virtual bool IsImageSpace() const {
- return false;
- }
-
- virtual bool IsZygoteSpace() const {
- return false;
- }
-
- virtual bool IsLargeObjectSpace() const {
+ virtual bool CanAllocateInto() const {
return true;
}
- virtual SpaceBitmap* GetLiveBitmap() const {
- LOG(FATAL) << "Unimplemented";
- return NULL;
+ virtual bool IsCompactible() const {
+ return true;
}
- virtual SpaceBitmap* GetMarkBitmap() const {
- LOG(FATAL) << "Unimplemented";
- return NULL;
+ virtual SpaceType GetType() const {
+ return kSpaceTypeLargeObjectSpace;
}
virtual SpaceSetMap* GetLiveObjects() const {
@@ -374,43 +430,142 @@
return mark_objects_.get();
}
- // Return the storage space required by obj.
- size_t AllocationSize(const Object* obj);
-
virtual void SwapBitmaps();
virtual void CopyLiveToMarked();
- Object* Alloc(size_t num_bytes);
- void Free(Object* ptr);
- void Walk(AllocSpace::WalkCallback, void* arg);
+ // Return the storage space required by obj.
+ virtual size_t AllocationSize(const Object* obj) = 0;
+ virtual Object* Alloc(size_t num_bytes) = 0;
+ virtual void Free(Object* ptr) = 0;
+ virtual void Walk(AllocSpace::WalkCallback, void* arg) = 0;
+
+ virtual ~LargeObjectSpace() {}
+
size_t GetNumBytesAllocated() const {
return num_bytes_allocated_;
}
- Mutex& GetLock() {
- return lock_;
+ size_t GetNumObjectsAllocated() const {
+ return num_objects_allocated_;
}
- bool Contains(const Object* obj) const {
- MutexLock mu(const_cast<Mutex&>(lock_));
- return mem_maps_.find(const_cast<Object*>(obj)) != mem_maps_.end();
- }
-
- private:
+ protected:
LargeObjectSpace(const std::string& name);
+ // Approximate number of bytes which have been allocated into the space.
+ size_t num_bytes_allocated_;
+ size_t num_objects_allocated_;
+
+ UniquePtr<SpaceSetMap> live_objects_;
+ UniquePtr<SpaceSetMap> mark_objects_;
+
+ friend class Space;
+};
+
+class LargeObjectMapSpace : public LargeObjectSpace {
+ public:
+ // Creates a large object space. Allocations into the large object space use memory maps instead
+ // of malloc.
+ static LargeObjectMapSpace* Create(const std::string& name);
+
+ // Return the storage space required by obj.
+ virtual size_t AllocationSize(const Object* obj);
+ virtual Object* Alloc(size_t num_bytes);
+ virtual void Free(Object* ptr);
+ virtual void Walk(AllocSpace::WalkCallback, void* arg);
+ virtual bool Contains(const Object* obj) const;
+ private:
+ LargeObjectMapSpace(const std::string& name);
+ virtual ~LargeObjectMapSpace() {}
+
// Used to ensure mutual exclusion when the allocation spaces data structures are being modified.
Mutex lock_;
-
- size_t num_bytes_allocated_;
- UniquePtr<SpaceSetMap> live_objects_;
- UniquePtr<SpaceSetMap> mark_objects_;
std::vector<Object*> large_objects_;
typedef SafeMap<Object*, MemMap*> MemMaps;
MemMaps mem_maps_;
+};
- friend class Space;
+class FreeListSpace : public LargeObjectSpace {
+ public:
+ virtual ~FreeListSpace();
+ static FreeListSpace* Create(const std::string& name, size_t capacity);
+
+ virtual size_t AllocationSize(const Object* obj);
+ virtual Object* Alloc(size_t num_bytes);
+ virtual void Free(Object* obj);
+ virtual void FreeList(size_t num_ptrs, Object** ptrs);
+ virtual bool Contains(const Object* obj) const;
+ virtual void Walk(AllocSpace::WalkCallback callback, void* arg);
+
+ // Address at which the space begins
+ byte* Begin() const {
+ return begin_;
+ }
+
+ // Address at which the space ends, which may vary as the space is filled.
+ byte* End() const {
+ return end_;
+ }
+
+ // Current size of space
+ size_t Size() const {
+ return End() - Begin();
+ }
+ private:
+ static const size_t kAlignment = kPageSize;
+
+ class Chunk {
+ public:
+ static const size_t kFreeFlag = 0x80000000;
+
+ struct SortBySize {
+ bool operator()(const Chunk* a, const Chunk* b) const {
+ return a->GetSize() < b->GetSize();
+ }
+ };
+
+ bool IsFree() const {
+ return (m_size & kFreeFlag) != 0;
+ }
+
+ void SetSize(size_t size, bool is_free = false) {
+ m_size = size | (is_free ? kFreeFlag : 0);
+ }
+
+ size_t GetSize() const {
+ return m_size & (kFreeFlag - 1);
+ }
+
+ Chunk* GetPrevious() {
+ return m_previous;
+ }
+
+ void SetPrevious(Chunk* previous) {
+ m_previous = previous;
+ DCHECK(m_previous == NULL ||
+ m_previous + m_previous->GetSize() / kAlignment == this);
+ }
+ private:
+ size_t m_size;
+ Chunk* m_previous;
+ };
+
+ FreeListSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end);
+ void AddFreeChunk(void* address, size_t size, Chunk* previous);
+ Chunk* ChunkFromAddr(void* address);
+ void* AddrFromChunk(Chunk* chunk);
+ void RemoveFreeChunk(Chunk* chunk);
+ Chunk* GetNextChunk(Chunk* chunk);
+
+ typedef std::multiset<Chunk*, Chunk::SortBySize> FreeChunks;
+ byte* begin_;
+ byte* end_;
+ UniquePtr<MemMap> mem_map_;
+ Mutex lock_;
+ std::vector<Chunk> chunks_;
+ FreeChunks free_chunks_;
};
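+
+// Size-word encoding example (illustrative): a free three-page chunk with
+// kAlignment == 4 KB stores m_size == 0x80003000; IsFree() tests the high bit
+// and GetSize() masks it off:
+//
+//   Chunk c;
+//   c.SetSize(3 * kAlignment, true);   // m_size == 0x80003000
+//   CHECK(c.IsFree());
+//   CHECK_EQ(c.GetSize(), 3 * kAlignment);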
// Callback for dlmalloc_inspect_all or mspace_inspect_all that will madvise(2) unused
diff --git a/src/space_test.cc b/src/space_test.cc
index 2dd8dc1..e9c45c4 100644
--- a/src/space_test.cc
+++ b/src/space_test.cc
@@ -35,37 +35,37 @@
TEST_F(SpaceTest, Init) {
{
// Init < max == growth
- UniquePtr<Space> space(Space::CreateAllocSpace("test", 16 * MB, 32 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 32 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Init == max == growth
- UniquePtr<Space> space(Space::CreateAllocSpace("test", 16 * MB, 16 * MB, 16 * MB, NULL));
+ UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 16 * MB, 16 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Init > max == growth
- UniquePtr<Space> space(Space::CreateAllocSpace("test", 32 * MB, 16 * MB, 16 * MB, NULL));
+ UniquePtr<Space> space(AllocSpace::Create("test", 32 * MB, 16 * MB, 16 * MB, NULL));
EXPECT_TRUE(space.get() == NULL);
}
{
// Growth == init < max
- UniquePtr<Space> space(Space::CreateAllocSpace("test", 16 * MB, 16 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 16 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Growth < init < max
- UniquePtr<Space> space(Space::CreateAllocSpace("test", 16 * MB, 8 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(AllocSpace::Create("test", 16 * MB, 8 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() == NULL);
}
{
// Init < growth < max
- UniquePtr<Space> space(Space::CreateAllocSpace("test", 8 * MB, 16 * MB, 32 * MB, NULL));
+ UniquePtr<Space> space(AllocSpace::Create("test", 8 * MB, 16 * MB, 32 * MB, NULL));
EXPECT_TRUE(space.get() != NULL);
}
{
// Init < max < growth
- UniquePtr<Space> space(Space::CreateAllocSpace("test", 8 * MB, 32 * MB, 16 * MB, NULL));
+ UniquePtr<Space> space(AllocSpace::Create("test", 8 * MB, 32 * MB, 16 * MB, NULL));
EXPECT_TRUE(space.get() == NULL);
}
}
@@ -75,7 +75,7 @@
// allocations after the ZygoteSpace is created. The test should also do some GCs to ensure that
// the GC works with the ZygoteSpace.
TEST_F(SpaceTest, ZygoteSpace) {
- AllocSpace* space(Space::CreateAllocSpace("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ AllocSpace* space(AllocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
@@ -142,7 +142,7 @@
}
TEST_F(SpaceTest, AllocAndFree) {
- AllocSpace* space(Space::CreateAllocSpace("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ AllocSpace* space(AllocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
@@ -184,7 +184,7 @@
}
TEST_F(SpaceTest, AllocAndFreeList) {
- AllocSpace* space(Space::CreateAllocSpace("test", 4 * MB, 16 * MB, 16 * MB, NULL));
+ AllocSpace* space(AllocSpace::Create("test", 4 * MB, 16 * MB, 16 * MB, NULL));
ASSERT_TRUE(space != NULL);
// Make space findable to the heap, will also delete space when runtime is cleaned up
@@ -372,7 +372,7 @@
size_t initial_size = 4 * MB;
size_t growth_limit = 8 * MB;
size_t capacity = 16 * MB;
- AllocSpace* space(Space::CreateAllocSpace("test", initial_size, growth_limit, capacity, NULL));
+ AllocSpace* space(AllocSpace::Create("test", initial_size, growth_limit, capacity, NULL));
ASSERT_TRUE(space != NULL);
// Basic sanity
diff --git a/src/thread.cc b/src/thread.cc
index 15a4f1b..ba8763c 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -1860,12 +1860,15 @@
}
spill_shifts--; // wind back one as we want the last match
ref = reinterpret_cast<Object*>(GetGPR(spill_shifts));
+ if (ref != NULL) {
+ root_visitor_(ref, arg_);
+ }
} else {
ref = reinterpret_cast<Object*>(GetVReg(cur_quick_frame, code_item, core_spills,
fp_spills, frame_size, reg));
- }
- if (ref != NULL) {
- root_visitor_(ref, arg_);
+ if (ref != NULL) {
+ root_visitor_(ref, arg_);
+ }
}
}
}