Clean up GC callbacks to be virtual methods
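Replace the static callbacks that took a void* arg (IsMarkedCallback,
MarkObjectCallback, MarkHeapReferenceCallback, ProcessMarkStackCallback,
and friends) with virtual methods on GarbageCollector, which now also
inherits from IsMarkedVisitor and MarkObjectVisitor. Callers such as
Runtime::SweepSystemWeaks, the reference processor, and the mod union
tables / remembered sets take the collector (or a visitor) directly
instead of a function pointer plus opaque argument.

MarkCompact gains an updating_references_ flag so that the shared
MarkHeapReference/IsMarked entry points dispatch to the
reference-updating path while UpdateReferences() is running.

Illustrative sketch of the pattern (not part of the patch; adapted from
the MarkSweep hunks below):

  // Before: a static trampoline recovers the collector from void*.
  static mirror::Object* IsMarkedCallback(mirror::Object* obj, void* arg) {
    return reinterpret_cast<MarkSweep*>(arg)->IsMarked(obj) ? obj : nullptr;
  }
  Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);

  // After: the collector is itself an IsMarkedVisitor.
  virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE;
  Runtime::Current()->SweepSystemWeaks(this);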
Change-Id: Ia08034a4e5931c4fcb329c3bd3c4b1f301135735
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 9316b27..b5d5c34 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -73,6 +73,12 @@
}
}
+void ConcurrentCopying::MarkHeapReference(
+ mirror::HeapReference<mirror::Object>* from_ref ATTRIBUTE_UNUSED) {
+ // Unused, usually called from mod union tables.
+ UNIMPLEMENTED(FATAL);
+}
+
ConcurrentCopying::~ConcurrentCopying() {
STLDeleteElements(&pooled_mark_stacks_);
}
@@ -308,7 +314,7 @@
}
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
class EmptyCheckpoint : public Closure {
@@ -429,7 +435,7 @@
LOG(INFO) << "ProcessReferences";
}
// Process weak references. This may produce new refs to process and have them processed via
- // ProcessMarkStackCallback (in the GC exclusive mark stack mode).
+ // ProcessMarkStack (in the GC exclusive mark stack mode).
ProcessReferences(self);
CheckEmptyMarkStack();
if (kVerboseMode) {
@@ -644,7 +650,7 @@
}
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
class ConcurrentCopyingVerifyNoFromSpaceRefsObjectVisitor {
@@ -732,16 +738,9 @@
}
collector_->AssertToSpaceInvariant(nullptr, MemberOffset(0), ref);
}
- static void RootCallback(mirror::Object** root, void *arg, const RootInfo& /*root_info*/)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- ConcurrentCopying* collector = reinterpret_cast<ConcurrentCopying*>(arg);
- ConcurrentCopyingAssertToSpaceInvariantRefsVisitor visitor(collector);
- DCHECK(root != nullptr);
- visitor(*root);
- }
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
class ConcurrentCopyingAssertToSpaceInvariantFieldVisitor {
@@ -762,7 +761,7 @@
}
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
class ConcurrentCopyingAssertToSpaceInvariantObjectVisitor {
@@ -785,7 +784,7 @@
}
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
class RevokeThreadLocalMarkStackCheckpoint : public Closure {
@@ -1088,7 +1087,7 @@
void ConcurrentCopying::SweepSystemWeaks(Thread* self) {
TimingLogger::ScopedTiming split("SweepSystemWeaks", GetTimings());
ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
- Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+ Runtime::Current()->SweepSystemWeaks(this);
}
void ConcurrentCopying::Sweep(bool swap_bitmaps) {
@@ -1293,7 +1292,7 @@
}
private:
- ConcurrentCopying* collector_;
+ ConcurrentCopying* const collector_;
};
// Compute how many live objects are left in regions.
@@ -2029,14 +2028,9 @@
heap_->ClearMarkedObjects();
}
-mirror::Object* ConcurrentCopying::IsMarkedCallback(mirror::Object* from_ref, void* arg) {
- return reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
-}
-
-bool ConcurrentCopying::IsHeapReferenceMarkedCallback(
- mirror::HeapReference<mirror::Object>* field, void* arg) {
+bool ConcurrentCopying::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) {
mirror::Object* from_ref = field->AsMirrorPtr();
- mirror::Object* to_ref = reinterpret_cast<ConcurrentCopying*>(arg)->IsMarked(from_ref);
+ mirror::Object* to_ref = IsMarked(from_ref);
if (to_ref == nullptr) {
return false;
}
@@ -2048,18 +2042,12 @@
return true;
}
-mirror::Object* ConcurrentCopying::MarkCallback(mirror::Object* from_ref, void* arg) {
- return reinterpret_cast<ConcurrentCopying*>(arg)->Mark(from_ref);
-}
-
-void ConcurrentCopying::ProcessMarkStackCallback(void* arg) {
- ConcurrentCopying* concurrent_copying = reinterpret_cast<ConcurrentCopying*>(arg);
- concurrent_copying->ProcessMarkStack();
+mirror::Object* ConcurrentCopying::MarkObject(mirror::Object* from_ref) {
+ return Mark(from_ref);
}
void ConcurrentCopying::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
- heap_->GetReferenceProcessor()->DelayReferenceReferent(
- klass, reference, &IsHeapReferenceMarkedCallback, this);
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
void ConcurrentCopying::ProcessReferences(Thread* self) {
@@ -2067,8 +2055,7 @@
// We don't really need to lock the heap bitmap lock as we use CAS to mark in bitmaps.
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
- &IsHeapReferenceMarkedCallback, &MarkCallback, &ProcessMarkStackCallback, this);
+ true /*concurrent*/, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
void ConcurrentCopying::RevokeAllThreadLocalBuffers() {
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 1fb4703..4f92ea0 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -130,18 +130,16 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SwitchToSharedMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SwitchToGcExclusiveMarkStackMode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void ProcessReferences(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- mirror::Object* IsMarked(mirror::Object* from_ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static mirror::Object* MarkCallback(mirror::Object* from_ref, void* arg)
+ virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static mirror::Object* IsMarkedCallback(mirror::Object* from_ref, void* arg)
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static bool IsHeapReferenceMarkedCallback(
- mirror::HeapReference<mirror::Object>* field, void* arg)
+ virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void ProcessMarkStackCallback(void* arg)
+ virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
diff --git a/runtime/gc/collector/garbage_collector.h b/runtime/gc/collector/garbage_collector.h
index 9b76d1a..e10bef4 100644
--- a/runtime/gc/collector/garbage_collector.h
+++ b/runtime/gc/collector/garbage_collector.h
@@ -17,6 +17,9 @@
#ifndef ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_
#define ART_RUNTIME_GC_COLLECTOR_GARBAGE_COLLECTOR_H_
+#include <stdint.h>
+#include <vector>
+
#include "base/histogram.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
@@ -24,10 +27,16 @@
#include "gc/gc_cause.h"
#include "gc_root.h"
#include "gc_type.h"
-#include <stdint.h>
-#include <vector>
+#include "object_callbacks.h"
namespace art {
+
+namespace mirror {
+class Class;
+class Object;
+class Reference;
+} // namespace mirror
+
namespace gc {
class Heap;
@@ -113,7 +122,7 @@
DISALLOW_COPY_AND_ASSIGN(Iteration);
};
-class GarbageCollector : public RootVisitor {
+class GarbageCollector : public RootVisitor, public IsMarkedVisitor, public MarkObjectVisitor {
public:
class SCOPED_LOCKABLE ScopedPause {
public:
@@ -172,6 +181,22 @@
void RecordFreeLOS(const ObjectBytePair& freed);
void DumpPerformanceInfo(std::ostream& os) LOCKS_EXCLUDED(pause_histogram_lock_);
+ // Helper functions for querying if objects are marked. These are used for reading system
+ // weaks and for processing references.
+ virtual mirror::Object* IsMarked(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ // Used by reference processor.
+ virtual void ProcessMarkStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ // Force mark an object.
+ virtual mirror::Object* MarkObject(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+ virtual void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) = 0;
+
protected:
// Run all of the GC phases.
virtual void RunPhases() = 0;
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 3c247cd..65e6b40 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -21,34 +21,19 @@
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
-#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
-#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
-#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
-#include "indirect_reference_table.h"
-#include "intern_table.h"
-#include "jni_internal.h"
-#include "mark_sweep-inl.h"
-#include "monitor.h"
#include "mirror/class-inl.h"
-#include "mirror/class_loader.h"
-#include "mirror/dex_cache.h"
-#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
-#include "mirror/object_array.h"
-#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
-using ::art::mirror::Object;
-
namespace art {
namespace gc {
namespace collector {
@@ -67,7 +52,7 @@
MarkCompact::MarkCompact(Heap* heap, const std::string& name_prefix)
: GarbageCollector(heap, name_prefix + (name_prefix.empty() ? "" : " ") + "mark compact"),
- space_(nullptr), collector_name_(name_) {
+ space_(nullptr), collector_name_(name_), updating_references_(false) {
}
void MarkCompact::RunPhases() {
@@ -107,7 +92,7 @@
void operator()(mirror::Object* obj) const EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_,
Locks::heap_bitmap_lock_) {
DCHECK_ALIGNED(obj, space::BumpPointerSpace::kAlignment);
- DCHECK(collector_->IsMarked(obj));
+ DCHECK(collector_->IsMarked(obj) != nullptr);
collector_->ForwardObject(obj);
}
@@ -141,8 +126,7 @@
void MarkCompact::ProcessReferences(Thread* self) {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
heap_->GetReferenceProcessor()->ProcessReferences(
- false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
- &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
+ false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
class BitmapSetSlowPathVisitor {
@@ -156,29 +140,29 @@
}
};
-inline void MarkCompact::MarkObject(mirror::Object* obj) {
+inline mirror::Object* MarkCompact::MarkObject(mirror::Object* obj) {
if (obj == nullptr) {
- return;
+ return obj;
}
if (kUseBakerOrBrooksReadBarrier) {
// Verify all the objects have the correct forward pointer installed.
obj->AssertReadBarrierPointer();
}
- if (immune_region_.ContainsObject(obj)) {
- return;
- }
- if (objects_before_forwarding_->HasAddress(obj)) {
- if (!objects_before_forwarding_->Set(obj)) {
- MarkStackPush(obj); // This object was not previously marked.
- }
- } else {
- DCHECK(!space_->HasAddress(obj));
- BitmapSetSlowPathVisitor visitor;
- if (!mark_bitmap_->Set(obj, visitor)) {
- // This object was not previously marked.
- MarkStackPush(obj);
+ if (!immune_region_.ContainsObject(obj)) {
+ if (objects_before_forwarding_->HasAddress(obj)) {
+ if (!objects_before_forwarding_->Set(obj)) {
+ MarkStackPush(obj); // This object was not previously marked.
+ }
+ } else {
+ DCHECK(!space_->HasAddress(obj));
+ BitmapSetSlowPathVisitor visitor;
+ if (!mark_bitmap_->Set(obj, visitor)) {
+ // This object was not previously marked.
+ MarkStackPush(obj);
+ }
}
}
+ return obj;
}
void MarkCompact::MarkingPhase() {
@@ -240,7 +224,7 @@
TimingLogger::ScopedTiming t2(
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable", GetTimings());
- table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+ table->UpdateAndMarkReferences(this);
}
}
}
@@ -272,7 +256,7 @@
}
void MarkCompact::ResizeMarkStack(size_t new_size) {
- std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
+ std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
CHECK_LE(mark_stack_->Size(), new_size);
mark_stack_->Resize(new_size);
for (auto& obj : temp) {
@@ -280,7 +264,7 @@
}
}
-inline void MarkCompact::MarkStackPush(Object* obj) {
+inline void MarkCompact::MarkStackPush(mirror::Object* obj) {
if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
ResizeMarkStack(mark_stack_->Capacity() * 2);
}
@@ -288,23 +272,12 @@
mark_stack_->PushBack(obj);
}
-void MarkCompact::ProcessMarkStackCallback(void* arg) {
- reinterpret_cast<MarkCompact*>(arg)->ProcessMarkStack();
-}
-
-mirror::Object* MarkCompact::MarkObjectCallback(mirror::Object* root, void* arg) {
- reinterpret_cast<MarkCompact*>(arg)->MarkObject(root);
- return root;
-}
-
-void MarkCompact::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
- void* arg) {
- reinterpret_cast<MarkCompact*>(arg)->MarkObject(obj_ptr->AsMirrorPtr());
-}
-
-void MarkCompact::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
- void* arg) {
- reinterpret_cast<MarkCompact*>(arg)->DelayReferenceReferent(klass, ref);
+void MarkCompact::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) {
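+ // During UpdateReferences() the mod union tables call back into this visitor; forward the
+ // reference instead of re-marking it.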
+ if (updating_references_) {
+ UpdateHeapReference(obj_ptr);
+ } else {
+ MarkObject(obj_ptr->AsMirrorPtr());
+ }
}
void MarkCompact::VisitRoots(
@@ -373,6 +346,7 @@
void MarkCompact::UpdateReferences() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
+ updating_references_ = true;
Runtime* runtime = Runtime::Current();
// Update roots.
UpdateRootVisitor update_root_visitor(this);
@@ -387,7 +361,7 @@
space->IsZygoteSpace() ? "UpdateZygoteModUnionTableReferences" :
"UpdateImageModUnionTableReferences",
GetTimings());
- table->UpdateAndMarkReferences(&UpdateHeapReferenceCallback, this);
+ table->UpdateAndMarkReferences(this);
} else {
// No mod union table, so we need to scan the space using bitmap visit.
@@ -403,14 +377,15 @@
CHECK(!kMovingClasses)
<< "Didn't update large object classes since they are assumed to not move.";
// Update the system weaks, these should already have been swept.
- runtime->SweepSystemWeaks(&MarkedForwardingAddressCallback, this);
+ runtime->SweepSystemWeaks(this);
// Update the objects in the bump pointer space last, these objects don't have a bitmap.
UpdateObjectReferencesVisitor visitor(this);
objects_before_forwarding_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space_->Begin()),
reinterpret_cast<uintptr_t>(space_->End()),
visitor);
// Update the reference processor cleared list.
- heap_->GetReferenceProcessor()->UpdateRoots(&MarkedForwardingAddressCallback, this);
+ heap_->GetReferenceProcessor()->UpdateRoots(this);
+ updating_references_ = false;
}
void MarkCompact::Compact() {
@@ -436,10 +411,6 @@
Runtime::Current()->VisitRoots(this);
}
-mirror::Object* MarkCompact::MarkedForwardingAddressCallback(mirror::Object* obj, void* arg) {
- return reinterpret_cast<MarkCompact*>(arg)->GetMarkedForwardAddress(obj);
-}
-
inline void MarkCompact::UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference) {
mirror::Object* obj = reference->AsMirrorPtr();
if (obj != nullptr) {
@@ -451,17 +422,12 @@
}
}
-void MarkCompact::UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
- void* arg) {
- reinterpret_cast<MarkCompact*>(arg)->UpdateHeapReference(reference);
-}
-
class UpdateReferenceVisitor {
public:
explicit UpdateReferenceVisitor(MarkCompact* collector) : collector_(collector) {
}
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const
ALWAYS_INLINE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
collector_->UpdateHeapReference(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
}
@@ -481,7 +447,7 @@
obj->VisitReferences<kMovingClasses>(visitor, visitor);
}
-inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) const {
+inline mirror::Object* MarkCompact::GetMarkedForwardAddress(mirror::Object* obj) {
DCHECK(obj != nullptr);
if (objects_before_forwarding_->HasAddress(obj)) {
DCHECK(objects_before_forwarding_->Test(obj));
@@ -491,33 +457,30 @@
return ret;
}
DCHECK(!space_->HasAddress(obj));
- DCHECK(IsMarked(obj));
return obj;
}
-inline bool MarkCompact::IsMarked(const Object* object) const {
+mirror::Object* MarkCompact::IsMarked(mirror::Object* object) {
if (immune_region_.ContainsObject(object)) {
- return true;
+ return object;
+ }
+ if (updating_references_) {
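+ // During reference updating, the marked version of an object is its forwarding address.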
+ return GetMarkedForwardAddress(object);
}
if (objects_before_forwarding_->HasAddress(object)) {
- return objects_before_forwarding_->Test(object);
+ return objects_before_forwarding_->Test(object) ? object : nullptr;
}
- return mark_bitmap_->Test(object);
+ return mark_bitmap_->Test(object) ? object : nullptr;
}
-mirror::Object* MarkCompact::IsMarkedCallback(mirror::Object* object, void* arg) {
- return reinterpret_cast<MarkCompact*>(arg)->IsMarked(object) ? object : nullptr;
-}
-
-bool MarkCompact::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr,
- void* arg) {
+bool MarkCompact::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref_ptr) {
// Side effect free since we call this before ever moving objects.
- return reinterpret_cast<MarkCompact*>(arg)->IsMarked(ref_ptr->AsMirrorPtr());
+ return IsMarked(ref_ptr->AsMirrorPtr()) != nullptr;
}
void MarkCompact::SweepSystemWeaks() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+ Runtime::Current()->SweepSystemWeaks(this);
}
bool MarkCompact::ShouldSweepSpace(space::ContinuousSpace* space) const {
@@ -592,8 +555,7 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void MarkCompact::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
- heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
- &HeapReferenceMarkedCallback, this);
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
class MarkCompactMarkObjectVisitor {
@@ -601,7 +563,7 @@
explicit MarkCompactMarkObjectVisitor(MarkCompact* collector) : collector_(collector) {
}
- void operator()(Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /*is_static*/) const ALWAYS_INLINE
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
// Object was already verified when we scanned it.
collector_->MarkObject(obj->GetFieldObject<mirror::Object, kVerifyNone>(offset));
@@ -618,7 +580,7 @@
};
// Visit all of the references of an object and update them.
-void MarkCompact::ScanObject(Object* obj) {
+void MarkCompact::ScanObject(mirror::Object* obj) {
MarkCompactMarkObjectVisitor visitor(this);
obj->VisitReferences<kMovingClasses>(visitor, visitor);
}
@@ -627,7 +589,7 @@
void MarkCompact::ProcessMarkStack() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
while (!mark_stack_->IsEmpty()) {
- Object* obj = mark_stack_->PopBack();
+ mirror::Object* obj = mark_stack_->PopBack();
DCHECK(obj != nullptr);
ScanObject(obj);
}
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index f59a2cd..89d66b5 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -121,23 +121,6 @@
const RootInfo& info)
OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref_ptr,
- void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- static void ProcessMarkStackCallback(void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
- static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
- void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
// Schedules an unmarked object for reference processing.
void DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -145,11 +128,7 @@
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
- mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
+ mirror::Object* GetMarkedForwardAddress(mirror::Object* object)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -184,30 +163,27 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
// Update the references of objects by using the forwarding addresses.
void UpdateReferences() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- static void UpdateRootCallback(mirror::Object** root, void* arg, const RootInfo& /*root_info*/)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
// Move objects and restore lock words.
void MoveObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Move a single object to its forward address.
void MoveObject(mirror::Object* obj, size_t len) EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Mark a single object.
- void MarkObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
- Locks::mutator_lock_);
- bool IsMarked(const mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* obj) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
void ForwardObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
Locks::mutator_lock_);
// Update a single heap reference.
void UpdateHeapReference(mirror::HeapReference<mirror::Object>* reference)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
- static void UpdateHeapReferenceCallback(mirror::HeapReference<mirror::Object>* reference,
- void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
// Update all of the references of a single object.
void UpdateObjectReferences(mirror::Object* obj)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
@@ -242,6 +218,9 @@
// Which lock words we need to restore as we are moving objects.
std::deque<LockWord> lock_words_to_restore_;
+ // Whether we are currently updating references; set for the duration of UpdateReferences().
+ bool updating_references_;
+
private:
friend class BitmapSetSlowPathVisitor;
friend class CalculateObjectForwardingAddressVisitor;
diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc
index 1c9c412..e0d6d6b 100644
--- a/runtime/gc/collector/mark_sweep.cc
+++ b/runtime/gc/collector/mark_sweep.cc
@@ -37,7 +37,6 @@
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/reference_processor.h"
-#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "mark_sweep-inl.h"
@@ -47,8 +46,6 @@
#include "thread-inl.h"
#include "thread_list.h"
-using ::art::mirror::Object;
-
namespace art {
namespace gc {
namespace collector {
@@ -175,8 +172,7 @@
void MarkSweep::ProcessReferences(Thread* self) {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
- &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
+ true, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
void MarkSweep::PausePhase() {
@@ -273,7 +269,7 @@
TimingLogger::ScopedTiming t(name, GetTimings());
accounting::ModUnionTable* mod_union_table = heap_->FindModUnionTableFromSpace(space);
CHECK(mod_union_table != nullptr);
- mod_union_table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+ mod_union_table->UpdateAndMarkReferences(this);
}
}
}
@@ -333,7 +329,7 @@
// Someone else acquired the lock and expanded the mark stack before us.
return;
}
- std::vector<StackReference<Object>> temp(mark_stack_->Begin(), mark_stack_->End());
+ std::vector<StackReference<mirror::Object>> temp(mark_stack_->Begin(), mark_stack_->End());
CHECK_LE(mark_stack_->Size(), new_size);
mark_stack_->Resize(new_size);
for (auto& obj : temp) {
@@ -341,7 +337,7 @@
}
}
-inline void MarkSweep::MarkObjectNonNullParallel(Object* obj) {
+inline void MarkSweep::MarkObjectNonNullParallel(mirror::Object* obj) {
DCHECK(obj != nullptr);
if (MarkObjectParallel(obj)) {
MutexLock mu(Thread::Current(), mark_stack_lock_);
@@ -353,28 +349,18 @@
}
}
-mirror::Object* MarkSweep::MarkObjectCallback(mirror::Object* obj, void* arg) {
- MarkSweep* mark_sweep = reinterpret_cast<MarkSweep*>(arg);
- mark_sweep->MarkObject(obj);
- return obj;
-}
-
-void MarkSweep::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
- reinterpret_cast<MarkSweep*>(arg)->MarkObject(ref->AsMirrorPtr());
-}
-
-bool MarkSweep::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg) {
- return reinterpret_cast<MarkSweep*>(arg)->IsMarked(ref->AsMirrorPtr());
+bool MarkSweep::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) {
+ return IsMarked(ref->AsMirrorPtr()) != nullptr;
}
class MarkSweepMarkObjectSlowPath {
public:
- explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, Object* holder = nullptr,
+ explicit MarkSweepMarkObjectSlowPath(MarkSweep* mark_sweep, mirror::Object* holder = nullptr,
MemberOffset offset = MemberOffset(0))
: mark_sweep_(mark_sweep), holder_(holder), offset_(offset) {
}
- void operator()(const Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+ void operator()(const mirror::Object* obj) const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
if (kProfileLargeObjects) {
// TODO: Differentiate between marking and testing somehow.
++mark_sweep_->large_object_test_;
@@ -450,7 +436,8 @@
MemberOffset offset_;
};
-inline void MarkSweep::MarkObjectNonNull(Object* obj, Object* holder, MemberOffset offset) {
+inline void MarkSweep::MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder,
+ MemberOffset offset) {
DCHECK(obj != nullptr);
if (kUseBakerOrBrooksReadBarrier) {
// Verify all the objects have the correct pointer installed.
@@ -481,7 +468,7 @@
}
}
-inline void MarkSweep::PushOnMarkStack(Object* obj) {
+inline void MarkSweep::PushOnMarkStack(mirror::Object* obj) {
if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
// Lock is not needed but is here anyways to please annotalysis.
MutexLock mu(Thread::Current(), mark_stack_lock_);
@@ -491,14 +478,14 @@
mark_stack_->PushBack(obj);
}
-inline bool MarkSweep::MarkObjectParallel(const Object* obj) {
+inline bool MarkSweep::MarkObjectParallel(mirror::Object* obj) {
DCHECK(obj != nullptr);
if (kUseBakerOrBrooksReadBarrier) {
// Verify all the objects have the correct pointer installed.
obj->AssertReadBarrierPointer();
}
if (immune_region_.ContainsObject(obj)) {
- DCHECK(IsMarked(obj));
+ DCHECK(IsMarked(obj) != nullptr);
return false;
}
// Try to take advantage of locality of references within a space, failing this find the space
@@ -511,8 +498,18 @@
return !mark_bitmap_->AtomicTestAndSet(obj, visitor);
}
+mirror::Object* MarkSweep::MarkObject(mirror::Object* obj) {
+ MarkObject(obj, nullptr, MemberOffset(0));
+ return obj;
+}
+
+void MarkSweep::MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) {
+ MarkObject(ref->AsMirrorPtr(), nullptr, MemberOffset(0));
+}
+
// Used to mark objects when processing the mark stack. If an object is null, it is not marked.
-inline void MarkSweep::MarkObject(Object* obj, Object* holder, MemberOffset offset) {
+inline void MarkSweep::MarkObject(mirror::Object* obj, mirror::Object* holder,
+ MemberOffset offset) {
if (obj != nullptr) {
MarkObjectNonNull(obj, holder, offset);
} else if (kCountMarkedObjects) {
@@ -526,7 +523,7 @@
void VisitRoot(mirror::Object* root, const RootInfo& info) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
- CHECK(collector_->IsMarked(root)) << info.ToString();
+ CHECK(collector_->IsMarked(root) != nullptr) << info.ToString();
}
private:
@@ -599,7 +596,8 @@
explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE
: mark_sweep_(mark_sweep) {}
- void operator()(Object* obj) const ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ void operator()(mirror::Object* obj) const ALWAYS_INLINE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
if (kCheckLocks) {
Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
@@ -631,7 +629,7 @@
class MarkStackTask : public Task {
public:
MarkStackTask(ThreadPool* thread_pool, MarkSweep* mark_sweep, size_t mark_stack_size,
- StackReference<Object>* mark_stack)
+ StackReference<mirror::Object>* mark_stack)
: mark_sweep_(mark_sweep),
thread_pool_(thread_pool),
mark_stack_pos_(mark_stack_size) {
@@ -655,7 +653,7 @@
MarkSweep* mark_sweep) ALWAYS_INLINE
: chunk_task_(chunk_task), mark_sweep_(mark_sweep) {}
- void operator()(Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* static */) const ALWAYS_INLINE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset);
if (ref != nullptr && mark_sweep_->MarkObjectParallel(ref)) {
@@ -681,7 +679,7 @@
: chunk_task_(chunk_task) {}
// No thread safety analysis since multiple threads will use this visitor.
- void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ void operator()(mirror::Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
MarkSweep* const mark_sweep = chunk_task_->mark_sweep_;
MarkObjectParallelVisitor mark_visitor(chunk_task_, mark_sweep);
@@ -704,11 +702,12 @@
MarkSweep* const mark_sweep_;
ThreadPool* const thread_pool_;
// Thread local mark stack for this task.
- StackReference<Object> mark_stack_[kMaxSize];
+ StackReference<mirror::Object> mark_stack_[kMaxSize];
// Mark stack position.
size_t mark_stack_pos_;
- ALWAYS_INLINE void MarkStackPush(Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ALWAYS_INLINE void MarkStackPush(mirror::Object* obj)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
if (UNLIKELY(mark_stack_pos_ == kMaxSize)) {
// Mark stack overflow, give 1/2 the stack to the thread pool as a new work task.
mark_stack_pos_ /= 2;
@@ -732,12 +731,12 @@
ScanObjectParallelVisitor visitor(this);
// TODO: Tune this.
static const size_t kFifoSize = 4;
- BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
+ BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
for (;;) {
- Object* obj = nullptr;
+ mirror::Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (mark_stack_pos_ != 0 && prefetch_fifo.size() < kFifoSize) {
- Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
+ mirror::Object* const mark_stack_obj = mark_stack_[--mark_stack_pos_].AsMirrorPtr();
DCHECK(mark_stack_obj != nullptr);
__builtin_prefetch(mark_stack_obj);
prefetch_fifo.push_back(mark_stack_obj);
@@ -764,7 +763,7 @@
CardScanTask(ThreadPool* thread_pool, MarkSweep* mark_sweep,
accounting::ContinuousSpaceBitmap* bitmap,
uint8_t* begin, uint8_t* end, uint8_t minimum_age, size_t mark_stack_size,
- StackReference<Object>* mark_stack_obj, bool clear_card)
+ StackReference<mirror::Object>* mark_stack_obj, bool clear_card)
: MarkStackTask<false>(thread_pool, mark_sweep, mark_stack_size, mark_stack_obj),
bitmap_(bitmap),
begin_(begin),
@@ -815,8 +814,8 @@
TimingLogger::ScopedTiming t(paused ? "(Paused)ScanGrayObjects" : __FUNCTION__,
GetTimings());
// Try to take some of the mark stack since we can pass this off to the worker tasks.
- StackReference<Object>* mark_stack_begin = mark_stack_->Begin();
- StackReference<Object>* mark_stack_end = mark_stack_->End();
+ StackReference<mirror::Object>* mark_stack_begin = mark_stack_->Begin();
+ StackReference<mirror::Object>* mark_stack_end = mark_stack_->End();
const size_t mark_stack_size = mark_stack_end - mark_stack_begin;
// Estimated number of work tasks we will create.
const size_t mark_stack_tasks = GetHeap()->GetContinuousSpaces().size() * thread_count;
@@ -988,13 +987,6 @@
ProcessMarkStack(false);
}
-mirror::Object* MarkSweep::IsMarkedCallback(mirror::Object* object, void* arg) {
- if (reinterpret_cast<MarkSweep*>(arg)->IsMarked(object)) {
- return object;
- }
- return nullptr;
-}
-
void MarkSweep::RecursiveMarkDirtyObjects(bool paused, uint8_t minimum_age) {
ScanGrayObjects(paused, minimum_age);
ProcessMarkStack(paused);
@@ -1015,16 +1007,23 @@
void MarkSweep::SweepSystemWeaks(Thread* self) {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- Runtime::Current()->SweepSystemWeaks(IsMarkedCallback, this);
+ Runtime::Current()->SweepSystemWeaks(this);
}
-mirror::Object* MarkSweep::VerifySystemWeakIsLiveCallback(Object* obj, void* arg) {
- reinterpret_cast<MarkSweep*>(arg)->VerifyIsLive(obj);
- // We don't actually want to sweep the object, so lets return "marked"
- return obj;
-}
+class VerifySystemWeakVisitor : public IsMarkedVisitor {
+ public:
+ explicit VerifySystemWeakVisitor(MarkSweep* mark_sweep) : mark_sweep_(mark_sweep) {}
-void MarkSweep::VerifyIsLive(const Object* obj) {
+ virtual mirror::Object* IsMarked(mirror::Object* obj) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
+ mark_sweep_->VerifyIsLive(obj);
+ return obj;
+ }
+
+ MarkSweep* const mark_sweep_;
+};
+
+void MarkSweep::VerifyIsLive(const mirror::Object* obj) {
if (!heap_->GetLiveBitmap()->Test(obj)) {
// TODO: Consider live stack? Has this code bitrotted?
CHECK(!heap_->allocation_stack_->Contains(obj))
@@ -1035,7 +1034,8 @@
void MarkSweep::VerifySystemWeaks() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
// Verify system weaks using a special object visitor which returns the input object.
- Runtime::Current()->SweepSystemWeaks(VerifySystemWeakIsLiveCallback, this);
+ VerifySystemWeakVisitor visitor(this);
+ Runtime::Current()->SweepSystemWeaks(&visitor);
}
class CheckpointMarkThreadRoots : public Closure, public RootVisitor {
@@ -1122,7 +1122,7 @@
ObjectBytePair freed;
ObjectBytePair freed_los;
// How many objects are left in the array, modified after each space is swept.
- StackReference<Object>* objects = allocations->Begin();
+ StackReference<mirror::Object>* objects = allocations->Begin();
size_t count = allocations->Size();
// Change the order to ensure that the non-moving space is swept last as an optimization.
std::vector<space::ContinuousSpace*> sweep_spaces;
@@ -1150,9 +1150,9 @@
if (swap_bitmaps) {
std::swap(live_bitmap, mark_bitmap);
}
- StackReference<Object>* out = objects;
+ StackReference<mirror::Object>* out = objects;
for (size_t i = 0; i < count; ++i) {
- Object* const obj = objects[i].AsMirrorPtr();
+ mirror::Object* const obj = objects[i].AsMirrorPtr();
if (kUseThreadLocalAllocationStack && obj == nullptr) {
continue;
}
@@ -1191,7 +1191,7 @@
std::swap(large_live_objects, large_mark_objects);
}
for (size_t i = 0; i < count; ++i) {
- Object* const obj = objects[i].AsMirrorPtr();
+ mirror::Object* const obj = objects[i].AsMirrorPtr();
// Handle large objects.
if (kUseThreadLocalAllocationStack && obj == nullptr) {
continue;
@@ -1250,16 +1250,15 @@
if (kCountJavaLangRefs) {
++reference_count_;
}
- heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, &HeapReferenceMarkedCallback,
- this);
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, ref, this);
}
-class MarkObjectVisitor {
+class MarkVisitor {
public:
- explicit MarkObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
+ explicit MarkVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE : mark_sweep_(mark_sweep) {
}
- void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
+ void operator()(mirror::Object* obj, MemberOffset offset, bool /* is_static */) const
ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
if (kCheckLocks) {
@@ -1275,16 +1274,12 @@
// Scans an object reference. Determines the type of the reference
// and dispatches to a specialized scanning routine.
-void MarkSweep::ScanObject(Object* obj) {
- MarkObjectVisitor mark_visitor(this);
+void MarkSweep::ScanObject(mirror::Object* obj) {
+ MarkVisitor mark_visitor(this);
DelayReferenceReferentVisitor ref_visitor(this);
ScanObjectVisit(obj, mark_visitor, ref_visitor);
}
-void MarkSweep::ProcessMarkStackCallback(void* arg) {
- reinterpret_cast<MarkSweep*>(arg)->ProcessMarkStack(false);
-}
-
void MarkSweep::ProcessMarkStackParallel(size_t thread_count) {
Thread* self = Thread::Current();
ThreadPool* thread_pool = GetHeap()->GetThreadPool();
@@ -1317,12 +1312,12 @@
} else {
// TODO: Tune this.
static const size_t kFifoSize = 4;
- BoundedFifoPowerOfTwo<Object*, kFifoSize> prefetch_fifo;
+ BoundedFifoPowerOfTwo<mirror::Object*, kFifoSize> prefetch_fifo;
for (;;) {
- Object* obj = nullptr;
+ mirror::Object* obj = nullptr;
if (kUseMarkStackPrefetch) {
while (!mark_stack_->IsEmpty() && prefetch_fifo.size() < kFifoSize) {
- Object* mark_stack_obj = mark_stack_->PopBack();
+ mirror::Object* mark_stack_obj = mark_stack_->PopBack();
DCHECK(mark_stack_obj != nullptr);
__builtin_prefetch(mark_stack_obj);
prefetch_fifo.push_back(mark_stack_obj);
@@ -1344,14 +1339,14 @@
}
}
-inline bool MarkSweep::IsMarked(const Object* object) const {
+inline mirror::Object* MarkSweep::IsMarked(mirror::Object* object) {
if (immune_region_.ContainsObject(object)) {
- return true;
+ return object;
}
if (current_space_bitmap_->HasAddress(object)) {
- return current_space_bitmap_->Test(object);
+ return current_space_bitmap_->Test(object) ? object : nullptr;
}
- return mark_bitmap_->Test(object);
+ return mark_bitmap_->Test(object) ? object : nullptr;
}
void MarkSweep::FinishPhase() {
diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h
index d29d87a..c13755c 100644
--- a/runtime/gc/collector/mark_sweep.h
+++ b/runtime/gc/collector/mark_sweep.h
@@ -170,18 +170,9 @@
// Verify that an object is live, either in a live bitmap or in the allocation stack.
void VerifyIsLive(const mirror::Object* obj)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- static mirror::Object* MarkObjectCallback(mirror::Object* obj, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* ref, void* arg)
+ virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -194,13 +185,14 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static void ProcessMarkStackCallback(void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
// Marks an object.
- void MarkObject(mirror::Object* obj, mirror::Object* holder = nullptr,
- MemberOffset offset = MemberOffset(0))
+ virtual mirror::Object* MarkObject(mirror::Object* obj) OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ void MarkObject(mirror::Object* obj, mirror::Object* holder, MemberOffset offset)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* ref) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
@@ -214,15 +206,9 @@
protected:
- // Returns true if the object has its bit set in the mark bitmap.
+ // Returns the object if its bit is set in the mark bitmap, null otherwise.
- bool IsMarked(const mirror::Object* object) const
+ virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static mirror::Object* IsMarkedCallback(mirror::Object* object, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- static void VerifyImageRootVisitor(mirror::Object* root, void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
void MarkObjectNonNull(mirror::Object* obj, mirror::Object* holder = nullptr,
MemberOffset offset = MemberOffset(0))
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
@@ -233,7 +219,7 @@
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
// Returns true if we need to add obj to a mark stack.
- bool MarkObjectParallel(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
+ bool MarkObjectParallel(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;
// Verify the roots of the heap and print out information related to any invalid roots.
// Called in MarkObject, so we may not hold the mutator lock.
@@ -258,6 +244,11 @@
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ virtual void ProcessMarkStack() OVERRIDE EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ ProcessMarkStack(false);
+ }
+
// Recursively blackens objects on the mark stack.
void ProcessMarkStack(bool paused)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index 82d02e7..2a9f47a 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -157,8 +157,7 @@
void SemiSpace::ProcessReferences(Thread* self) {
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
GetHeap()->GetReferenceProcessor()->ProcessReferences(
- false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(),
- &HeapReferenceMarkedCallback, &MarkObjectCallback, &ProcessMarkStackCallback, this);
+ false, GetTimings(), GetCurrentIteration()->GetClearSoftReferences(), this);
}
void SemiSpace::MarkingPhase() {
@@ -336,7 +335,7 @@
space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
"UpdateAndMarkImageModUnionTable",
GetTimings());
- table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
+ table->UpdateAndMarkReferences(this);
DCHECK(GetHeap()->FindRememberedSetFromSpace(space) == nullptr);
} else if (collect_from_space_only_ && space->GetLiveBitmap() != nullptr) {
// If the space has no mod union table (the non-moving space and main spaces when the bump
@@ -351,8 +350,7 @@
CHECK_EQ(rem_set != nullptr, kUseRememberedSet);
if (rem_set != nullptr) {
TimingLogger::ScopedTiming t2("UpdateAndMarkRememberedSet", GetTimings());
- rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, DelayReferenceReferentCallback,
- from_space_, this);
+ rem_set->UpdateAndMarkReferences(from_space_, this);
if (kIsDebugBuild) {
// Verify that there are no from-space references that
// remain in the space, that is, the remembered set (and the
@@ -583,24 +581,14 @@
return forward_address;
}
-void SemiSpace::ProcessMarkStackCallback(void* arg) {
- reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
-}
-
-mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
+mirror::Object* SemiSpace::MarkObject(mirror::Object* root) {
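+ // Wrap the pointer in a StackReference so the MarkObject(ObjectReference*) overload can
+ // install the forwarding address, then return the possibly updated pointer.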
auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
- reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
+ MarkObject(&ref);
return ref.AsMirrorPtr();
}
-void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
- void* arg) {
- reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
-}
-
-void SemiSpace::DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
- void* arg) {
- reinterpret_cast<SemiSpace*>(arg)->DelayReferenceReferent(klass, ref);
+void SemiSpace::MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) {
+ MarkObject(obj_ptr);
}
void SemiSpace::VisitRoots(mirror::Object*** roots, size_t count,
@@ -628,29 +616,9 @@
Runtime::Current()->VisitRoots(this);
}
-bool SemiSpace::HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object,
- void* arg) {
- mirror::Object* obj = object->AsMirrorPtr();
- mirror::Object* new_obj =
- reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(obj);
- if (new_obj == nullptr) {
- return false;
- }
- if (new_obj != obj) {
- // Write barrier is not necessary since it still points to the same object, just at a different
- // address.
- object->Assign(new_obj);
- }
- return true;
-}
-
-mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
- return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
-}
-
void SemiSpace::SweepSystemWeaks() {
TimingLogger::ScopedTiming t(__FUNCTION__, GetTimings());
- Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
+ Runtime::Current()->SweepSystemWeaks(this);
}
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
@@ -688,8 +656,7 @@
// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
- heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference,
- &HeapReferenceMarkedCallback, this);
+ heap_->GetReferenceProcessor()->DelayReferenceReferent(klass, reference, this);
}
class SemiSpaceMarkObjectVisitor {
@@ -746,8 +713,7 @@
}
}
-inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+mirror::Object* SemiSpace::IsMarked(mirror::Object* obj) {
// All immune objects are assumed marked.
if (from_space_->HasAddress(obj)) {
// Returns either the forwarding address or null.
@@ -759,6 +725,20 @@
return mark_bitmap_->Test(obj) ? obj : nullptr;
}
+bool SemiSpace::IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) {
+ mirror::Object* obj = object->AsMirrorPtr();
+ mirror::Object* new_obj = IsMarked(obj);
+ if (new_obj == nullptr) {
+ return false;
+ }
+ if (new_obj != obj) {
+ // Write barrier is not necessary since it still points to the same object, just at a different
+ // address.
+ object->Assign(new_obj);
+ }
+ return true;
+}
+
void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
DCHECK(to_space != nullptr);
to_space_ = to_space;
diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h
index 3c25f53..6b7ea0d 100644
--- a/runtime/gc/collector/semi_space.h
+++ b/runtime/gc/collector/semi_space.h
@@ -103,6 +103,12 @@
void MarkObject(mirror::ObjectReference<kPoisonReferences, mirror::Object>* obj_ptr)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+ virtual mirror::Object* MarkObject(mirror::Object* root) OVERRIDE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
+ virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* obj_ptr) OVERRIDE
+ EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
+
void ScanObject(mirror::Object* obj)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -140,19 +146,6 @@
const RootInfo& info) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
- static mirror::Object* MarkObjectCallback(mirror::Object* root, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- static void MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
- static void ProcessMarkStackCallback(void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_);
-
- static void DelayReferenceReferentCallback(mirror::Class* klass, mirror::Reference* ref,
- void* arg)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
-
virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
@@ -163,15 +156,11 @@
protected:
// Returns null if the object is not marked, otherwise returns the forwarding address (same as
// object for non movable things).
- mirror::Object* GetMarkedForwardAddress(mirror::Object* object) const
+ virtual mirror::Object* IsMarked(mirror::Object* object) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
- static bool HeapReferenceMarkedCallback(mirror::HeapReference<mirror::Object>* object, void* arg)
- EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
- SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-
- static mirror::Object* MarkedForwardingAddressCallback(mirror::Object* object, void* arg)
+ virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* object) OVERRIDE
EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);