Merge "Revert "Convert libart and dependencies to Android.bp""
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index 4cf5b4f..9feaf41 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -36,7 +36,7 @@
const uintptr_t offset = addr - heap_begin_;
const size_t index = OffsetToIndex(offset);
const uintptr_t mask = OffsetToMask(offset);
- Atomic<uintptr_t>* atomic_entry = reinterpret_cast<Atomic<uintptr_t>*>(&bitmap_begin_[index]);
+ Atomic<uintptr_t>* atomic_entry = &bitmap_begin_[index];
DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
uintptr_t old_word;
do {
@@ -58,7 +58,7 @@
DCHECK(bitmap_begin_ != nullptr);
DCHECK_GE(addr, heap_begin_);
const uintptr_t offset = addr - heap_begin_;
- return (bitmap_begin_[OffsetToIndex(offset)] & OffsetToMask(offset)) != 0;
+ return (bitmap_begin_[OffsetToIndex(offset)].LoadRelaxed() & OffsetToMask(offset)) != 0;
}
template<size_t kAlignment> template<typename Visitor>
@@ -116,7 +116,7 @@
// Traverse the middle, full part.
for (size_t i = index_start + 1; i < index_end; ++i) {
- uintptr_t w = bitmap_begin_[i];
+ uintptr_t w = bitmap_begin_[i].LoadRelaxed();
if (w != 0) {
const uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
do {
@@ -164,8 +164,8 @@
const size_t index = OffsetToIndex(offset);
const uintptr_t mask = OffsetToMask(offset);
DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_;
- uintptr_t* address = &bitmap_begin_[index];
- uintptr_t old_word = *address;
+ Atomic<uintptr_t>* atomic_entry = &bitmap_begin_[index];
+ uintptr_t old_word = atomic_entry->LoadRelaxed();
if (kSetBit) {
// Check the bit before setting the word in case we are trying to mark a read only bitmap
// like an image space bitmap. This bitmap is mapped as read only and will fault if we
@@ -173,10 +173,10 @@
// occur if we check before setting the bit. This also prevents dirty pages that would
// occur if the bitmap was read write and we did not check the bit.
if ((old_word & mask) == 0) {
- *address = old_word | mask;
+ atomic_entry->StoreRelaxed(old_word | mask);
}
} else {
- *address = old_word & ~mask;
+ atomic_entry->StoreRelaxed(old_word & ~mask);
}
DCHECK_EQ(Test(obj), kSetBit);
return (old_word & mask) != 0;
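
Note on the hunks above: with bitmap_begin_ now an array of Atomic<uintptr_t>, the concurrent set path is a word-level compare-and-swap loop and the readers become plain relaxed loads. A minimal standalone sketch of the same shape, using std::atomic instead of ART's Atomic<> wrapper (the function names here are illustrative, not ART APIs):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    constexpr size_t kBitsPerWord = sizeof(uintptr_t) * 8;

    // Atomically sets one bit and returns its previous value, mirroring the
    // relaxed CAS loop in the first hunk above.
    bool AtomicTestAndSetBit(std::atomic<uintptr_t>* words, size_t bit) {
      std::atomic<uintptr_t>* word = &words[bit / kBitsPerWord];
      const uintptr_t mask = uintptr_t{1} << (bit % kBitsPerWord);
      uintptr_t old_word = word->load(std::memory_order_relaxed);
      do {
        if ((old_word & mask) != 0) {
          return true;  // Already set; avoid a needless store.
        }
        // On failure compare_exchange_weak reloads old_word, so we just retry.
      } while (!word->compare_exchange_weak(old_word, old_word | mask,
                                            std::memory_order_relaxed));
      return false;
    }

    // Relaxed single-bit read, matching the Test() -> LoadRelaxed() change.
    bool TestBit(const std::atomic<uintptr_t>* words, size_t bit) {
      const uintptr_t mask = uintptr_t{1} << (bit % kBitsPerWord);
      return (words[bit / kBitsPerWord].load(std::memory_order_relaxed) & mask) != 0;
    }

Relaxed ordering appears to be enough here on the assumption that any cross-thread visibility the collector needs is established by other synchronization (GC phase changes, mark-stack handoff), not by the bitmap words themselves.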
diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc
index b43f77f..3df02ed 100644
--- a/runtime/gc/accounting/space_bitmap.cc
+++ b/runtime/gc/accounting/space_bitmap.cc
@@ -51,7 +51,9 @@
template<size_t kAlignment>
SpaceBitmap<kAlignment>::SpaceBitmap(const std::string& name, MemMap* mem_map, uintptr_t* bitmap_begin,
size_t bitmap_size, const void* heap_begin)
- : mem_map_(mem_map), bitmap_begin_(bitmap_begin), bitmap_size_(bitmap_size),
+ : mem_map_(mem_map),
+ bitmap_begin_(reinterpret_cast<Atomic<uintptr_t>*>(bitmap_begin)),
+ bitmap_size_(bitmap_size),
heap_begin_(reinterpret_cast<uintptr_t>(heap_begin)),
name_(name) {
CHECK(bitmap_begin_ != nullptr);
@@ -104,7 +106,12 @@
template<size_t kAlignment>
void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) {
DCHECK_EQ(Size(), source_bitmap->Size());
- std::copy(source_bitmap->Begin(), source_bitmap->Begin() + source_bitmap->Size() / sizeof(intptr_t), Begin());
+ const size_t count = source_bitmap->Size() / sizeof(intptr_t);
+ Atomic<uintptr_t>* const src = source_bitmap->Begin();
+ Atomic<uintptr_t>* const dest = Begin();
+ for (size_t i = 0; i < count; ++i) {
+ dest[i].StoreRelaxed(src[i].LoadRelaxed());
+ }
}
template<size_t kAlignment>
@@ -113,9 +120,9 @@
CHECK(callback != nullptr);
uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1);
- uintptr_t* bitmap_begin = bitmap_begin_;
+ Atomic<uintptr_t>* bitmap_begin = bitmap_begin_;
for (uintptr_t i = 0; i <= end; ++i) {
- uintptr_t w = bitmap_begin[i];
+ uintptr_t w = bitmap_begin[i].LoadRelaxed();
if (w != 0) {
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
do {
@@ -160,10 +167,10 @@
size_t start = OffsetToIndex(sweep_begin - live_bitmap.heap_begin_);
size_t end = OffsetToIndex(sweep_end - live_bitmap.heap_begin_ - 1);
CHECK_LT(end, live_bitmap.Size() / sizeof(intptr_t));
- uintptr_t* live = live_bitmap.bitmap_begin_;
- uintptr_t* mark = mark_bitmap.bitmap_begin_;
+ Atomic<uintptr_t>* live = live_bitmap.bitmap_begin_;
+ Atomic<uintptr_t>* mark = mark_bitmap.bitmap_begin_;
for (size_t i = start; i <= end; i++) {
- uintptr_t garbage = live[i] & ~mark[i];
+ uintptr_t garbage = live[i].LoadRelaxed() & ~mark[i].LoadRelaxed();
if (UNLIKELY(garbage != 0)) {
uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_;
do {
@@ -251,7 +258,7 @@
uintptr_t end = Size() / sizeof(intptr_t);
for (uintptr_t i = 0; i < end; ++i) {
// Need uint for unsigned shift.
- uintptr_t w = bitmap_begin_[i];
+ uintptr_t w = bitmap_begin_[i].LoadRelaxed();
if (UNLIKELY(w != 0)) {
uintptr_t ptr_base = IndexToOffset(i) + heap_begin_;
while (w != 0) {
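
A brief note on the CopyFrom() change: once Begin() returns Atomic<uintptr_t>*, std::copy over raw words no longer fits, so the patch copies word by word with relaxed loads and stores, keeping the copy data-race-free. A self-contained equivalent with std::atomic (illustrative only):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Copies count atomic words from src to dest without data races. Relaxed
    // ordering mirrors the patch; it assumes the caller publishes the copied
    // bitmap to other threads through its own synchronization.
    void CopyWords(std::atomic<uintptr_t>* dest,
                   const std::atomic<uintptr_t>* src,
                   size_t count) {
      for (size_t i = 0; i < count; ++i) {
        dest[i].store(src[i].load(std::memory_order_relaxed),
                      std::memory_order_relaxed);
      }
    }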
diff --git a/runtime/gc/accounting/space_bitmap.h b/runtime/gc/accounting/space_bitmap.h
index b8ff471..829b1b1 100644
--- a/runtime/gc/accounting/space_bitmap.h
+++ b/runtime/gc/accounting/space_bitmap.h
@@ -147,7 +147,7 @@
void CopyFrom(SpaceBitmap* source_bitmap);
// Starting address of our internal storage.
- uintptr_t* Begin() {
+ Atomic<uintptr_t>* Begin() {
return bitmap_begin_;
}
@@ -215,7 +215,7 @@
std::unique_ptr<MemMap> mem_map_;
// This bitmap itself, word sized for efficiency in scanning.
- uintptr_t* const bitmap_begin_;
+ Atomic<uintptr_t>* const bitmap_begin_;
// Size of this bitmap.
size_t bitmap_size_;
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index fb774a4..76f500c 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -34,32 +34,27 @@
// to gray even though the object has already been marked through. This happens if a mutator
// thread gets preempted before the AtomicSetReadBarrierPointer below, GC marks through the
// object (changes it from white to gray and back to white), and the thread runs and
- // incorrectly changes it from white to gray. We need to detect such "false gray" cases and
- // change the objects back to white at the end of marking.
+ // incorrectly changes it from white to gray. If this happens, the object will get added to the
+ // mark stack again and get changed back to white after it is processed.
if (kUseBakerReadBarrier) {
- // Test the bitmap first to reduce the chance of false gray cases.
+ // Test the bitmap first so that, most of the time, we avoid graying an object that has
+ // already been marked through.
if (bitmap->Test(ref)) {
return ref;
}
}
// This may or may not succeed, which is ok because the object may already be gray.
- bool cas_success = false;
+ bool success = false;
if (kUseBakerReadBarrier) {
- cas_success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(),
- ReadBarrier::GrayPtr());
- }
- if (bitmap->AtomicTestAndSet(ref)) {
- // Already marked.
- if (kUseBakerReadBarrier &&
- cas_success &&
- // The object could be white here if a thread gets preempted after a success at the
- // above AtomicSetReadBarrierPointer, GC has marked through it, and the thread runs up
- // to this point.
- ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr()) {
- // Register a "false-gray" object to change it from gray to white at the end of marking.
- PushOntoFalseGrayStack(ref);
- }
+ // The GC thread will mark the bitmap when popping from the mark stack. Since the bitmap is
+ // only written by the GC thread here, we can avoid an expensive CAS.
+ // For the Baker case, an object is marked if either the mark bit is set or the bitmap bit is
+ // set.
+ success = ref->AtomicSetReadBarrierPointer(ReadBarrier::WhitePtr(), ReadBarrier::GrayPtr());
} else {
+ success = !bitmap->AtomicTestAndSet(ref);
+ }
+ if (success) {
// Newly marked.
if (kUseBakerReadBarrier) {
DCHECK_EQ(ref->GetReadBarrierPointer(), ReadBarrier::GrayPtr());
@@ -99,13 +94,16 @@
return ref;
}
-template<bool kGrayImmuneObject>
+template<bool kGrayImmuneObject, bool kFromGCThread>
inline mirror::Object* ConcurrentCopying::Mark(mirror::Object* from_ref) {
if (from_ref == nullptr) {
return nullptr;
}
DCHECK(heap_->collector_type_ == kCollectorTypeCC);
- if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
+ if (kFromGCThread) {
+ DCHECK(is_active_);
+ DCHECK_EQ(Thread::Current(), thread_running_gc_);
+ } else if (UNLIKELY(kUseBakerReadBarrier && !is_active_)) {
// In the lock word forward address state, the read barrier bits
// in the lock word are part of the stored forwarding address and
// invalid. This is usually OK as the from-space copy of objects
@@ -192,6 +190,16 @@
}
}
+inline bool ConcurrentCopying::IsMarkedInUnevacFromSpace(mirror::Object* from_ref) {
+ // Use a load-acquire on the read barrier pointer to ensure that we never see a white read
+ // barrier pointer together with a stale (unmarked) bitmap bit due to memory reordering.
+ DCHECK(region_space_->IsInUnevacFromSpace(from_ref));
+ if (kUseBakerReadBarrier && from_ref->GetReadBarrierPointerAcquire() == ReadBarrier::GrayPtr()) {
+ return true;
+ }
+ return region_space_bitmap_->Test(from_ref);
+}
+
} // namespace collector
} // namespace gc
} // namespace art
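
A note on the two pieces added to this header: mutators gray an object by CAS-ing its read barrier pointer, while (for the Baker case) the bitmap bit is now set only by the GC thread when the object is popped from the mark stack, so IsMarkedInUnevacFromSpace() has to treat "pointer is gray" and "bitmap bit set" as two alternative marked states. The acquire load matters because the GC may set the bitmap bit and then turn the pointer back to white; a reader must not observe that later white value yet miss the earlier bitmap store. A stripped-down illustration of that publication pattern with std::atomic (the flag encoding and names are made up, and it assumes the store that whitens the pointer is at least release-ordered):

    #include <atomic>

    std::atomic<bool> bitmap_bit{false};  // Stands in for the mark bitmap word.
    std::atomic<int> rb_state{1};         // 1 = gray, 0 = white (illustrative).

    // GC thread: publish the mark first, then whiten with release so that any
    // thread which later observes "white" also observes the bitmap store.
    void GcFinishMarkingObject() {
      bitmap_bit.store(true, std::memory_order_relaxed);
      rb_state.store(0, std::memory_order_release);
    }

    // Reader (e.g. a marked-check): gray means marked; otherwise the acquire
    // pairs with the release above, so the bitmap read cannot be stale with
    // respect to the white value just observed.
    bool IsMarkedSketch() {
      if (rb_state.load(std::memory_order_acquire) == 1) {
        return true;
      }
      return bitmap_bit.load(std::memory_order_relaxed);
    }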
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index 42816a0..651669e 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -1302,8 +1302,19 @@
<< " " << to_ref << " " << to_ref->GetReadBarrierPointer()
<< " is_marked=" << IsMarked(to_ref);
}
- // Scan ref fields.
- Scan(to_ref);
+ bool add_to_live_bytes = false;
+ if (region_space_->IsInUnevacFromSpace(to_ref)) {
+ // Mark the bitmap only in the GC thread here so that we don't need a CAS.
+ if (!kUseBakerReadBarrier || !region_space_bitmap_->Set(to_ref)) {
+ // It may be already marked if we accidentally pushed the same object twice due to the racy
+ // bitmap read in MarkUnevacFromSpaceRegion.
+ Scan(to_ref);
+ // Only add to the live bytes if the object was not already marked.
+ add_to_live_bytes = true;
+ }
+ } else {
+ Scan(to_ref);
+ }
if (kUseBakerReadBarrier) {
DCHECK(to_ref->GetReadBarrierPointer() == ReadBarrier::GrayPtr())
<< " " << to_ref << " " << to_ref->GetReadBarrierPointer()
@@ -1332,7 +1343,7 @@
DCHECK(!kUseBakerReadBarrier);
#endif
- if (region_space_->IsInUnevacFromSpace(to_ref)) {
+ if (add_to_live_bytes) {
// Add to the live bytes per unevacuated from space. Note this code is always run by the
// GC-running thread (no synchronization required).
DCHECK(region_space_bitmap_->Test(to_ref));
@@ -1567,7 +1578,7 @@
// OK.
return;
} else if (region_space_->IsInUnevacFromSpace(ref)) {
- CHECK(region_space_bitmap_->Test(ref)) << ref;
+ CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
} else if (region_space_->IsInFromSpace(ref)) {
// Not OK. Do extra logging.
if (obj != nullptr) {
@@ -1614,7 +1625,7 @@
// OK.
return;
} else if (region_space_->IsInUnevacFromSpace(ref)) {
- CHECK(region_space_bitmap_->Test(ref)) << ref;
+ CHECK(IsMarkedInUnevacFromSpace(ref)) << ref;
} else if (region_space_->IsInFromSpace(ref)) {
// Not OK. Do extra logging.
if (gc_root_source == nullptr) {
@@ -1654,7 +1665,7 @@
LOG(INFO) << "holder is in the to-space.";
} else if (region_space_->IsInUnevacFromSpace(obj)) {
LOG(INFO) << "holder is in the unevac from-space.";
- if (region_space_bitmap_->Test(obj)) {
+ if (IsMarkedInUnevacFromSpace(obj)) {
LOG(INFO) << "holder is marked in the region space bitmap.";
} else {
LOG(INFO) << "holder is not marked in the region space bitmap.";
@@ -1783,7 +1794,7 @@
DCHECK_EQ(Thread::Current(), thread_running_gc_);
mirror::Object* ref = obj->GetFieldObject<
mirror::Object, kVerifyNone, kWithoutReadBarrier, false>(offset);
- mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false>(ref);
+ mirror::Object* to_ref = Mark</*kGrayImmuneObject*/false, /*kFromGCThread*/true>(ref);
if (to_ref == ref) {
return;
}
@@ -2126,7 +2137,7 @@
heap_->non_moving_space_->HasAddress(to_ref))
<< "from_ref=" << from_ref << " to_ref=" << to_ref;
} else if (rtype == space::RegionSpace::RegionType::kRegionTypeUnevacFromSpace) {
- if (region_space_bitmap_->Test(from_ref)) {
+ if (IsMarkedInUnevacFromSpace(from_ref)) {
to_ref = from_ref;
} else {
to_ref = nullptr;
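
The ProcessMarkStackRef() change above is the other half of that protocol: only the GC thread writes the unevac-from-space bitmap, so a plain Set() whose return value reports "already marked" both records the mark and filters the duplicate pushes that the racy mutator-side bitmap test can produce, and the live bytes are accounted exactly once. A minimal sketch of that mark-on-pop, deduplicating shape (not the collector's code; the mark set stands in for the bitmap):

    #include <unordered_set>
    #include <vector>

    struct MarkSetSketch {
      std::unordered_set<const void*> marked;
      // Returns true if the object was already marked, like SpaceBitmap::Set().
      bool Set(const void* obj) { return !marked.insert(obj).second; }
    };

    // Single consumer draining a work list that may contain duplicates.
    void ProcessMarkStackSketch(std::vector<const void*>* stack, MarkSetSketch* set) {
      while (!stack->empty()) {
        const void* obj = stack->back();
        stack->pop_back();
        if (set->Set(obj)) {
          continue;  // Duplicate push: already scanned and already accounted.
        }
        // Scan(obj) and add its size to the region's live bytes exactly once here.
      }
    }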
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index 5b0e2d6..97f4555 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -104,7 +104,7 @@
DCHECK(ref != nullptr);
return IsMarked(ref) == ref;
}
- template<bool kGrayImmuneObject = true>
+ template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref)
SHARED_REQUIRES(Locks::mutator_lock_)
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
@@ -179,6 +179,8 @@
REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
+ bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
+ SHARED_REQUIRES(Locks::mutator_lock_);
virtual bool IsMarkedHeapReference(mirror::HeapReference<mirror::Object>* field) OVERRIDE
SHARED_REQUIRES(Locks::mutator_lock_);
void SweepSystemWeaks(Thread* self)
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 0495c95..27f8bd7 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -147,6 +147,18 @@
#endif
}
+inline Object* Object::GetReadBarrierPointerAcquire() {
+#ifdef USE_BAKER_READ_BARRIER
+ DCHECK(kUseBakerReadBarrier);
+ LockWord lw(GetFieldAcquire<uint32_t>(OFFSET_OF_OBJECT_MEMBER(Object, monitor_)));
+ return reinterpret_cast<Object*>(lw.ReadBarrierState());
+#else
+ LOG(FATAL) << "Unreachable";
+ UNREACHABLE();
+#endif
+}
+
+
inline uint32_t Object::GetMarkBit() {
#ifdef USE_READ_BARRIER
return GetLockWord(false).MarkBitState();
@@ -814,6 +826,13 @@
}
}
+template<typename kSize>
+inline kSize Object::GetFieldAcquire(MemberOffset field_offset) {
+ const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
+ const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
+ return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadAcquire();
+}
+
template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset,
int64_t old_value, int64_t new_value) {
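
GetFieldAcquire() follows the existing GetField() pattern but routes the load through Atomic<kSize>::LoadAcquire(). A self-contained approximation of the same idea with std::atomic (illustrative; like ART's wrapper, it leans on the cast from plain storage to an atomic type having the same size and representation, which the runtime assumes for its supported platforms):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Acquire-load a 32-bit field located at a byte offset inside an object,
    // by viewing its storage as an atomic, as the patch does for monitor_.
    inline uint32_t LoadFieldAcquire(const void* object, size_t byte_offset) {
      const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(object) + byte_offset;
      const auto* atomic_addr = reinterpret_cast<const std::atomic<uint32_t>*>(raw_addr);
      return atomic_addr->load(std::memory_order_acquire);
    }

This pairs with the accessor added above it: GetReadBarrierPointerAcquire() simply acquire-loads the lock word and extracts the read barrier state from it.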
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index 5b129bf..8649294 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -93,9 +93,12 @@
template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
void SetClass(Class* new_klass) SHARED_REQUIRES(Locks::mutator_lock_);
- // TODO: Clean this up and change to return int32_t
+ // TODO: Clean these up and change to return int32_t
Object* GetReadBarrierPointer() SHARED_REQUIRES(Locks::mutator_lock_);
+ // Get the read barrier pointer with acquire semantics; only supported for the Baker read barrier.
+ Object* GetReadBarrierPointerAcquire() SHARED_REQUIRES(Locks::mutator_lock_);
+
#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
NO_RETURN
#endif
@@ -574,6 +577,10 @@
template<typename kSize, bool kIsVolatile>
ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
SHARED_REQUIRES(Locks::mutator_lock_);
+ // Get a field with acquire semantics.
+ template<typename kSize>
+ ALWAYS_INLINE kSize GetFieldAcquire(MemberOffset field_offset)
+ SHARED_REQUIRES(Locks::mutator_lock_);
// Verify the type correctness of stores to fields.
// TODO: This can cause thread suspension and isn't moving GC safe.