Merge changes Icad6d952,Ic41aa804
* changes:
Pretty print RegionSpace::RegionType and RegionSpace::RegionState values.
Stylistic and aesthetic changes.
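The pretty-printing change replaces `static_cast<uint>(state_)` / `static_cast<uint>(type_)` in Region::Dump and in DCHECK messages with direct streaming of `state_` and `type_`, which relies on `operator<<` overloads for RegionSpace::RegionState and RegionSpace::RegionType. Those overloads are not shown in the hunks below; the following is only a minimal, self-contained sketch of what such an overload could look like, using a standalone enum and a hand-written switch. The actual ART declarations and definitions live in the region_space sources and may be structured or generated differently.

  // Hypothetical sketch only; not part of this change.
  #include <cstdint>
  #include <ostream>

  enum class RegionState : std::uint8_t {
    kRegionStateFree,       // Free region.
    kRegionStateLarge,      // Large allocation (larger than the region size).
    kRegionStateLargeTail,  // Tail region of a large allocation.
    kRegionStateAllocated,  // Normally allocated region.
  };

  inline std::ostream& operator<<(std::ostream& os, RegionState state) {
    switch (state) {
      case RegionState::kRegionStateFree:      return os << "Free";
      case RegionState::kRegionStateLarge:     return os << "Large";
      case RegionState::kRegionStateLargeTail: return os << "LargeTail";
      case RegionState::kRegionStateAllocated: return os << "Allocated";
    }
    // Fall back to the raw value for out-of-range inputs.
    return os << "RegionState(" << static_cast<unsigned>(state) << ")";
  }

With an overload like this in place, `os << " state=" << state_` in Region::Dump prints a readable name (e.g. "state=Allocated") instead of a bare integer.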
diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc
index dc66234..451a909 100644
--- a/compiler/jni/jni_compiler_test.cc
+++ b/compiler/jni/jni_compiler_test.cc
@@ -523,7 +523,8 @@
bool ScopedDisableCheckNumStackReferences::sCheckNumStackReferences = true;
-// Check that the handle scope at the start of this block is the same as the handle scope at the end of the block.
+// Check that the handle scope at the start of this block is the same
+// as the handle scope at the end of the block.
struct ScopedCheckHandleScope {
ScopedCheckHandleScope() : handle_scope_(Thread::Current()->GetTopHandleScope()) {
}
diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h
index df9ee8c..a3fd1ba 100644
--- a/runtime/gc/accounting/space_bitmap-inl.h
+++ b/runtime/gc/accounting/space_bitmap-inl.h
@@ -62,7 +62,8 @@
return (bitmap_begin_[OffsetToIndex(offset)].LoadRelaxed() & OffsetToMask(offset)) != 0;
}
-template<size_t kAlignment> template<typename Visitor>
+template<size_t kAlignment>
+template<typename Visitor>
inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin,
uintptr_t visit_end,
Visitor&& visitor) const {
@@ -157,7 +158,8 @@
#endif
}
-template<size_t kAlignment> template<typename Visitor>
+template<size_t kAlignment>
+template<typename Visitor>
void SpaceBitmap<kAlignment>::Walk(Visitor&& visitor) {
CHECK(bitmap_begin_ != nullptr);
@@ -177,7 +179,8 @@
}
}
-template<size_t kAlignment> template<bool kSetBit>
+template<size_t kAlignment>
+template<bool kSetBit>
inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) {
uintptr_t addr = reinterpret_cast<uintptr_t>(obj);
DCHECK_GE(addr, heap_begin_);
diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h
index 85a656e..20e7545 100644
--- a/runtime/gc/collector/concurrent_copying-inl.h
+++ b/runtime/gc/collector/concurrent_copying-inl.h
@@ -52,7 +52,8 @@
// we can avoid an expensive CAS.
// For the baker case, an object is marked if either the mark bit or the bitmap bit is
// set.
- success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(), ReadBarrier::GrayState());
+ success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::WhiteState(),
+ /* rb_state */ ReadBarrier::GrayState());
} else {
success = !bitmap->AtomicTestAndSet(ref);
}
@@ -86,8 +87,8 @@
return ref;
}
// This may or may not succeed, which is ok because the object may already be gray.
- bool success = ref->AtomicSetReadBarrierState(ReadBarrier::WhiteState(),
- ReadBarrier::GrayState());
+ bool success = ref->AtomicSetReadBarrierState(/* expected_rb_state */ ReadBarrier::WhiteState(),
+ /* rb_state */ ReadBarrier::GrayState());
if (success) {
MutexLock mu(Thread::Current(), immune_gray_stack_lock_);
immune_gray_stack_.push_back(ref);
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index e925d42..8bb36c9 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -347,8 +347,9 @@
// This must come before the revoke.
size_t thread_local_objects = thread->GetThreadLocalObjectsAllocated();
concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
- reinterpret_cast<Atomic<size_t>*>(&concurrent_copying_->from_space_num_objects_at_first_pause_)->
- FetchAndAddSequentiallyConsistent(thread_local_objects);
+ reinterpret_cast<Atomic<size_t>*>(
+ &concurrent_copying_->from_space_num_objects_at_first_pause_)->
+ FetchAndAddSequentiallyConsistent(thread_local_objects);
} else {
concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread);
}
@@ -1534,7 +1535,8 @@
!IsInToSpace(referent)))) {
// Leave this reference gray in the queue so that GetReferent() will trigger a read barrier. We
// will change it to white later in ReferenceQueue::DequeuePendingReference().
- DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr) << "Left unenqueued ref gray " << to_ref;
+ DCHECK(to_ref->AsReference()->GetPendingNext() != nullptr)
+ << "Left unenqueued ref gray " << to_ref;
} else {
// We may occasionally leave a reference white in the queue if its referent happens to be
// concurrently marked after the Scan() call above has enqueued the Reference, in which case the
@@ -1552,7 +1554,7 @@
#endif
if (add_to_live_bytes) {
- // Add to the live bytes per unevacuated from space. Note this code is always run by the
+ // Add to the live bytes per unevacuated from-space. Note this code is always run by the
// GC-running thread (no synchronization required).
DCHECK(region_space_bitmap_->Test(to_ref));
size_t obj_size = to_ref->SizeOf<kDefaultVerifyFlags>();
@@ -1774,17 +1776,20 @@
if (kVerboseMode) {
LOG(INFO) << "RecordFree:"
<< " from_bytes=" << from_bytes << " from_objects=" << from_objects
- << " unevac_from_bytes=" << unevac_from_bytes << " unevac_from_objects=" << unevac_from_objects
+ << " unevac_from_bytes=" << unevac_from_bytes
+ << " unevac_from_objects=" << unevac_from_objects
<< " to_bytes=" << to_bytes << " to_objects=" << to_objects
<< " freed_bytes=" << freed_bytes << " freed_objects=" << freed_objects
<< " from_space size=" << region_space_->FromSpaceSize()
<< " unevac_from_space size=" << region_space_->UnevacFromSpaceSize()
<< " to_space size=" << region_space_->ToSpaceSize();
- LOG(INFO) << "(before) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
+ LOG(INFO) << "(before) num_bytes_allocated="
+ << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
}
RecordFree(ObjectBytePair(freed_objects, freed_bytes));
if (kVerboseMode) {
- LOG(INFO) << "(after) num_bytes_allocated=" << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
+ LOG(INFO) << "(after) num_bytes_allocated="
+ << heap_->num_bytes_allocated_.LoadSequentiallyConsistent();
}
}
@@ -2051,11 +2056,13 @@
(is_los && los_bitmap->Test(ref))) {
// OK.
} else {
- // If ref is on the allocation stack, then it may not be
+ // If `ref` is on the allocation stack, then it may not be
// marked live, but considered marked/alive (but not
// necessarily on the live stack).
- CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack. "
- << "obj=" << obj << " ref=" << ref;
+ CHECK(IsOnAllocStack(ref)) << "Unmarked ref that's not on the allocation stack."
+ << " obj=" << obj
+ << " ref=" << ref
+ << " is_los=" << std::boolalpha << is_los << std::noboolalpha;
}
}
}
@@ -2136,7 +2143,7 @@
// It was updated by the mutator.
break;
}
- // Use release cas to make sure threads reading the reference see contents of copied objects.
+ // Use release CAS to make sure threads reading the reference see contents of copied objects.
} while (!obj->CasFieldWeakReleaseObjectWithoutWriteBarrier<false, false, kVerifyNone>(
offset,
expected_ref,
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f22d025..d8d215b 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1899,10 +1899,10 @@
MutexLock mu(self, *gc_complete_lock_);
// Ensure there is only one GC at a time.
WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
- // Homogeneous space compaction is a copying transition, can't run it if the moving GC disable count
- // is non zero.
- // If the collector type changed to something which doesn't benefit from homogeneous space compaction,
- // exit.
+ // Homogeneous space compaction is a copying transition, can't run it if the moving GC disable
+ // count is non zero.
+ // If the collector type changed to something which doesn't benefit from homogeneous space
+ // compaction, exit.
if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_) ||
!main_space_->CanMoveObjects()) {
return kErrorReject;
@@ -3445,8 +3445,8 @@
TraceHeapSize(bytes_allocated);
uint64_t target_size;
collector::GcType gc_type = collector_ran->GetGcType();
- const double multiplier = HeapGrowthMultiplier(); // Use the multiplier to grow more for
- // foreground.
+ // Use the multiplier to grow more for foreground.
+ const double multiplier = HeapGrowthMultiplier();
const uint64_t adjusted_min_free = static_cast<uint64_t>(min_free_ * multiplier);
const uint64_t adjusted_max_free = static_cast<uint64_t>(max_free_ * multiplier);
if (gc_type != collector::kGcTypeSticky) {
diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h
index e74e9b1..305f0bc 100644
--- a/runtime/gc/space/region_space-inl.h
+++ b/runtime/gc/space/region_space-inl.h
@@ -24,26 +24,30 @@
namespace gc {
namespace space {
-inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated) {
+inline mirror::Object* RegionSpace::Alloc(Thread* self ATTRIBUTE_UNUSED,
+ size_t num_bytes,
+ /* out */ size_t* bytes_allocated,
+ /* out */ size_t* usable_size,
+ /* out */ size_t* bytes_tl_bulk_allocated) {
num_bytes = RoundUp(num_bytes, kAlignment);
return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
bytes_tl_bulk_allocated);
}
-inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
- size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated) {
+inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self,
+ size_t num_bytes,
+ /* out */ size_t* bytes_allocated,
+ /* out */ size_t* usable_size,
+ /* out */ size_t* bytes_tl_bulk_allocated) {
Locks::mutator_lock_->AssertExclusiveHeld(self);
return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}
template<bool kForEvac>
-inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated) {
+inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes,
+ /* out */ size_t* bytes_allocated,
+ /* out */ size_t* usable_size,
+ /* out */ size_t* bytes_tl_bulk_allocated) {
DCHECK_ALIGNED(num_bytes, kAlignment);
mirror::Object* obj;
if (LIKELY(num_bytes <= kRegionSize)) {
@@ -79,8 +83,7 @@
}
} else {
// Large object.
- obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size,
- bytes_tl_bulk_allocated);
+ obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
if (LIKELY(obj != nullptr)) {
return obj;
}
@@ -88,9 +91,10 @@
return nullptr;
}
-inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated) {
+inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes,
+ /* out */ size_t* bytes_allocated,
+ /* out */ size_t* usable_size,
+ /* out */ size_t* bytes_tl_bulk_allocated) {
DCHECK(IsAllocated() && IsInToSpace());
DCHECK_ALIGNED(num_bytes, kAlignment);
uint8_t* old_top;
@@ -238,9 +242,9 @@
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocLarge(size_t num_bytes,
- size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated) {
+ /* out */ size_t* bytes_allocated,
+ /* out */ size_t* usable_size,
+ /* out */ size_t* bytes_tl_bulk_allocated) {
DCHECK_ALIGNED(num_bytes, kAlignment);
DCHECK_GT(num_bytes, kRegionSize);
size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
@@ -270,7 +274,7 @@
}
}
if (found) {
- // right points to the one region past the last free region.
+ // `right` points to the one region past the last free region.
DCHECK_EQ(left + num_regs, right);
Region* first_reg = &regions_[left];
DCHECK(first_reg->IsFree());
@@ -345,7 +349,7 @@
DCHECK_EQ(begin_, Top());
return 0;
} else {
- DCHECK(IsAllocated()) << static_cast<uint>(state_);
+ DCHECK(IsAllocated()) << "state=" << state_;
DCHECK_LE(begin_, Top());
size_t bytes;
if (is_a_tlab_) {
@@ -358,6 +362,20 @@
}
}
+inline size_t RegionSpace::Region::ObjectsAllocated() const {
+ if (IsLarge()) {
+ DCHECK_LT(begin_ + kRegionSize, Top());
+ DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
+ return 1;
+ } else if (IsLargeTail()) {
+ DCHECK_EQ(begin_, Top());
+ DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
+ return 0;
+ } else {
+ DCHECK(IsAllocated()) << "state=" << state_;
+ return objects_allocated_;
+ }
+}
} // namespace space
} // namespace gc
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index d58b76b..8f9f1a9 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -27,7 +27,7 @@
// If a region has live objects whose size is less than this percent
// value of the region size, evacuate the region.
-static constexpr uint kEvaculateLivePercentThreshold = 75U;
+static constexpr uint kEvacuateLivePercentThreshold = 75U;
// If we protect the cleared regions.
// Only protect for target builds to prevent flaky test failures (b/63131961).
@@ -165,7 +165,7 @@
if (is_newly_allocated_) {
result = true;
} else {
- bool is_live_percent_valid = live_bytes_ != static_cast<size_t>(-1);
+ bool is_live_percent_valid = (live_bytes_ != static_cast<size_t>(-1));
if (is_live_percent_valid) {
DCHECK(IsInToSpace());
DCHECK(!IsLargeTail());
@@ -177,10 +177,10 @@
// Side note: live_percent == 0 does not necessarily mean
// there are no live objects, due to rounding (there may be a
// few).
- result = live_bytes_ * 100U < kEvaculateLivePercentThreshold * bytes_allocated;
+ result = (live_bytes_ * 100U < kEvacuateLivePercentThreshold * bytes_allocated);
} else {
DCHECK(IsLarge());
- result = live_bytes_ == 0U;
+ result = (live_bytes_ == 0U);
}
} else {
result = false;
@@ -260,7 +260,8 @@
}
}
-void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) {
+void RegionSpace::ClearFromSpace(/* out */ uint64_t* cleared_bytes,
+ /* out */ uint64_t* cleared_objects) {
DCHECK(cleared_bytes != nullptr);
DCHECK(cleared_objects != nullptr);
*cleared_bytes = 0;
@@ -432,7 +433,7 @@
void RegionSpace::Dump(std::ostream& os) const {
os << GetName() << " "
- << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
+ << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}
void RegionSpace::DumpRegionForObject(std::ostream& os, mirror::Object* obj) {
@@ -532,13 +533,18 @@
}
void RegionSpace::Region::Dump(std::ostream& os) const {
- os << "Region[" << idx_ << "]=" << reinterpret_cast<void*>(begin_) << "-"
- << reinterpret_cast<void*>(Top())
+ os << "Region[" << idx_ << "]="
+ << reinterpret_cast<void*>(begin_)
+ << "-" << reinterpret_cast<void*>(Top())
<< "-" << reinterpret_cast<void*>(end_)
- << " state=" << static_cast<uint>(state_) << " type=" << static_cast<uint>(type_)
+ << " state=" << state_
+ << " type=" << type_
<< " objects_allocated=" << objects_allocated_
- << " alloc_time=" << alloc_time_ << " live_bytes=" << live_bytes_
- << " is_newly_allocated=" << is_newly_allocated_ << " is_a_tlab=" << is_a_tlab_ << " thread=" << thread_ << "\n";
+ << " alloc_time=" << alloc_time_
+ << " live_bytes=" << live_bytes_
+ << " is_newly_allocated=" << std::boolalpha << is_newly_allocated_ << std::noboolalpha
+ << " is_a_tlab=" << std::boolalpha << is_a_tlab_ << std::noboolalpha
+ << " thread=" << thread_ << '\n';
}
size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index b4970ed..d026ffd 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -46,24 +46,33 @@
static MemMap* CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
static RegionSpace* Create(const std::string& name, MemMap* mem_map);
- // Allocate num_bytes, returns null if the space is full.
- mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ // Allocate `num_bytes`, returns null if the space is full.
+ mirror::Object* Alloc(Thread* self,
+ size_t num_bytes,
+ /* out */ size_t* bytes_allocated,
+ /* out */ size_t* usable_size,
+ /* out */ size_t* bytes_tl_bulk_allocated)
OVERRIDE REQUIRES(!region_lock_);
// Thread-unsafe allocation for when mutators are suspended, used by the semispace collector.
- mirror::Object* AllocThreadUnsafe(Thread* self, size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size, size_t* bytes_tl_bulk_allocated)
+ mirror::Object* AllocThreadUnsafe(Thread* self,
+ size_t num_bytes,
+ /* out */ size_t* bytes_allocated,
+ /* out */ size_t* usable_size,
+ /* out */ size_t* bytes_tl_bulk_allocated)
OVERRIDE REQUIRES(Locks::mutator_lock_) REQUIRES(!region_lock_);
// The main allocation routine.
template<bool kForEvac>
- ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated)
+ ALWAYS_INLINE mirror::Object* AllocNonvirtual(size_t num_bytes,
+ /* out */ size_t* bytes_allocated,
+ /* out */ size_t* usable_size,
+ /* out */ size_t* bytes_tl_bulk_allocated)
REQUIRES(!region_lock_);
// Allocate/free large objects (objects that are larger than the region size).
template<bool kForEvac>
- mirror::Object* AllocLarge(size_t num_bytes, size_t* bytes_allocated, size_t* usable_size,
- size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
+ mirror::Object* AllocLarge(size_t num_bytes,
+ /* out */ size_t* bytes_allocated,
+ /* out */ size_t* usable_size,
+ /* out */ size_t* bytes_tl_bulk_allocated) REQUIRES(!region_lock_);
template<bool kForEvac>
void FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) REQUIRES(!region_lock_);
@@ -176,7 +185,7 @@
template <typename Visitor>
ALWAYS_INLINE void WalkToSpace(Visitor&& visitor)
REQUIRES(Locks::mutator_lock_) {
- WalkInternal<true>(visitor);
+ WalkInternal<true /* kToSpaceOnly */>(visitor);
}
accounting::ContinuousSpaceBitmap::SweepCallback* GetSweepCallback() OVERRIDE {
@@ -236,7 +245,8 @@
size_t FromSpaceSize() REQUIRES(!region_lock_);
size_t UnevacFromSpaceSize() REQUIRES(!region_lock_);
size_t ToSpaceSize() REQUIRES(!region_lock_);
- void ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) REQUIRES(!region_lock_);
+ void ClearFromSpace(/* out */ uint64_t* cleared_bytes, /* out */ uint64_t* cleared_objects)
+ REQUIRES(!region_lock_);
void AddLiveBytes(mirror::Object* ref, size_t alloc_size) {
Region* reg = RefToRegionUnlocked(ref);
@@ -303,12 +313,13 @@
void Clear(bool zero_and_release_pages);
- ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes, size_t* bytes_allocated,
- size_t* usable_size,
- size_t* bytes_tl_bulk_allocated);
+ ALWAYS_INLINE mirror::Object* Alloc(size_t num_bytes,
+ /* out */ size_t* bytes_allocated,
+ /* out */ size_t* usable_size,
+ /* out */ size_t* bytes_tl_bulk_allocated);
bool IsFree() const {
- bool is_free = state_ == RegionState::kRegionStateFree;
+ bool is_free = (state_ == RegionState::kRegionStateFree);
if (is_free) {
DCHECK(IsInNoSpace());
DCHECK_EQ(begin_, Top());
@@ -341,7 +352,7 @@
// Large allocated.
bool IsLarge() const {
- bool is_large = state_ == RegionState::kRegionStateLarge;
+ bool is_large = (state_ == RegionState::kRegionStateLarge);
if (is_large) {
DCHECK_LT(begin_ + kRegionSize, Top());
}
@@ -350,7 +361,7 @@
// Large-tail allocated.
bool IsLargeTail() const {
- bool is_large_tail = state_ == RegionState::kRegionStateLargeTail;
+ bool is_large_tail = (state_ == RegionState::kRegionStateLargeTail);
if (is_large_tail) {
DCHECK_EQ(begin_, Top());
}
@@ -420,20 +431,7 @@
size_t BytesAllocated() const;
- size_t ObjectsAllocated() const {
- if (IsLarge()) {
- DCHECK_LT(begin_ + kRegionSize, Top());
- DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
- return 1;
- } else if (IsLargeTail()) {
- DCHECK_EQ(begin_, Top());
- DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U);
- return 0;
- } else {
- DCHECK(IsAllocated()) << static_cast<uint>(state_);
- return objects_allocated_;
- }
- }
+ size_t ObjectsAllocated() const;
uint8_t* Begin() const {
return begin_;
@@ -553,12 +551,15 @@
std::unique_ptr<Region[]> regions_ GUARDED_BY(region_lock_);
// The pointer to the region array.
+
// The upper-bound index of the non-free regions. Used to avoid scanning all regions in
- // SetFromSpace(). Invariant: for all i >= non_free_region_index_limit_, regions_[i].IsFree() is
- // true.
+ // RegionSpace::SetFromSpace and RegionSpace::ClearFromSpace.
+ //
+ // Invariant (verified by RegionSpace::VerifyNonFreeRegionLimit):
+ // for all `i >= non_free_region_index_limit_`, `regions_[i].IsFree()` is true.
size_t non_free_region_index_limit_ GUARDED_BY(region_lock_);
- Region* current_region_; // The region that's being allocated currently.
- Region* evac_region_; // The region that's being evacuated to currently.
+ Region* current_region_; // The region that's being currently allocated.
+ Region* evac_region_; // The region that's being currently evacuated to.
Region full_region_; // The dummy/sentinel region that looks full.
// Mark bitmap used by the GC.
diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc
index 2c6afa7..82f9905 100644
--- a/runtime/gc/space/space.cc
+++ b/runtime/gc/space/space.cc
@@ -125,7 +125,7 @@
void ContinuousMemMapAllocSpace::UnBindBitmaps() {
CHECK(HasBoundBitmaps());
- // At this point, the temp_bitmap holds our old mark bitmap.
+ // At this point, `temp_bitmap_` holds our old mark bitmap.
accounting::ContinuousSpaceBitmap* new_bitmap = temp_bitmap_.release();
Runtime::Current()->GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap_.get(), new_bitmap);
CHECK_EQ(mark_bitmap_.release(), live_bitmap_.get());
diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h
index 6b76048..964824a 100644
--- a/runtime/gc/space/space.h
+++ b/runtime/gc/space/space.h
@@ -389,8 +389,8 @@
}
protected:
- MemMapSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end, uint8_t* limit,
- GcRetentionPolicy gc_retention_policy)
+ MemMapSpace(const std::string& name, MemMap* mem_map, uint8_t* begin, uint8_t* end,
+ uint8_t* limit, GcRetentionPolicy gc_retention_policy)
: ContinuousSpace(name, gc_retention_policy, begin, end, limit),
mem_map_(mem_map) {
}
diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h
index 8180222..39a1db8 100644
--- a/runtime/interpreter/interpreter_common.h
+++ b/runtime/interpreter/interpreter_common.h
@@ -176,7 +176,8 @@
}
const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
const uint32_t vregC = (is_range) ? inst->VRegC_3rc() : inst->VRegC_35c();
- ObjPtr<mirror::Object> receiver = (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
+ ObjPtr<mirror::Object> receiver =
+ (type == kStatic) ? nullptr : shadow_frame.GetVRegReference(vregC);
ArtMethod* sf_method = shadow_frame.GetMethod();
ArtMethod* const called_method = FindMethodFromCode<type, do_access_check>(
method_idx, &receiver, sf_method, self);
@@ -645,7 +646,7 @@
// Explicitly instantiate all DoInvokeVirtualQuick functions.
#define EXPLICIT_DO_INVOKE_VIRTUAL_QUICK_TEMPLATE_DECL(_is_range) \
- template REQUIRES_SHARED(Locks::mutator_lock_) \
+ template REQUIRES_SHARED(Locks::mutator_lock_) \
bool DoInvokeVirtualQuick<_is_range>(Thread* self, ShadowFrame& shadow_frame, \
const Instruction* inst, uint16_t inst_data, \
JValue* result)
diff --git a/runtime/lock_word.h b/runtime/lock_word.h
index fac1a75..e89beb6 100644
--- a/runtime/lock_word.h
+++ b/runtime/lock_word.h
@@ -34,7 +34,9 @@
/* The lock value itself as stored in mirror::Object::monitor_. The two most significant bits of
* the state. The four possible states are fat locked, thin/unlocked, hash code, and forwarding
- * address. When the lock word is in the "thin" state and its bits are formatted as follows:
+ * address.
+ *
+ * When the lock word is in the "thin" state and its bits are formatted as follows:
*
* |33|2|2|222222221111|1111110000000000|
* |10|9|8|765432109876|5432109876543210|
@@ -59,7 +61,7 @@
* |11|0| ForwardingAddress |
*
* The `r` bit stores the read barrier state.
- * The `m` bit stores the mark state.
+ * The `m` bit stores the mark bit state.
*/
class LockWord {
public:
diff --git a/runtime/mirror/object-readbarrier-inl.h b/runtime/mirror/object-readbarrier-inl.h
index d81fff0..126cb04 100644
--- a/runtime/mirror/object-readbarrier-inl.h
+++ b/runtime/mirror/object-readbarrier-inl.h
@@ -187,7 +187,7 @@
expected_lw = lw;
new_lw = lw;
new_lw.SetMarkBitState(mark_bit);
- // Since this is only set from the mutator, we can use the non release Cas.
+ // Since this is only set from the mutator, we can use the non-release CAS.
} while (!CasLockWordWeakRelaxed(expected_lw, new_lw));
return true;
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7136fee..2ee7f9d 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -663,8 +663,8 @@
child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
stack_size = FixStackSize(stack_size);
- // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
- // assign it.
+ // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing
+ // to assign it.
env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
reinterpret_cast<jlong>(child_thread));
@@ -839,7 +839,8 @@
if (create_peer) {
self->CreatePeer(thread_name, as_daemon, thread_group);
if (self->IsExceptionPending()) {
- // We cannot keep the exception around, as we're deleting self. Try to be helpful and log it.
+ // We cannot keep the exception around, as we're deleting self. Try to be helpful and log
+ // it.
{
ScopedObjectAccess soa(self);
LOG(ERROR) << "Exception creating thread peer:";
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index 59c9022..bb6ace1 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -667,7 +667,8 @@
if [ "$HOST" = "y" ]; then
max_filename_size=$(getconf NAME_MAX $DEX_LOCATION)
else
- # There is no getconf on device, fallback to standard value. See NAME_MAX in kernel <linux/limits.h>
+ # There is no getconf on device, fallback to standard value.
+ # See NAME_MAX in kernel <linux/limits.h>
max_filename_size=255
fi
# Compute VDEX_NAME.