/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "region_space.h"

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "mirror/object-inl.h"
#include "mirror/class-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

// If a region has live objects whose size is less than this percent
// value of the region size, evacuate the region.
static constexpr uint kEvacuateLivePercentThreshold = 75U;

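// Maps an anonymous, read-write chunk of memory of the given capacity
// (rounded up to a whole number of regions) and wraps it in a RegionSpace.
// Returns null, and logs the current maps, if the mapping fails.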
RegionSpace* RegionSpace::Create(const std::string& name, size_t capacity,
                                 uint8_t* requested_begin) {
  capacity = RoundUp(capacity, kRegionSize);
  std::string error_msg;
  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
                                                       PROT_READ | PROT_WRITE, true, false,
                                                       &error_msg));
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity) << " with message " << error_msg;
    MemMap::DumpMaps(LOG(ERROR));
    return nullptr;
  }
  return new RegionSpace(name, mem_map.release());
}

RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
  size_t mem_map_size = mem_map->Size();
  CHECK_ALIGNED(mem_map_size, kRegionSize);
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  num_regions_ = mem_map_size / kRegionSize;
  num_non_free_regions_ = 0U;
  DCHECK_GT(num_regions_, 0U);
  regions_.reset(new Region[num_regions_]);
  uint8_t* region_addr = mem_map->Begin();
  for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
    regions_[i] = Region(i, region_addr, region_addr + kRegionSize);
  }
  if (kIsDebugBuild) {
    CHECK_EQ(regions_[0].Begin(), Begin());
    for (size_t i = 0; i < num_regions_; ++i) {
      CHECK(regions_[i].IsFree());
      CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
      if (i + 1 < num_regions_) {
        CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
      }
    }
    CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
  }
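  // full_region_ is a sentinel: it is never free and every allocation attempt
  // on it fails. Pointing current_region_ and evac_region_ at it when no real
  // region is active lets allocation fall straight through to the slow path
  // without a separate null check (presumably the intent, given the Alloc
  // DCHECK below).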
  full_region_ = Region();
  DCHECK(!full_region_.IsFree());
  DCHECK(full_region_.IsAllocated());
  current_region_ = &full_region_;
  evac_region_ = nullptr;
  size_t ignored;
  DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}

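// The three *Size() accessors below count the regions currently in each space
// type under region_lock_ and report the total in bytes. A region counts fully
// toward its type even if it is only partially filled.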
size_t RegionSpace::FromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::UnevacFromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::ToSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInToSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

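// Evacuation policy: a region allocated after the start of the previous GC is
// always evacuated; otherwise an allocated region is evacuated when less than
// kEvacuateLivePercentThreshold (75%) of it is live. For example, with a
// hypothetical 1 MB region, 600 KB live (60%) would be evacuated while 800 KB
// live (80%) would not. A large region is evacuated only when nothing in it
// is live.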
inline bool RegionSpace::Region::ShouldBeEvacuated() {
  DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
  // If the region was allocated after the start of the previous GC, or the
  // live ratio is below the threshold, evacuate it.
  bool result;
  if (is_newly_allocated_) {
    result = true;
  } else {
    bool is_live_percent_valid = live_bytes_ != static_cast<size_t>(-1);
    if (is_live_percent_valid) {
      uint live_percent = GetLivePercent();
      if (IsAllocated()) {
        // Side note: live_percent == 0 does not necessarily mean there are no
        // live objects, due to rounding (there may be a few).
        result = live_percent < kEvacuateLivePercentThreshold;
      } else {
        DCHECK(IsLarge());
        result = live_percent == 0U;
      }
    } else {
      result = false;
    }
  }
  return result;
}

// Determine which regions to evacuate and mark them as
// from-space. Mark the rest as unevacuated from-space.
void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) {
  ++time_;
  if (kUseTableLookupReadBarrier) {
    DCHECK(rb_table->IsAllCleared());
    rb_table->SetAll();
  }
  MutexLock mu(Thread::Current(), region_lock_);
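  // A large object occupies one kRegionStateLarge head region followed by
  // kRegionStateLargeTail regions. The loop below propagates the head's
  // evacuation decision to each expected tail so the object moves (or stays)
  // as a unit.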
  size_t num_expected_large_tails = 0;
  bool prev_large_evacuated = false;
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    RegionState state = r->State();
    RegionType type = r->Type();
    if (!r->IsFree()) {
      DCHECK(r->IsInToSpace());
      if (LIKELY(num_expected_large_tails == 0U)) {
        DCHECK((state == RegionState::kRegionStateAllocated ||
                state == RegionState::kRegionStateLarge) &&
               type == RegionType::kRegionTypeToSpace);
        bool should_evacuate = force_evacuate_all || r->ShouldBeEvacuated();
        if (should_evacuate) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                     type == RegionType::kRegionTypeToSpace)) {
          prev_large_evacuated = should_evacuate;
          num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
          DCHECK_GT(num_expected_large_tails, 0U);
        }
      } else {
        DCHECK(state == RegionState::kRegionStateLargeTail &&
               type == RegionType::kRegionTypeToSpace);
        if (prev_large_evacuated) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        --num_expected_large_tails;
      }
    } else {
      DCHECK_EQ(num_expected_large_tails, 0U);
      if (kUseTableLookupReadBarrier) {
        // Clear the rb table for to-space regions.
        rb_table->Clear(r->Begin(), r->End());
      }
    }
  }
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

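// Reclaims the regions that were evacuated (from-space) and returns the
// unevacuated from-space regions to to-space for the next GC cycle.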
void RegionSpace::ClearFromSpace() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      r->Clear();
      --num_non_free_regions_;
    } else if (r->IsInUnevacFromSpace()) {
      r->SetUnevacFromSpaceAsToSpace();
    }
  }
  evac_region_ = nullptr;
}

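// Debug-build check that every region's live-byte count has either been reset
// to zero or left at the static_cast<size_t>(-1) "not yet computed" sentinel.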
void RegionSpace::AssertAllRegionLiveBytesZeroOrCleared() {
  if (kIsDebugBuild) {
    MutexLock mu(Thread::Current(), region_lock_);
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      size_t live_bytes = r->LiveBytes();
      CHECK(live_bytes == 0U || live_bytes == static_cast<size_t>(-1)) << live_bytes;
    }
  }
}

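// On an allocation failure, reports the largest contiguous allocation the
// space could still serve: the unused tail of the current region, plus (when
// under the half-capacity evacuation reserve) the longest run of free regions.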
void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
                                               size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  if (current_region_->End() - current_region_->Top() > 0) {
    max_contiguous_allocation = current_region_->End() - current_region_->Top();
  }
  if (num_non_free_regions_ * 2 < num_regions_) {
    // We reserve half of the regions for evacuation only. If we
    // occupy more than half the regions, do not report the free
    // regions as available.
    size_t max_contiguous_free_regions = 0;
    size_t num_contiguous_free_regions = 0;
    bool prev_free_region = false;
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      if (r->IsFree()) {
        if (!prev_free_region) {
          CHECK_EQ(num_contiguous_free_regions, 0U);
          prev_free_region = true;
        }
        ++num_contiguous_free_regions;
      } else {
        if (prev_free_region) {
          CHECK_NE(num_contiguous_free_regions, 0U);
          max_contiguous_free_regions = std::max(max_contiguous_free_regions,
                                                 num_contiguous_free_regions);
          num_contiguous_free_regions = 0U;
          prev_free_region = false;
        }
      }
    }
    max_contiguous_allocation = std::max(max_contiguous_allocation,
                                         max_contiguous_free_regions * kRegionSize);
  }
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

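// Releases every region, returning the space to its initial, fully free state.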
void RegionSpace::Clear() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (!r->IsFree()) {
      --num_non_free_regions_;
    }
    r->Clear();
  }
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

void RegionSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}

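// Frees a large object by clearing its head region and each of its tail
// regions; bytes_allocated is rounded up to whole regions to find its extent.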
void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear();
    --num_non_free_regions_;
  }
  if (end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

void RegionSpace::DumpRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    regions_[i].Dump(os);
  }
}

void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* reg = &regions_[i];
    if (!reg->IsFree()) {
      reg->Dump(os);
    }
  }
}

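// Atomically bumps the owning region's objects_allocated_ counter. The cast
// relies on Atomic<uint64_t> being layout-compatible with the plain field.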
void RegionSpace::RecordAlloc(mirror::Object* ref) {
  CHECK(ref != nullptr);
  Region* r = RefToRegion(ref);
  reinterpret_cast<Atomic<uint64_t>*>(&r->objects_allocated_)->FetchAndAddSequentiallyConsistent(1);
}

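// Hands the calling thread a whole free region as its TLAB (thread-local
// allocation buffer). The (num_non_free_regions_ + 1) * 2 > num_regions_ check
// keeps at least half of the regions free so a full evacuation always has
// somewhere to copy into.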
bool RegionSpace::AllocNewTlab(Thread* self) {
  MutexLock mu(self, region_lock_);
  RevokeThreadLocalBuffersLocked(self);
  // Retain sufficient free regions for full evacuation.
  if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
    return false;
  }
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      r->Unfree(time_);
      ++num_non_free_regions_;
      r->SetNewlyAllocated();
      r->SetTop(r->End());
      r->is_a_tlab_ = true;
      r->thread_ = self;
      self->SetTlab(r->Begin(), r->End());
      return true;
    }
  }
  return false;
}

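// Returns 0 because revoking a TLAB frees no memory here: the region keeps
// its contents, and only the thread's allocation counts are folded back into
// it (assuming the return value denotes bytes freed, as in other spaces).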
size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), region_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  uint8_t* tlab_start = thread->GetTlabStart();
  DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
  if (tlab_start != nullptr) {
    DCHECK_ALIGNED(tlab_start, kRegionSize);
    Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
    DCHECK(r->IsAllocated());
    DCHECK_EQ(thread->GetThreadLocalBytesAllocated(), kRegionSize);
    r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
                                    thread->GetThreadLocalBytesAllocated());
    r->is_a_tlab_ = false;
    r->thread_ = nullptr;
  }
  thread->SetTlab(nullptr, nullptr);
}

size_t RegionSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    DCHECK(!thread->HasTlab());
  }
}

void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

void RegionSpace::Region::Dump(std::ostream& os) const {
  os << "Region[" << idx_ << "]=" << reinterpret_cast<void*>(begin_) << "-"
     << reinterpret_cast<void*>(top_) << "-" << reinterpret_cast<void*>(end_)
     << " state=" << static_cast<uint>(state_) << " type=" << static_cast<uint>(type_)
     << " objects_allocated=" << objects_allocated_
     << " alloc_time=" << alloc_time_ << " live_bytes=" << live_bytes_
     << " is_newly_allocated=" << is_newly_allocated_ << " is_a_tlab=" << is_a_tlab_
     << " thread=" << thread_ << "\n";
}

}  // namespace space
}  // namespace gc
}  // namespace art