/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space-inl.h"
#include "bump_pointer_space.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "region_space-inl.h"  // For the RegionSpace and Region declarations.
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

// If the live bytes of a region are less than this percentage of the
// region size, evacuate the region.
static constexpr uint kEvacuateLivePercentThreshold = 75U;

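// Creates a RegionSpace backed by an anonymous, read/write memory mapping of
// the requested capacity (rounded up to a whole number of regions). Returns
// null, after dumping the current memory maps, if the mapping fails.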
RegionSpace* RegionSpace::Create(const std::string& name, size_t capacity,
                                 uint8_t* requested_begin) {
  capacity = RoundUp(capacity, kRegionSize);
  std::string error_msg;
  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
                                                       PROT_READ | PROT_WRITE, true, false,
                                                       &error_msg));
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity) << " with message " << error_msg;
    MemMap::DumpMaps(LOG_STREAM(ERROR));
    return nullptr;
  }
  return new RegionSpace(name, mem_map.release());
}

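// Carves the backing mapping into kRegionSize-aligned regions and creates a
// live bitmap covering the whole space. full_region_ is a sentinel that is
// permanently full: allocation on it always fails, so pointing
// current_region_ and evac_region_ at it forces allocators onto the slow
// path that claims a real region.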
RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
  size_t mem_map_size = mem_map->Size();
  CHECK_ALIGNED(mem_map_size, kRegionSize);
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  num_regions_ = mem_map_size / kRegionSize;
  num_non_free_regions_ = 0U;
  DCHECK_GT(num_regions_, 0U);
  regions_.reset(new Region[num_regions_]);
  uint8_t* region_addr = mem_map->Begin();
  for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
    regions_[i].Init(i, region_addr, region_addr + kRegionSize);
  }
  mark_bitmap_.reset(
      accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity()));
  if (kIsDebugBuild) {
    CHECK_EQ(regions_[0].Begin(), Begin());
    for (size_t i = 0; i < num_regions_; ++i) {
      CHECK(regions_[i].IsFree());
      CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
      if (i + 1 < num_regions_) {
        CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
      }
    }
    CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
  }
  DCHECK(!full_region_.IsFree());
  DCHECK(full_region_.IsAllocated());
  current_region_ = &full_region_;
  evac_region_ = nullptr;
  size_t ignored;
  DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}

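// Returns the total bytes of regions currently in the from-space, i.e.
// regions whose live objects are being evacuated.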
size_t RegionSpace::FromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

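// Returns the total bytes of unevacuated from-space regions, i.e. mostly-live
// regions that are retained in place rather than evacuated.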
size_t RegionSpace::UnevacFromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

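// Returns the total bytes of regions currently in the to-space.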
size_t RegionSpace::ToSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInToSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

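// Decides whether a to-space region should be evacuated: newly allocated
// regions always are; otherwise an allocated region is evacuated when its
// live bytes fall below kEvacuateLivePercentThreshold percent of its
// allocated bytes, and a large region only when it has no live bytes at all.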
inline bool RegionSpace::Region::ShouldBeEvacuated() {
  DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
  // If the region was allocated after the start of the previous GC or its
  // live ratio is below the threshold, evacuate it.
  bool result;
  if (is_newly_allocated_) {
    result = true;
  } else {
    bool is_live_percent_valid = live_bytes_ != static_cast<size_t>(-1);
    if (is_live_percent_valid) {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_LE(live_bytes_, bytes_allocated);
      if (IsAllocated()) {
        // Side note: live_percent == 0 does not necessarily mean there are no
        // live objects, due to rounding (there may be a few).
        result = live_bytes_ * 100U < kEvacuateLivePercentThreshold * bytes_allocated;
      } else {
        DCHECK(IsLarge());
        result = live_bytes_ == 0U;
      }
    } else {
      result = false;
    }
  }
  return result;
}

// Determine which regions to evacuate and mark them as from-space. Mark the
// rest as unevacuated from-space.
void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) {
  ++time_;
  if (kUseTableLookupReadBarrier) {
    DCHECK(rb_table->IsAllCleared());
    rb_table->SetAll();
  }
  MutexLock mu(Thread::Current(), region_lock_);
  size_t num_expected_large_tails = 0;
  bool prev_large_evacuated = false;
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    RegionState state = r->State();
    RegionType type = r->Type();
    if (!r->IsFree()) {
      DCHECK(r->IsInToSpace());
      if (LIKELY(num_expected_large_tails == 0U)) {
        DCHECK((state == RegionState::kRegionStateAllocated ||
                state == RegionState::kRegionStateLarge) &&
               type == RegionType::kRegionTypeToSpace);
        bool should_evacuate = force_evacuate_all || r->ShouldBeEvacuated();
        if (should_evacuate) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                     type == RegionType::kRegionTypeToSpace)) {
          prev_large_evacuated = should_evacuate;
          num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
          DCHECK_GT(num_expected_large_tails, 0U);
        }
      } else {
        DCHECK(state == RegionState::kRegionStateLargeTail &&
               type == RegionType::kRegionTypeToSpace);
        if (prev_large_evacuated) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        --num_expected_large_tails;
      }
    } else {
      DCHECK_EQ(num_expected_large_tails, 0U);
      if (kUseTableLookupReadBarrier) {
        // Clear the rb table for to-space regions.
        rb_table->Clear(r->Begin(), r->End());
      }
    }
  }
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

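// Reclaims the from-space after evacuation: evacuated regions are freed, and
// unevacuated from-space regions are turned back into to-space regions. Runs
// of consecutive fully live unevac regions are converted together so that
// their live bitmap can be cleared with a single range operation.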
void RegionSpace::ClearFromSpace() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      r->Clear();
      --num_non_free_regions_;
    } else if (r->IsInUnevacFromSpace()) {
      // Count the run of consecutive fully live regions starting at r,
      // converting all but r itself to to-space as we go.
      size_t full_count = 0;
      while (r->IsInUnevacFromSpace()) {
        if (i + full_count >= num_regions_) {
          break;
        }
        Region* const cur = &regions_[i + full_count];
        if (cur->LiveBytes() != static_cast<size_t>(cur->Top() - cur->Begin())) {
          break;
        }
        if (full_count != 0) {
          cur->SetUnevacFromSpaceAsToSpace();
        }
        ++full_count;
      }
      // r itself is the full_count == 0 element, which the loop above skips;
      // convert it here (this also covers the case where r is not fully live).
      r->SetUnevacFromSpaceAsToSpace();
      if (full_count >= 1) {
        GetLiveBitmap()->ClearRange(
            reinterpret_cast<mirror::Object*>(r->Begin()),
            reinterpret_cast<mirror::Object*>(r->Begin() + full_count * kRegionSize));
        // Skip over the extra regions already converted above; subtract one
        // because the enclosing for loop also increments i.
        i += full_count - 1;
      }
    }
  }
  evac_region_ = nullptr;
}

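// Explains an allocation failure caused by fragmentation by computing and
// logging the largest contiguous allocation that could have succeeded.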
void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
                                               size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  if (current_region_->End() - current_region_->Top() > 0) {
    max_contiguous_allocation = current_region_->End() - current_region_->Top();
  }
  if (num_non_free_regions_ * 2 < num_regions_) {
    // We reserve half of the regions for evacuation only. If we occupy more
    // than half of the regions, do not report the free regions as available.
    size_t max_contiguous_free_regions = 0;
    size_t num_contiguous_free_regions = 0;
    bool prev_free_region = false;
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      if (r->IsFree()) {
        if (!prev_free_region) {
          CHECK_EQ(num_contiguous_free_regions, 0U);
          prev_free_region = true;
        }
        ++num_contiguous_free_regions;
      } else {
        if (prev_free_region) {
          CHECK_NE(num_contiguous_free_regions, 0U);
          max_contiguous_free_regions = std::max(max_contiguous_free_regions,
                                                 num_contiguous_free_regions);
          num_contiguous_free_regions = 0U;
          prev_free_region = false;
        }
      }
    }
    max_contiguous_allocation = std::max(max_contiguous_allocation,
                                         max_contiguous_free_regions * kRegionSize);
  }
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

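// Frees every region, returning the space to its initial, fully free state.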
void RegionSpace::Clear() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (!r->IsFree()) {
      --num_non_free_regions_;
    }
    r->Clear();
  }
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

void RegionSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}

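// Frees a large object: clears its head (large) region and every following
// large-tail region belonging to it.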
void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear();
    --num_non_free_regions_;
  }
  if (end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

void RegionSpace::DumpRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    regions_[i].Dump(os);
  }
}

void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* reg = &regions_[i];
    if (!reg->IsFree()) {
      reg->Dump(os);
    }
  }
}

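// Attributes a newly allocated object to its region's object count.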
void RegionSpace::RecordAlloc(mirror::Object* ref) {
  CHECK(ref != nullptr);
  Region* r = RefToRegion(ref);
  r->objects_allocated_.FetchAndAddSequentiallyConsistent(1);
}

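// Installs a fresh free region as the calling thread's TLAB. Returns false if
// claiming one more region would leave fewer than half of the regions free,
// since half of the space is reserved as the evacuation target.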
bool RegionSpace::AllocNewTlab(Thread* self) {
  MutexLock mu(self, region_lock_);
  RevokeThreadLocalBuffersLocked(self);
  // Retain sufficient free regions for full evacuation.
  if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
    return false;
  }
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      r->Unfree(time_);
      ++num_non_free_regions_;
      r->SetNewlyAllocated();
      r->SetTop(r->End());
      r->is_a_tlab_ = true;
      r->thread_ = self;
      self->SetTlab(r->Begin(), r->End());
      return true;
    }
  }
  return false;
}

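// Revokes the given thread's TLAB and folds its allocation counts back into
// the owning region. Returns 0 because revocation frees no memory.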
size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), region_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  uint8_t* tlab_start = thread->GetTlabStart();
  DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
  if (tlab_start != nullptr) {
    DCHECK_ALIGNED(tlab_start, kRegionSize);
    Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
    DCHECK(r->IsAllocated());
    DCHECK_EQ(thread->GetThreadLocalBytesAllocated(), kRegionSize);
    r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
                                    thread->GetThreadLocalBytesAllocated());
    r->is_a_tlab_ = false;
    r->thread_ = nullptr;
  }
  thread->SetTlab(nullptr, nullptr);
}

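// Revokes the TLABs of all threads, holding the shutdown and thread-list
// locks so that the thread list cannot change underneath us.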
size_t RegionSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    DCHECK(!thread->HasTlab());
  }
}

void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

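// One-line debug dump of the region's bounds, state, and counters.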
void RegionSpace::Region::Dump(std::ostream& os) const {
  os << "Region[" << idx_ << "]=" << reinterpret_cast<void*>(begin_) << "-"
     << reinterpret_cast<void*>(Top())
     << "-" << reinterpret_cast<void*>(end_)
     << " state=" << static_cast<uint>(state_) << " type=" << static_cast<uint>(type_)
     << " objects_allocated=" << objects_allocated_
     << " alloc_time=" << alloc_time_ << " live_bytes=" << live_bytes_
     << " is_newly_allocated=" << is_newly_allocated_
     << " is_a_tlab=" << is_a_tlab_ << " thread=" << thread_ << "\n";
}

}  // namespace space
}  // namespace gc
}  // namespace art