/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <sys/mman.h>

#include <climits>
#include <functional>
#include <numeric>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

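// Whether to mprotect the from-space once evacuation finishes, so that any stale reference
// into it faults immediately instead of silently reading dead objects, and whether to release
// the from-space's memory at the end of the collection (see ReclaimPhase()).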
static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;

// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If the previous space was immune, then extend the immune region. Relies on continuous
    // spaces being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      to_space_(nullptr),
      from_space_(nullptr),
      soft_reference_list_(nullptr),
      weak_reference_list_(nullptr),
      finalizer_reference_list_(nullptr),
      phantom_reference_list_(nullptr),
      cleared_reference_list_(nullptr),
      self_(nullptr) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  soft_reference_list_ = nullptr;
  weak_reference_list_ = nullptr;
  finalizer_reference_list_ = nullptr;
  phantom_reference_list_ = nullptr;
  cleared_reference_list_ = nullptr;
  self_ = Thread::Current();
  // Do any pre-GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

void SemiSpace::ProcessReferences(Thread* self) {
  base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
                    &finalizer_reference_list_, &phantom_reference_list_);
}

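// Overview of the marking phase: with mutators suspended, swap the semispaces so allocation
// targets the new to-space, make never/full-collect spaces immune, process dirty cards into
// the mod-union tables, swap the allocation and live stacks, and then mark everything
// reachable from the runtime roots and the immune spaces' mod-union tables.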
void SemiSpace::MarkingPhase() {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that nobody accidentally allocates into the
  // wrong space.
  heap_->SwapSemiSpaces();
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references it holds to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      CHECK(table != nullptr);
      // TODO: Improve naming.
      base::TimingLogger::ScopedSplit split(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
          &timings_);
      table->UpdateAndMarkReferences(MarkRootCallback, this);
    }
  }
}

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively process the mark stack.
  ProcessMarkStack(true);
}

void SemiSpace::ReclaimPhase() {
  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  ProcessReferences(self);
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory. Use 64-bit arithmetic so byte counts on large heaps can't overflow
  // an int.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = to_space_->GetBytesAllocated();
  const int64_t from_objects = from_space_->GetObjectsAllocated();
  const int64_t to_objects = to_space_->GetObjectsAllocated();
  const int64_t freed_bytes = from_bytes - to_bytes;
  const int64_t freed_objects = from_objects - to_objects;
  CHECK_GE(freed_bytes, 0);
  freed_bytes_.fetch_add(freed_bytes);
  freed_objects_.fetch_add(freed_objects);
  heap_->RecordFree(static_cast<size_t>(freed_objects), static_cast<size_t>(freed_bytes));

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space that we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
  // Release the memory used by the from space.
  if (kResetFromSpace) {
    from_space_->Clear();
  }
  // Protect the from space.
  VLOG(heap) << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
             << reinterpret_cast<void*>(from_space_->Limit());
  if (kProtectFromSpace) {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
  } else {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
  }
}

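// Grow the mark stack to new_size. Resize() reallocates the underlying storage, so any entries
// already on the stack are copied out first and re-pushed afterwards.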
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed onto the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved
// to the to-space and have their forwarding address recorded. Objects which have been newly
// marked are pushed on the mark stack.
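// The forwarding address is stashed in the moved object's lock word (see the SetLockWord call
// below); an object counts as forwarded iff the decoded address falls inside to_space_.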
Object* SemiSpace::MarkObject(Object* obj) {
  Object* ret = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has already been moved, return the new forwarding address.
      if (!to_space_->HasAddress(forward_address)) {
        // The object has not yet been moved: copy it to the to-space and add it to the mark
        // stack for processing.
        size_t object_size = obj->SizeOf();
        size_t dummy = 0;
        forward_address = to_space_->Alloc(self_, object_size, &dummy);
        // Copy over the object and add it to the mark stack since we still need to update its
        // references.
        memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
        // Make sure to only update the forwarding address AFTER you copy the object so that the
        // monitor word doesn't get stomped over.
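        // (If the lock word were written first, the memcpy above would copy the encoded
        // forwarding address into the new object, clobbering the original monitor word.)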
        COMPILE_ASSERT(sizeof(uint32_t) == sizeof(mirror::Object*),
                       monitor_size_must_be_same_as_object);
        obj->SetLockWord(LockWord::FromForwardingAddress(
            reinterpret_cast<size_t>(forward_address)));
        MarkStackPush(forward_address);
      }
      ret = forward_address;
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap =
          heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        DCHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return ret;
}

Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsDlMallocSpace());
  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
}

mirror::Object* SemiSpace::GetForwardingAddress(mirror::Object* obj) {
  if (from_space_->HasAddress(obj)) {
    LOG(FATAL) << "Shouldn't happen!";
    return GetForwardingAddressInFromSpace(obj);
  }
  return obj;
}

mirror::Object* SemiSpace::SystemWeakIsMarkedCallback(Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
  timings_.EndSplit();
}

struct SweepCallbackContext {
  SemiSpace* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

void SemiSpace::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  SemiSpace* gc = context->mark_sweep;
  Heap* heap = gc->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(num_ptrs, freed_bytes);
  gc->freed_objects_.fetch_add(num_ptrs);
  gc->freed_bytes_.fetch_add(freed_bytes);
}

void SemiSpace::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

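// Sweeps unmarked objects out of each eligible DlMalloc space, then the large object space.
// Zygote spaces go through ZygoteSweepCallback, which frees no memory, keeping the shared
// zygote pages clean.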
void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  base::TimingLogger::ScopedSplit split("Sweep", &timings_);

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (!space->IsDlMallocSpace()) {
      continue;
    }
    // We always sweep always-collect spaces.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We also sweep full-collect spaces when the GC isn't partial (i.e. it is a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsDlMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
        // Zygote sweep takes care of dirtying cards and clearing live bits; it does not free
        // actual memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  SweepLargeObjects(swap_bitmaps);
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  for (const Object* obj : large_live_objects->GetObjects()) {
    if (!large_mark_objects->Test(obj)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
      ++freed_objects;
    }
  }
  freed_large_objects_.fetch_add(freed_objects);
  freed_large_object_bytes_.fetch_add(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
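// A null result from GetMarkedForwardAddress() below means the referent is still white
// (unmarked), so the reference gets queued according to its reference class.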
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  DCHECK(klass != nullptr);
  DCHECK(klass->IsReferenceClass());
  DCHECK(obj != nullptr);
  Object* referent = heap_->GetReferenceReferent(obj);
  if (referent != nullptr) {
    Object* forward_address = GetMarkedForwardAddress(referent);
    if (forward_address == nullptr) {
      Thread* self = Thread::Current();
      // TODO: Remove these locks, and use atomic stacks for storing references?
      // We need to check that the references haven't already been enqueued since we can end up
      // scanning the same reference multiple times due to dirty cards.
      if (klass->IsSoftReferenceClass()) {
        MutexLock mu(self, *heap_->GetSoftRefQueueLock());
        if (!heap_->IsEnqueued(obj)) {
          heap_->EnqueuePendingReference(obj, &soft_reference_list_);
        }
      } else if (klass->IsWeakReferenceClass()) {
        MutexLock mu(self, *heap_->GetWeakRefQueueLock());
        if (!heap_->IsEnqueued(obj)) {
          heap_->EnqueuePendingReference(obj, &weak_reference_list_);
        }
      } else if (klass->IsFinalizerReferenceClass()) {
        MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
        if (!heap_->IsEnqueued(obj)) {
          heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
        }
      } else if (klass->IsPhantomReferenceClass()) {
        MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
        if (!heap_->IsEnqueued(obj)) {
          heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
        }
      } else {
        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
                   << klass->GetAccessFlags();
      }
    } else if (referent != forward_address) {
      heap_->SetReferenceReferent(obj, forward_address);
    }
  }
}

// Visit all of the references of an object, updating any that point at moved objects.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  // Note: the lambda's obj parameter is the holding object, shadowing the outer obj.
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref,
      const MemberOffset& offset, bool /* is_static */) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      obj->SetFieldObject(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass();
  if (UNLIKELY(klass->IsReferenceClass())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    ScanObject(mark_stack_->PopBack());
  }
  timings_.EndSplit();
}

// Walks the reference list marking any references subject to the reference clearing policy.
// References with a black referent are removed from the list. References with white referents
// biased toward saving are blackened and also removed from the list.
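// The save bias is a simple parity counter: every other white referent encountered is marked
// (preserved); the rest are queued for clearing.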
void SemiSpace::PreserveSomeSoftReferences(Object** list) {
  DCHECK(list != nullptr);
  Object* clear = nullptr;
  size_t counter = 0;
  DCHECK(mark_stack_->IsEmpty());
  timings_.StartSplit("PreserveSomeSoftReferences");
  while (*list != nullptr) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent == nullptr) {
      // Referent was cleared by the user during marking.
      continue;
    }
    Object* forward_address = GetMarkedForwardAddress(referent);
    bool is_marked = forward_address != nullptr;
    if (!is_marked && ((++counter) & 1)) {
      // Referent is white and biased toward saving, mark it.
      forward_address = MarkObject(referent);
      if (referent != forward_address) {
        // Update the referent if we moved it.
        heap_->SetReferenceReferent(ref, forward_address);
      }
    } else {
      if (!is_marked) {
        // Referent is white, queue it for clearing.
        heap_->EnqueuePendingReference(ref, &clear);
      } else if (referent != forward_address) {
        CHECK(forward_address != nullptr);
        heap_->SetReferenceReferent(ref, forward_address);
      }
    }
  }
  *list = clear;
  timings_.EndSplit();
  // Restart the mark with the newly black references added to the root set.
  ProcessMarkStack(true);
}

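// Returns the address at which obj is marked: its forwarding address if it was moved out of
// the from-space, obj itself if it is immune, already in the to-space, or set in a mark
// bitmap, and null if it is unmarked.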
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    // If the object is forwarded then it MUST be marked.
    if (to_space_->HasAddress(forwarding_address)) {
      return forwarding_address;
    }
    // Not forwarded, so it must not be marked.
    return nullptr;
  } else if (to_space_->HasAddress(obj)) {
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

// Unlink the reference list, clearing reference objects with white referents. Cleared
// references registered to a reference queue are scheduled for appending by the heap worker
// thread.
void SemiSpace::ClearWhiteReferences(Object** list) {
  DCHECK(list != nullptr);
  while (*list != nullptr) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != nullptr) {
      Object* forward_address = GetMarkedForwardAddress(referent);
      if (forward_address == nullptr) {
        // Referent is white, clear it.
        heap_->ClearReferenceReferent(ref);
        if (heap_->IsEnqueuable(ref)) {
          heap_->EnqueueReference(ref, &cleared_reference_list_);
        }
      } else if (referent != forward_address) {
        heap_->SetReferenceReferent(ref, forward_address);
      }
    }
  }
  DCHECK(*list == nullptr);
}

// Enqueues finalizer references with white referents. White referents are blackened, moved to
// the zombie field, and the referent field is cleared.
void SemiSpace::EnqueueFinalizerReferences(Object** list) {
  DCHECK(list != nullptr);
  timings_.StartSplit("EnqueueFinalizerReferences");
  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
  bool has_enqueued = false;
  while (*list != nullptr) {
    Object* ref = heap_->DequeuePendingReference(list);
    Object* referent = heap_->GetReferenceReferent(ref);
    if (referent != nullptr) {
      Object* forward_address = GetMarkedForwardAddress(referent);
      // Not marked.
      if (forward_address == nullptr) {
        forward_address = MarkObject(referent);
        // If the referent is non-null the reference must be enqueuable.
        DCHECK(heap_->IsEnqueuable(ref));
        // Move the referent to the zombie field.
        ref->SetFieldObject(zombie_offset, forward_address, false);
        heap_->ClearReferenceReferent(ref);
        heap_->EnqueueReference(ref, &cleared_reference_list_);
        has_enqueued = true;
      } else if (referent != forward_address) {
        heap_->SetReferenceReferent(ref, forward_address);
      }
    }
  }
  timings_.EndSplit();
  if (has_enqueued) {
    ProcessMarkStack(true);
  }
  DCHECK(*list == nullptr);
}

// Process reference class instances and schedule finalizations.
void SemiSpace::ProcessReferences(Object** soft_references, bool clear_soft,
                                  Object** weak_references,
                                  Object** finalizer_references,
                                  Object** phantom_references) {
  CHECK(soft_references != nullptr);
  CHECK(weak_references != nullptr);
  CHECK(finalizer_references != nullptr);
  CHECK(phantom_references != nullptr);
  CHECK(mark_stack_->IsEmpty());

  // Unless we are in the zygote or required to clear soft references with white referents,
  // preserve some white referents.
  if (!clear_soft && !Runtime::Current()->IsZygote()) {
    PreserveSomeSoftReferences(soft_references);
  }

  timings_.StartSplit("ProcessReferences");
  // Clear all remaining soft and weak references with white referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);
  timings_.EndSplit();

  // Preserve all white objects with finalize methods and schedule them for finalization.
  EnqueueFinalizerReferences(finalizer_references);

  timings_.StartSplit("ProcessReferences");
  // Clear all finalizer-reachable soft and weak references with white referents.
  ClearWhiteReferences(soft_references);
  ClearWhiteReferences(weak_references);

  // Clear all phantom references with white referents.
  ClearWhiteReferences(phantom_references);

  // At this point all reference lists should be empty.
  DCHECK(*soft_references == nullptr);
  DCHECK(*weak_references == nullptr);
  DCHECK(*finalizer_references == nullptr);
  DCHECK(*phantom_references == nullptr);
  timings_.EndSplit();
}

void SemiSpace::UnBindBitmaps() {
  base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsDlMallocSpace()) {
      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
      if (alloc_space->HasBoundBitmaps()) {
        alloc_space->UnBindBitmaps();
        heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(),
                                              alloc_space->GetMarkBitmap());
      }
    }
  }
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Object* cleared_references = GetClearedReferences();
  Heap* heap = GetHeap();
  timings_.NewSplit("EnqueueClearedReferences");
  heap->EnqueueClearedReferences(&cleared_references);

  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics. Seed the accumulation with a uint64_t so the sum isn't
  // truncated to int.
  total_time_ns_ += GetDurationNs();
  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(),
                                           static_cast<uint64_t>(0), std::plus<uint64_t>());
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art