/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <climits>
#include <functional>
#include <numeric>
#include <vector>
#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;
// TODO: move this to a new file as a new garbage collector?
static constexpr bool kEnableSimplePromo = false;

// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    BindLiveToMarkBitmap(space);
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If the previous space was immune, then extend the immune region. Relies on continuous
    // spaces being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
    }
  }
}

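// Marks every space whose retention policy is never-collect or full-collect as immune.
// ImmuneSpace() also binds each such space's live bitmap to its mark bitmap, so all of its
// objects are implicitly treated as marked for the duration of this collection.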
void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
        space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
      ImmuneSpace(space);
    }
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      to_space_(nullptr),
      from_space_(nullptr),
      soft_reference_list_(nullptr),
      weak_reference_list_(nullptr),
      finalizer_reference_list_(nullptr),
      phantom_reference_list_(nullptr),
      cleared_reference_list_(nullptr),
      self_(nullptr),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0) {
}

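// Resets all of the per-collection state (timings, immune region, reference lists, mark stack)
// before a new collection begins.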
void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  soft_reference_list_ = nullptr;
  weak_reference_list_ = nullptr;
  finalizer_reference_list_ = nullptr;
  phantom_reference_list_ = nullptr;
  cleared_reference_list_ = nullptr;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
}

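// Delegates java.lang.ref.Reference processing to the heap: MarkedForwardingAddressCallback
// reports whether a referent is already marked (and where it moved to), while
// RecursiveMarkObjectCallback marks and transitively processes referents that must be kept alive.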
void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &RecursiveMarkObjectCallback, this);
}

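// The marking phase runs with mutators suspended: swap the semi-spaces, process dirty cards,
// swap the allocation stacks, then mark the roots and everything transitively reachable from
// them, copying each from-space object into the to-space as it is first marked.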
void SemiSpace::MarkingPhase() {
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  heap_->SwapSemiSpaces();
  if (kEnableSimplePromo) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. This happens, for example, on the very first GC or
    // at the pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark the references held by immune spaces (via their mod-union tables).
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

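// A space is immune if it lies entirely within the immune region [immune_begin_, immune_end_]
// built up by ImmuneSpace().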
bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
         immune_end_ >= reinterpret_cast<Object*>(space->End());
}

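// For each immune space, walk its mod-union table (populated from dirty cards) and mark the
// objects that the space references outside of itself.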
void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      CHECK(table != nullptr);
      // TODO: Improve naming.
      TimingLogger::ScopedSplit split(
          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                   "UpdateAndMarkImageModUnionTable",
          &timings_);
      table->UpdateAndMarkReferences(MarkRootCallback, this);
    }
  }
}

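// Marks all objects reachable from the already-marked roots: the allocation stack is marked as
// live first, then the mark stack is exhaustively processed.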
void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();
  // Recursively process the mark stack.
  ProcessMarkStack(true);
}

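// The reclaim phase processes references, sweeps the garbage left in the non-moving spaces, and
// then clears and protects the from-space; all survivors were already copied out (or promoted)
// during the marking phase.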
void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  Thread* self = Thread::Current();
  ProcessReferences(self);
  {
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  const int64_t from_bytes = from_space_->GetBytesAllocated();
  const int64_t to_bytes = to_space_->GetBytesAllocated();
  const int64_t from_objects = from_space_->GetObjectsAllocated();
  const int64_t to_objects = to_space_->GetObjectsAllocated();
  const int64_t freed_bytes = from_bytes - to_bytes;
  const int64_t freed_objects = from_objects - to_objects;
  CHECK_GE(freed_bytes, 0);
  freed_bytes_.fetch_add(freed_bytes);
  freed_objects_.fetch_add(freed_objects);
  heap_->RecordFree(static_cast<size_t>(freed_objects), static_cast<size_t>(freed_bytes));

  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space that we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    UnBindBitmaps();
  }
  // Release the memory used by the from space.
  if (kResetFromSpace) {
    // Clearing from space.
    from_space_->Clear();
  }
  // Protect the from space.
  VLOG(heap) << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
             << reinterpret_cast<void*>(from_space_->Limit());
  if (kProtectFromSpace) {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
  } else {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
  }

  if (kEnableSimplePromo) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

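// Grows the mark stack by copying its contents aside, resizing, and pushing them back.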
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

// Used to mark and copy objects. Any newly-marked object that is in the from-space gets moved
// to the to-space and has its forwarding address recorded. Objects which have been newly marked
// are pushed on the mark stack.
Object* SemiSpace::MarkObject(Object* obj) {
  Object* ret = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has already been moved, return the new forwarding address; otherwise we
      // need to move the object and add it to the mark stack for processing.
      if (forward_address == nullptr) {
        size_t object_size = obj->SizeOf();
        size_t dummy = 0;
        if (kEnableSimplePromo && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
          // If it was allocated before the last GC (older), move (pseudo-promote) it to the
          // non-moving space (as a sort of old generation).
          size_t bytes_promoted;
          space::MallocSpace* non_moving_space = GetHeap()->GetNonMovingSpace();
          forward_address = non_moving_space->Alloc(self_, object_size, &bytes_promoted);
          if (forward_address == nullptr) {
            // If out of space, fall back to the to-space.
            forward_address = to_space_->Alloc(self_, object_size, &dummy);
          } else {
            GetHeap()->num_bytes_allocated_.fetch_add(bytes_promoted);
            bytes_promoted_ += bytes_promoted;
            // Mark forward_address on the live bitmap.
            accounting::SpaceBitmap* live_bitmap = non_moving_space->GetLiveBitmap();
            DCHECK(live_bitmap != nullptr);
            DCHECK(!live_bitmap->Test(forward_address));
            live_bitmap->Set(forward_address);
            // Mark forward_address on the mark bitmap.
            accounting::SpaceBitmap* mark_bitmap = non_moving_space->GetMarkBitmap();
            DCHECK(mark_bitmap != nullptr);
            DCHECK(!mark_bitmap->Test(forward_address));
            mark_bitmap->Set(forward_address);
          }
          DCHECK(forward_address != nullptr);
        } else {
          // If it was allocated after the last GC (younger), copy it to the to-space.
          forward_address = to_space_->Alloc(self_, object_size, &dummy);
        }
        // Copy over the object and add it to the mark stack since we still need to update its
        // references.
        memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
        // Make sure to only update the forwarding address AFTER you copy the object so that the
        // monitor word doesn't get stomped over.
        COMPILE_ASSERT(sizeof(uint32_t) == sizeof(mirror::Object*),
                       monitor_size_must_be_same_as_object);
        obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)));
        MarkStackPush(forward_address);
      } else {
        DCHECK(to_space_->HasAddress(forward_address) ||
               (kEnableSimplePromo && GetHeap()->GetNonMovingSpace()->HasAddress(forward_address)));
      }
      ret = forward_address;
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        DCHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return ret;
}

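// Callback used during reference processing: marks |root| and then exhaustively processes the
// mark stack, so that everything newly reachable from |root| is marked before returning.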
Object* SemiSpace::RecursiveMarkObjectCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
  mirror::Object* ret = semi_space->MarkObject(root);
  semi_space->ProcessMarkStack(true);
  return ret;
}

Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

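// Binds a malloc space's live bitmap in place of its mark bitmap, so objects that were live
// before this GC are implicitly treated as marked; the heap's bitmap set is updated to match.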
void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
  CHECK(space->IsMallocSpace());
  space::MallocSpace* alloc_space = space->AsMallocSpace();
  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
}

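// Returns the current address of |obj|. Callers are expected to have already forwarded any
// from-space references, so seeing a from-space address here is a fatal error.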
mirror::Object* SemiSpace::GetForwardingAddress(mirror::Object* obj) {
  if (from_space_->HasAddress(obj)) {
    LOG(FATAL) << "Shouldn't happen!";
    return GetForwardingAddressInFromSpace(obj);
  }
  return obj;
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

struct SweepCallbackContext {
  SemiSpace* mark_sweep;
  space::AllocSpace* space;
  Thread* self;
};

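// Sweep callbacks invoked by SpaceBitmap::SweepWalk on each batch of unmarked objects:
// SweepCallback frees them outright, while ZygoteSweepCallback only clears their live bits and
// dirties their cards, to avoid touching the shared zygote pages.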
void SemiSpace::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  SemiSpace* gc = context->mark_sweep;
  Heap* heap = gc->GetHeap();
  space::AllocSpace* space = context->space;
  Thread* self = context->self;
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
  heap->RecordFree(num_ptrs, freed_bytes);
  gc->freed_objects_.fetch_add(num_ptrs);
  gc->freed_bytes_.fetch_add(freed_bytes);
}

void SemiSpace::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
  Heap* heap = context->mark_sweep->GetHeap();
  // We don't free any actual memory to avoid dirtying the shared zygote pages.
  for (size_t i = 0; i < num_ptrs; ++i) {
    Object* obj = static_cast<Object*>(ptrs[i]);
    heap->GetLiveBitmap()->Clear(obj);
    heap->GetCardTable()->MarkCard(obj);
  }
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);

  const bool partial = (GetGcType() == kGcTypePartial);
  SweepCallbackContext scc;
  scc.mark_sweep = this;
  scc.self = Thread::Current();
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (!space->IsMallocSpace()) {
      continue;
    }
    // We always sweep spaces with an always-collect retention policy.
    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
    if (!partial && !sweep_space) {
      // We also sweep full-collect spaces when the GC isn't partial (i.e. it's a full GC).
      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
    }
    if (sweep_space && space->IsMallocSpace()) {
      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
      scc.space = space->AsMallocSpace();
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
      if (swap_bitmaps) {
        std::swap(live_bitmap, mark_bitmap);
      }
      if (!space->IsZygoteSpace()) {
        TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &SweepCallback, reinterpret_cast<void*>(&scc));
      } else {
        TimingLogger::ScopedSplit split("SweepZygote", &timings_);
        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
        // memory.
        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
      }
    }
  }

  SweepLargeObjects(swap_bitmaps);
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  // Sweep large objects.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
  if (swap_bitmaps) {
    std::swap(large_live_objects, large_mark_objects);
  }
  // O(n*log(n)) but hopefully there are not too many large objects.
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  Thread* self = Thread::Current();
  for (const Object* obj : large_live_objects->GetObjects()) {
    if (!large_mark_objects->Test(obj)) {
      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
      ++freed_objects;
    }
  }
  freed_large_objects_.fetch_add(freed_objects);
  freed_large_object_bytes_.fetch_add(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}

// Visit all of the references of an object, updating each one to point to its new location.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
      bool /* is_static */) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      obj->SetFieldObject(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass();
  if (UNLIKELY(klass->IsReferenceClass())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    ScanObject(mark_stack_->PopBack());
  }
  timings_.EndSplit();
}

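// If |obj| is marked, returns its current address: the forwarding address if it was copied out
// of the from-space, or |obj| itself otherwise. Returns null for unmarked objects.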
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    // If the object is forwarded then it MUST be marked.
    DCHECK(forwarding_address == nullptr || to_space_->HasAddress(forwarding_address) ||
           (kEnableSimplePromo && GetHeap()->GetNonMovingSpace()->HasAddress(forwarding_address)));
    if (forwarding_address != nullptr) {
      return forwarding_address;
    }
    // Not forwarded, so it must not be marked; return null.
    return nullptr;
  } else if (to_space_->HasAddress(obj)) {
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::UnBindBitmaps() {
  TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsMallocSpace()) {
      space::MallocSpace* alloc_space = space->AsMallocSpace();
      if (alloc_space->HasBoundBitmaps()) {
        alloc_space->UnBindBitmaps();
        heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(),
                                              alloc_space->GetMarkBitmap());
      }
    }
  }
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

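// Tears down per-collection state: runs post-GC verification, accumulates the freed-memory
// statistics and timings, and clears the mark bitmaps and mark stack for the next collection.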
void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  // Can't enqueue references if we hold the mutator lock.
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();
}

}  // namespace collector
}  // namespace gc
}  // namespace art