/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <sys/mman.h>

#include <climits>
#include <cstring>
#include <functional>
#include <numeric>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

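// Debug flags: kProtectFromSpace mprotects the from-space after the collection so that stale
// references into it fault immediately; kResetFromSpace clears the from-space once the
// collection is done.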
static constexpr bool kProtectFromSpace = true;
static constexpr bool kResetFromSpace = true;

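// Adds |space| to the immune region, merging it with an adjacent immune space when possible.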
// TODO: Unduplicate logic.
void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
  // Bind live to mark bitmap if necessary.
  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
    CHECK(space->IsContinuousMemMapAllocSpace());
    space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
  }
  // Add the space to the immune region.
  if (immune_begin_ == nullptr) {
    DCHECK(immune_end_ == nullptr);
    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
    immune_end_ = reinterpret_cast<Object*>(space->End());
  } else {
    const space::ContinuousSpace* prev_space = nullptr;
    // Find out if the previous space is immune.
    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
      if (cur_space == space) {
        break;
      }
      prev_space = cur_space;
    }
    // If previous space was immune, then extend the immune region. Relies on continuous spaces
    // being sorted by Heap::AddContinuousSpace.
    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
      // Use Limit() instead of End() because otherwise if the generational mode is enabled,
      // the alloc space might expand due to promotion and the sense of immunity may change in
      // the middle of a GC.
      immune_end_ = std::max(reinterpret_cast<Object*>(space->Limit()), immune_end_);
    }
  }
}

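// Sets up the immune region and, for the to-space, binds the live bitmap to the mark bitmap.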
void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the immune
                 // space if this is a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        ImmuneSpace(space);
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      immune_begin_(nullptr),
      immune_end_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      from_space_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_begin_ = nullptr;
  immune_end_ = nullptr;
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &RecursiveMarkObjectCallback, this);
}

void SemiSpace::MarkingPhase() {
  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If an explicit, native allocation-triggered, or last attempt collection, collect the
      // whole heap (and reset the interval counter to be consistent.)
      whole_heap_collection_ = true;
      whole_heap_collection_interval_counter_ = 0;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
    }
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into
  // the wrong space.
  heap_->SwapSemiSpaces();
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space (the to-space from the
    // last GC), then point it to the beginning of the from-space. For example, the very first
    // GC or the pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  heap_->SwapStacks();
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

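// Returns true if |space| lies entirely within the current immune region.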
bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
  return
      immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
      immune_end_ >= reinterpret_cast<Object*>(space->End());
}

void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (IsImmuneSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkRootCallback, this);
      } else {
        // In a bump pointer space only collection, the non-moving space is added to the immune
        // space. But the non-moving space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()));
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an exclusive lock on the
    // mutator lock, but SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }
 private:
  SemiSpace* semi_space_;
};

void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the non-moving space when a bump
    // pointer space only collection is enabled), then we need to scan its live bitmap as roots
    // (including the objects on the live stack which have just been marked in the live bitmap
    // above in MarkAllocStackAsLive().)
    if (IsImmuneSpace(space) && heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() ||
              space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
      SemiSpaceScanObjectVisitor visitor(this);
      live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                    reinterpret_cast<uintptr_t>(space->End()),
                                    visitor);
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from BindBitmaps() as the large
    // objects on the allocation stack may be newly added to the live set above in
    // MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the large object space as roots
    // as they contain references to their classes (primitive array classes) that could move
    // though they don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack(true);
}

void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  // Release the memory used by the from space.
  if (kResetFromSpace) {
    // Clearing from space.
    from_space_->Clear();
  }
  // Protect the from space.
  VLOG(heap)
      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
      << reinterpret_cast<void*>(from_space_->Limit());
  if (kProtectFromSpace) {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
  } else {
    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish between objects that were
    // allocated since the last GC and the older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

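// Grows the mark stack to |new_size| while preserving its existing contents.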
void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

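// memcpy alternative for copying into zero-initialized memory: destination pages whose source
// bytes are all zero are skipped entirely so that they are never dirtied. Returns the number
// of bytes whose writes were avoided.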
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

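// Copies a newly marked object out of the from-space. In generational mode, objects that
// survived the previous GC are promoted to the main free list space; everything else is
// copied to the to-space. Returns the forwarding address of the copy.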
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move (pseudo-promote) it to the main free
    // list space (as sort of an old generation.)
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Handle the bitmaps marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // In a bump pointer space only collection, delay the live bitmap marking of the
        // promoted object until it's popped off the mark stack (ProcessMarkStack()). The
        // rationale: we may be in the middle of scanning the objects in the promo destination
        // space for non-moving-space-to-bump-pointer-space references by iterating over the
        // marked bits of the live bitmap (MarkReachableObjects()). If we don't delay it (and
        // instead mark the promoted object here), the above promo destination space scan could
        // encounter the just-promoted object and forward the references in the promoted
        // object's fields even though it is pushed onto the mark stack. If this happens, the
        // promoted object would be in an inconsistent state, that is, it's on the mark stack
        // (gray) but its fields are already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved
// to the to-space and have their forward address updated. Objects which have been newly marked
// are pushed on the mark stack.
Object* SemiSpace::MarkObject(Object* obj) {
  Object* forward_address = obj;
  if (obj != nullptr && !IsImmune(obj)) {
    if (from_space_->HasAddress(obj)) {
      forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has already been moved, return the new forward address.
      if (forward_address == nullptr) {
        forward_address = MarkNonForwardedObject(obj);
        DCHECK(forward_address != nullptr);
        // Make sure to only update the forwarding address AFTER you copy the object so that
        // the monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(
            reinterpret_cast<size_t>(forward_address)));
        // Push the object onto the mark stack for later processing.
        MarkStackPush(forward_address);
      }
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        if (generational_) {
          // In a bump pointer space only collection, we should not reach here as we don't/won't
          // mark the objects in the non-moving space (except for the promoted objects.) Note
          // the non-moving space is added to the immune space.
          DCHECK(whole_heap_collection_);
        }
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        CHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return forward_address;
}

mirror::Object* SemiSpace::RecursiveMarkObjectCallback(mirror::Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  SemiSpace* semi_space = reinterpret_cast<SemiSpace*>(arg);
  mirror::Object* ret = semi_space->MarkObject(root);
  semi_space->ProcessMarkStack(true);
  return ret;
}

Object* SemiSpace::MarkRootCallback(Object* root, void* arg, uint32_t /*thread_id*/,
                                    RootType /*root_type*/) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
  timings_.EndSplit();
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

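// Neither of the two semispaces nor the immune spaces are swept.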
bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !IsImmuneSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj, MarkedForwardingAddressCallback, this);
}

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
      bool /* is_static */) ALWAYS_INLINE_LAMBDA NO_THREAD_SAFETY_ANALYSIS {
    mirror::Object* new_address = MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // Don't need to mark the card since we are updating the object address and not changing
      // the actual objects it points to. Using SetFieldObjectWithoutWriteBarrier is better in
      // this case since it does not dirty cards or use additional memory.
      obj->SetFieldObjectWithoutWriteBarrier(offset, new_address, false);
    }
  }, kMovingClasses);
  mirror::Class* klass = obj->GetClass();
  if (UNLIKELY(klass->IsReferenceClass())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack(bool paused) {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::SpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // In a bump pointer space only collection (with promotion enabled), we delay the
    // live-bitmap marking of promoted objects from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it, which is delayed from
      // MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

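// Returns the post-GC address of a marked object: the object itself if it is immune or marked
// in place, its forwarding address if it was moved out of the from-space, or nullptr if it is
// not marked.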
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (IsImmune(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    return forwarding_address;  // Returns either the forwarding address or nullptr.
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();

  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer space only collection at
    // the next collection by updating whole_heap_collection. Enable whole_heap_collection once
    // every kDefaultWholeHeapCollectionInterval collections.
    if (!whole_heap_collection_) {
      --whole_heap_collection_interval_counter_;
      DCHECK_GE(whole_heap_collection_interval_counter_, 0);
      if (whole_heap_collection_interval_counter_ == 0) {
        whole_heap_collection_ = true;
      }
    } else {
      DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
      whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
      whole_heap_collection_ = false;
    }
  }
}

}  // namespace collector
}  // namespace gc
}  // namespace art