/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space.h"

#include <functional>
#include <numeric>
#include <climits>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "monitor.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/reference-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "runtime.h"
#include "semi_space-inl.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

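// Compile-time switches (these mirror how they are used below): if
// kProtectFromSpace is set, the from-space is mprotected after the collection
// so that stray reads of stale references fault instead of silently returning
// garbage; if kClearFromSpace is set, the from-space memory is released back
// to the system; if kStoreStackTraces is set, all thread stacks are dumped
// into the runtime fault message before marking so that a later
// heap-corruption crash report carries them.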
static constexpr bool kProtectFromSpace = true;
static constexpr bool kClearFromSpace = true;
static constexpr bool kStoreStackTraces = false;

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the immune space
                 // if this is a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space if this is a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      mark_stack_(nullptr),
      is_large_object_space_immune_(false),
      to_space_(nullptr),
      to_space_live_bitmap_(nullptr),
      from_space_(nullptr),
      self_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0),
      saved_bytes_(0) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

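// The marking phase runs during the pause: with all mutators suspended we
// swap the semispaces, process dirty cards, mark the roots and then
// transitively mark (and copy) everything reachable from them.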
void SemiSpace::MarkingPhase() {
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }

  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If this is an explicit, native allocation-triggered, or last attempt
      // collection, collect the whole heap (and reset the interval counter to
      // be consistent).
      whole_heap_collection_ = true;
      whole_heap_collection_interval_counter_ = 0;
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
    }
  }
  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
  // wrong space.
  heap_->SwapSemiSpaces();
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space
    // (the to-space from the last GC), then point it to the beginning of
    // the from-space. For example, the very first GC or the
    // pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

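// Immune spaces are not scanned directly. A mod-union table (for the image
// and zygote spaces) or a remembered set records which of their cards may
// hold references out into the collected spaces; marking just those
// references here is what makes skipping the full immune-space scan safe.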
void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (immune_region_.ContainsSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkObjectCallback, this);
      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
        DCHECK(kUseRememberedSet);
        // If this is a bump pointer space only collection, the non-moving
        // space is added to the immune space. The non-moving space
        // doesn't have a mod union table, but has a remembered
        // set. Its dirty cards will be scanned later in
        // MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      } else {
        DCHECK(!kUseRememberedSet);
        // If this is a bump pointer space only collection, the non-moving
        // space is added to the immune space. But the non-moving
        // space doesn't have a mod union table. Instead, its live
        // bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an
    // exclusive lock on the mutator lock, but
    // SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }

 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(space::ContinuousMemMapAllocSpace* from_space)
      : from_space_(from_space) {}

  void operator()(Object* obj, Object* ref, const MemberOffset& offset, bool /* is_static */)
      const ALWAYS_INLINE {
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
    }
    DCHECK(!from_space_->HasAddress(ref));
  }

 private:
  space::ContinuousMemMapAllocSpace* const from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  MarkSweep::VisitObjectReferences<kMovingClasses>(obj, visitor);
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }

 private:
  SemiSpace* const semi_space_;
};

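// Computes the transitive closure of the marked set: first mark everything on
// the allocation stack as live, then scan the immune spaces that lack a
// mod-union table (via their remembered set or live bitmap) and the immune
// large object space, and finally drain the mark stack.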
void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod union table (the
    // non-moving space when the bump pointer space only collection is
    // enabled), then we need to scan its live bitmap or dirty cards as roots
    // (including the objects on the live stack which have just been marked
    // in the live bitmap above in MarkAllocStackAsLive().)
    if (immune_region_.ContainsSpace(space) &&
        heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() || space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
      if (kUseRememberedSet) {
        DCHECK(rem_set != nullptr);
        rem_set->UpdateAndMarkReferences(MarkObjectCallback, from_space_, this);
        if (kIsDebugBuild) {
          // Verify that there are no from-space references that
          // remain in the space, that is, the remembered set (and the
          // card table) didn't miss any from-space references in the
          // space.
          accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        DCHECK(rem_set == nullptr);
        accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from
    // BindBitmaps() as the large objects on the allocation stack may
    // be newly added to the live set above in MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan the
    // large object space as roots as they contain references to their
    // classes (primitive array classes) that could move even though they
    // don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack();
}

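// The reclaim phase runs after marking: process references, sweep system
// weaks and unmarked objects, record the freed counts, then release and
// protect the from-space.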
void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an
    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
    // bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  if (kClearFromSpace) {
    // Release the memory used by the from space.
    from_space_->Clear();
  }
  from_space_->Reset();
  // Protect the from space.
  VLOG(heap) << "Protecting space " << *from_space_;
  if (kProtectFromSpace) {
    from_space_->GetMemMap()->Protect(PROT_NONE);
  } else {
    from_space_->GetMemMap()->Protect(PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish
    // between objects that were allocated since the last GC and the
    // older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

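// The copy destination is assumed to be zero-filled (debug builds verify
// this below), so for copies larger than a page we compare word by word and
// skip writing pages whose source is entirely zero. Untouched destination
// pages are never dirtied, which presumably lets them stay backed by shared
// zero pages; saved_bytes counts the pages skipped this way.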
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty, don't bother with checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  CHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

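// First-time marking of a from-space object: pick a destination (the
// promotion space for objects that survived the previous GC, otherwise the
// to-space), copy the object there and update the relevant bitmaps.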
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it's allocated before the last GC (older), move
    // (pseudo-promote) it to the main free list space (as sort
    // of an old generation.)
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Dirty the card at the destination as it may contain
      // references (including the class pointer) to the bump pointer
      // space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // If a bump pointer space only collection, delay the live
        // bitmap marking of the promoted object until it's popped off
        // the mark stack (ProcessMarkStack()). The rationale: we may
        // be in the middle of scanning the objects in the promo
        // destination space for
        // non-moving-space-to-bump-pointer-space references by
        // iterating over the marked bits of the live bitmap
        // (MarkReachableObjects()). If we don't delay it (and instead
        // mark the promoted object here), the above promo destination
        // space scan could encounter the just-promoted object and
        // forward the references in the promoted object's fields even
        // though it is pushed onto the mark stack. If this happens,
        // the promoted object would be in an inconsistent state, that
        // is, it's on the mark stack (gray) but its fields are
        // already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it's allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBrooksPointer) {
    obj->AssertSelfBrooksPointer();
    DCHECK_EQ(forward_address->GetBrooksPointer(), obj);
    forward_address->SetBrooksPointer(forward_address);
    forward_address->AssertSelfBrooksPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

// Used to mark and copy objects. Any newly-marked objects that are in the from space get moved to
// the to-space and have their forward address updated. Objects which have been newly marked are
// pushed on the mark stack.
Object* SemiSpace::MarkObject(Object* obj) {
  if (kUseBrooksPointer) {
    // Verify all the objects have the correct forward pointer installed.
    if (obj != nullptr) {
      obj->AssertSelfBrooksPointer();
    }
  }
  Object* forward_address = obj;
  if (obj != nullptr && !immune_region_.ContainsObject(obj)) {
    if (from_space_->HasAddress(obj)) {
      forward_address = GetForwardingAddressInFromSpace(obj);
      // If the object has not already been moved, mark (and move) it now.
      if (forward_address == nullptr) {
        forward_address = MarkNonForwardedObject(obj);
        DCHECK(forward_address != nullptr);
        // Make sure to only update the forwarding address AFTER you copy the object so that the
        // monitor word doesn't get stomped over.
        obj->SetLockWord(LockWord::FromForwardingAddress(
            reinterpret_cast<size_t>(forward_address)));
        // Push the object onto the mark stack for later processing.
        MarkStackPush(forward_address);
      }
      // TODO: Do we need this if in the else statement?
    } else {
      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
      if (LIKELY(object_bitmap != nullptr)) {
        if (generational_) {
          // If a bump pointer space only collection, we should not
          // reach here as we don't/won't mark the objects in the
          // non-moving space (except for the promoted objects.) Note
          // the non-moving space is added to the immune space.
          DCHECK(whole_heap_collection_);
        }
        // This object was not previously marked.
        if (!object_bitmap->Test(obj)) {
          object_bitmap->Set(obj);
          MarkStackPush(obj);
        }
      } else {
        CHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
        if (MarkLargeObject(obj)) {
          MarkStackPush(obj);
        }
      }
    }
  }
  return forward_address;
}

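// Static trampolines: the reference processor, mod-union tables, remembered
// sets and root visiting take C-style callbacks, so these unpack the
// SemiSpace* from the void* argument and forward to the member functions.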
void SemiSpace::ProcessMarkStackCallback(void* arg) {
  DCHECK(arg != nullptr);
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  DCHECK(root != nullptr);
  DCHECK(arg != nullptr);
  *root = reinterpret_cast<SemiSpace*>(arg)->MarkObject(*root);
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
  heap_->DelayReferenceReferent(klass, obj->AsReference(), MarkedForwardingAddressCallback, this);
}

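// Field visitor used by ScanObject below: every reference held by a scanned
// object is run through MarkObject(), and if that returns a new (forwarded)
// address the field is rewritten in place.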
class SemiSpaceMarkObjectVisitor {
 public:
  explicit SemiSpaceMarkObjectVisitor(SemiSpace* semi_space) : semi_space_(semi_space) {
  }

  void operator()(Object* obj, Object* ref, const MemberOffset& offset, bool /* is_static */)
      const ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS
      /* EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) */ {
    mirror::Object* new_address = semi_space_->MarkObject(ref);
    if (new_address != ref) {
      DCHECK(new_address != nullptr);
      // Don't need to mark the card since we are updating the object address and not changing
      // the actual objects it points to. Using SetFieldObjectWithoutWriteBarrier is better in
      // this case since it does not dirty cards or use additional memory.
      // Since we do not change the actual object, we can safely use non-transactional mode. Also
      // disable check as we could run inside a transaction.
      obj->SetFieldObjectWithoutWriteBarrier<false, false, kVerifyNone>(offset, new_address, false);
    }
  }

 private:
  SemiSpace* const semi_space_;
};

// Visit all of the references of an object and update.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(obj != nullptr);
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceMarkObjectVisitor visitor(this);
  MarkSweep::VisitObjectReferences<kMovingClasses>(obj, visitor);
  mirror::Class* klass = obj->GetClass<kVerifyNone>();
  if (UNLIKELY(klass->IsReferenceClass<kVerifyNone>())) {
    DelayReferenceReferent(klass, obj);
  }
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::SpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // If a bump pointer space only collection (and the promotion is
    // enabled), we delay the live-bitmap marking of promoted objects
    // from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit("ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it,
      // which is delayed from MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (immune_region_.ContainsObject(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(const_cast<Object*>(obj));
    return forwarding_address;  // Returns either the forwarding address or nullptr.
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

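// Resets per-collection state (spaces, bitmaps, mark stack), folds the
// timings into the cumulative loggers and, in generational mode, decides
// whether the next collection will be a whole-heap one.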
void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);

  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;

  // Update the cumulative statistics.
  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();

  // Ensure that the mark stack is empty.
  CHECK(mark_stack_->IsEmpty());

  // Update the cumulative loggers.
  cumulative_timings_.Start();
  cumulative_timings_.AddLogger(timings_);
  cumulative_timings_.End();

  // Clear all of the spaces' mark bitmaps.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
    if (bitmap != nullptr &&
        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
      bitmap->Clear();
    }
  }
  mark_stack_->Reset();

  // Reset the marked large objects.
  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
  large_objects->GetMarkObjects()->Clear();

  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer
    // space only collection at the next collection by updating
    // whole_heap_collection_. Enable whole_heap_collection_ once every
    // kDefaultWholeHeapCollectionInterval collections.
    if (!whole_heap_collection_) {
      --whole_heap_collection_interval_counter_;
      DCHECK_GE(whole_heap_collection_interval_counter_, 0);
      if (whole_heap_collection_interval_counter_ == 0) {
        whole_heap_collection_ = true;
      }
    } else {
      DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
      whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
      whole_heap_collection_ = false;
    }
  }
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
  GetHeap()->RevokeAllThreadLocalBuffers();
  timings_.EndSplit();
}

}  // namespace collector
}  // namespace gc
}  // namespace art