/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "semi_space-inl.h"

#include <climits>
#include <functional>
#include <numeric>
#include <vector>

#include "base/logging.h"
#include "base/macros.h"
#include "base/mutex-inl.h"
#include "base/timing_logger.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/accounting/mod_union_table.h"
#include "gc/accounting/remembered_set.h"
#include "gc/accounting/space_bitmap-inl.h"
#include "gc/heap.h"
#include "gc/space/bump_pointer_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/image_space.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "indirect_reference_table.h"
#include "intern_table.h"
#include "jni_internal.h"
#include "mark_sweep-inl.h"
#include "mirror/art_field.h"
#include "mirror/art_field-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/dex_cache.h"
#include "mirror/object-inl.h"
#include "mirror/object_array.h"
#include "mirror/object_array-inl.h"
#include "mirror/reference-inl.h"
#include "monitor.h"
#include "runtime.h"
#include "stack.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "verifier/method_verifier.h"

using ::art::mirror::Class;
using ::art::mirror::Object;

namespace art {
namespace gc {
namespace collector {

// Whether to mprotect the from-space after the collection so that stale references into it
// fault immediately.
static constexpr bool kProtectFromSpace = true;
// Whether to dump all thread stacks into the runtime fault message before collecting, to help
// diagnose heap-corruption crashes.
static constexpr bool kStoreStackTraces = false;
// Whether to trigger whole heap collections based on the number of bytes promoted since the
// last whole heap collection, rather than on a fixed collection interval.
static constexpr bool kUseBytesPromoted = true;
static constexpr size_t kBytesPromotedThreshold = 4 * MB;

void SemiSpace::BindBitmaps() {
  timings_.StartSplit("BindBitmaps");
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  // Mark all of the spaces we never collect as immune.
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->GetLiveBitmap() != nullptr) {
      if (space == to_space_) {
        CHECK(to_space_->IsContinuousMemMapAllocSpace());
        to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap();
      } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
                 || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect
                 // Add the main free list space and the non-moving space to the immune space
                 // during a bump pointer space only collection.
                 || (generational_ && !whole_heap_collection_ &&
                     (space == GetHeap()->GetNonMovingSpace() ||
                      space == GetHeap()->GetPrimaryFreeListSpace()))) {
        CHECK(immune_region_.AddContinuousSpace(space)) << "Failed to add space " << *space;
      }
    }
  }
  if (generational_ && !whole_heap_collection_) {
    // We won't collect the large object space during a bump pointer space only collection.
    is_large_object_space_immune_ = true;
  }
  timings_.EndSplit();
}

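// When |generational| is true, the collector runs generationally: objects that survived the
// previous GC (those below last_gc_to_space_end_) are pseudo-promoted to the main free list
// space in MarkNonForwardedObject(), and most collections only scan the bump pointer spaces,
// with whole heap collections triggered by the heuristics in MarkingPhase() and FinishPhase().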
SemiSpace::SemiSpace(Heap* heap, bool generational, const std::string& name_prefix)
    : GarbageCollector(heap,
                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
      to_space_(nullptr),
      from_space_(nullptr),
      generational_(generational),
      last_gc_to_space_end_(nullptr),
      bytes_promoted_(0),
      bytes_promoted_since_last_whole_heap_collection_(0),
      whole_heap_collection_(true),
      whole_heap_collection_interval_counter_(0),
      collector_name_(name_) {
}

void SemiSpace::InitializePhase() {
  timings_.Reset();
  TimingLogger::ScopedSplit split("InitializePhase", &timings_);
  mark_stack_ = heap_->mark_stack_.get();
  DCHECK(mark_stack_ != nullptr);
  immune_region_.Reset();
  is_large_object_space_immune_ = false;
  saved_bytes_ = 0;
  self_ = Thread::Current();
  // Do any pre GC verification.
  timings_.NewSplit("PreGcVerification");
  heap_->PreGcVerification(this);
  CHECK(from_space_->CanMoveObjects()) << "Attempting to move from " << *from_space_;
  // Set the initial bitmap.
  to_space_live_bitmap_ = to_space_->GetLiveBitmap();
}

void SemiSpace::ProcessReferences(Thread* self) {
  TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
  GetHeap()->ProcessReferences(timings_, clear_soft_references_, &MarkedForwardingAddressCallback,
                               &MarkObjectCallback, &ProcessMarkStackCallback, this);
}

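// The marking phase runs with the mutators suspended. It selects between a whole heap and a
// bump pointer space only collection (in generational mode), binds the bitmaps, processes and
// clears the card table, swaps the allocation stacks, and then marks the roots and everything
// reachable from them.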
void SemiSpace::MarkingPhase() {
  if (kStoreStackTraces) {
    Locks::mutator_lock_->AssertExclusiveHeld(self_);
    // Store the stack traces into the runtime fault string in case we get a heap corruption
    // related crash later.
    ThreadState old_state = self_->SetStateUnsafe(kRunnable);
    std::ostringstream oss;
    Runtime* runtime = Runtime::Current();
    runtime->GetThreadList()->DumpForSigQuit(oss);
    runtime->GetThreadList()->DumpNativeStacks(oss);
    runtime->SetFaultMessage(oss.str());
    CHECK_EQ(self_->SetStateUnsafe(old_state), kRunnable);
  }

  if (generational_) {
    if (gc_cause_ == kGcCauseExplicit || gc_cause_ == kGcCauseForNativeAlloc ||
        clear_soft_references_) {
      // If this is an explicit, native allocation-triggered, or last attempt collection,
      // collect the whole heap (and reset the interval counter to be consistent).
      whole_heap_collection_ = true;
      if (!kUseBytesPromoted) {
        whole_heap_collection_interval_counter_ = 0;
      }
    }
    if (whole_heap_collection_) {
      VLOG(heap) << "Whole heap collection";
      name_ = collector_name_ + " whole";
    } else {
      VLOG(heap) << "Bump pointer space only collection";
      name_ = collector_name_ + " bps";
    }
  }

  if (!clear_soft_references_) {
    if (!generational_) {
      // If non-generational, always clear soft references.
      clear_soft_references_ = true;
    } else {
      // If generational, clear soft references only during a whole heap collection.
      if (whole_heap_collection_) {
        clear_soft_references_ = true;
      }
    }
  }

  Locks::mutator_lock_->AssertExclusiveHeld(self_);

  TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
  if (generational_) {
    // If last_gc_to_space_end_ is out of the bounds of the from-space (the to-space from the
    // last GC), then point it to the beginning of the from-space. This happens, for example,
    // on the very first GC or during the pre-zygote compaction.
    if (!from_space_->HasAddress(reinterpret_cast<mirror::Object*>(last_gc_to_space_end_))) {
      last_gc_to_space_end_ = from_space_->Begin();
    }
    // Reset this before the marking starts below.
    bytes_promoted_ = 0;
  }
  // Assume the cleared space is already empty.
  BindBitmaps();
  // Process dirty cards and add dirty cards to mod-union tables.
  heap_->ProcessCards(timings_, kUseRememberedSet && generational_);
  // Clear the whole card table since we cannot get any additional dirty cards during the
  // paused GC. This saves memory but only works for pause-the-world collectors.
  timings_.NewSplit("ClearCardTable");
  heap_->GetCardTable()->ClearCardTable();
  // Need to do this before the checkpoint since we don't want any threads to add references to
  // the live stack during the recursive mark.
  timings_.NewSplit("SwapStacks");
  if (kUseThreadLocalAllocationStack) {
    heap_->RevokeAllThreadLocalAllocationStacks(self_);
  }
  heap_->SwapStacks(self_);
  WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
  MarkRoots();
  // Mark roots of immune spaces.
  UpdateAndMarkModUnion();
  // Recursively mark remaining objects.
  MarkReachableObjects();
}

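// Mark the references held by the immune spaces that point into other (moving) spaces by
// scanning their mod-union tables. Immune spaces without a mod-union table (the remembered
// set case) are handled later in MarkReachableObjects() instead.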
void SemiSpace::UpdateAndMarkModUnion() {
  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune then we need to mark the references to other spaces.
    if (immune_region_.ContainsSpace(space)) {
      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
      if (table != nullptr) {
        // TODO: Improve naming.
        TimingLogger::ScopedSplit split(
            space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
                                     "UpdateAndMarkImageModUnionTable",
            &timings_);
        table->UpdateAndMarkReferences(MarkHeapReferenceCallback, this);
      } else if (heap_->FindRememberedSetFromSpace(space) != nullptr) {
        DCHECK(kUseRememberedSet);
        // During a bump pointer space only collection, the non-moving space is added to the
        // immune space. The non-moving space doesn't have a mod-union table, but it has a
        // remembered set. Its dirty cards will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      } else {
        DCHECK(!kUseRememberedSet);
        // During a bump pointer space only collection, the non-moving space is added to the
        // immune space. But the non-moving space doesn't have a mod-union table. Instead, its
        // live bitmap will be scanned later in MarkReachableObjects().
        DCHECK(generational_ && !whole_heap_collection_ &&
               (space == heap_->GetNonMovingSpace() || space == heap_->GetPrimaryFreeListSpace()))
            << "Space " << space->GetName() << " "
            << "generational_=" << generational_ << " "
            << "whole_heap_collection_=" << whole_heap_collection_ << " ";
      }
    }
  }
}

class SemiSpaceScanObjectVisitor {
 public:
  explicit SemiSpaceScanObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    // TODO: fix NO_THREAD_SAFETY_ANALYSIS. ScanObject() requires an exclusive lock on the
    // mutator lock, but SpaceBitmap::VisitMarkedRange() only requires the shared lock.
    DCHECK(obj != nullptr);
    semi_space_->ScanObject(obj);
  }

 private:
  SemiSpace* const semi_space_;
};

// Used to verify that there are no references to the from-space.
class SemiSpaceVerifyNoFromSpaceReferencesVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesVisitor(
      space::ContinuousMemMapAllocSpace* from_space) : from_space_(from_space) {}

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) ALWAYS_INLINE {
    mirror::Object* ref = obj->GetFieldObject<mirror::Object>(offset, false);
    if (from_space_->HasAddress(ref)) {
      Runtime::Current()->GetHeap()->DumpObject(LOG(INFO), obj);
      LOG(FATAL) << ref << " found in from space";
    }
  }

 private:
  space::ContinuousMemMapAllocSpace* from_space_;
};

void SemiSpace::VerifyNoFromSpaceReferences(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceVerifyNoFromSpaceReferencesVisitor visitor(from_space_);
  obj->VisitReferences<kMovingClasses>(visitor);
}

class SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor {
 public:
  explicit SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor(SemiSpace* ss) : semi_space_(ss) {}
  void operator()(Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    DCHECK(obj != nullptr);
    semi_space_->VerifyNoFromSpaceReferences(obj);
  }

 private:
  SemiSpace* const semi_space_;
};

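// Mark everything that is transitively reachable: mark the objects on the allocation stack as
// live, scan the immune spaces that have no mod-union table via their remembered sets or live
// bitmaps, scan the large object space if it is immune, and finally drain the mark stack.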
void SemiSpace::MarkReachableObjects() {
  timings_.StartSplit("MarkStackAsLive");
  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
  heap_->MarkAllocStackAsLive(live_stack);
  live_stack->Reset();
  timings_.EndSplit();

  for (auto& space : heap_->GetContinuousSpaces()) {
    // If the space is immune and has no mod-union table (the non-moving space during a bump
    // pointer space only collection), then we need to scan its live bitmap or dirty cards as
    // roots (including the objects on the live stack which have just been marked in the live
    // bitmap above in MarkAllocStackAsLive()).
    if (immune_region_.ContainsSpace(space) &&
        heap_->FindModUnionTableFromSpace(space) == nullptr) {
      DCHECK(generational_ && !whole_heap_collection_ &&
             (space == GetHeap()->GetNonMovingSpace() ||
              space == GetHeap()->GetPrimaryFreeListSpace()));
      accounting::RememberedSet* rem_set = heap_->FindRememberedSetFromSpace(space);
      if (kUseRememberedSet) {
        DCHECK(rem_set != nullptr);
        rem_set->UpdateAndMarkReferences(MarkHeapReferenceCallback, from_space_, this);
        if (kIsDebugBuild) {
          // Verify that no from-space references remain in the space, that is, that the
          // remembered set (and the card table) didn't miss any from-space references in the
          // space.
          accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
          SemiSpaceVerifyNoFromSpaceReferencesObjectVisitor visitor(this);
          live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                        reinterpret_cast<uintptr_t>(space->End()),
                                        visitor);
        }
      } else {
        DCHECK(rem_set == nullptr);
        accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
        SemiSpaceScanObjectVisitor visitor(this);
        live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
                                      reinterpret_cast<uintptr_t>(space->End()),
                                      visitor);
      }
    }
  }

  if (is_large_object_space_immune_) {
    DCHECK(generational_ && !whole_heap_collection_);
    // Delay copying the live set to the marked set until here from BindBitmaps() as the large
    // objects on the allocation stack may have been newly added to the live set above in
    // MarkAllocStackAsLive().
    GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked();

    // When the large object space is immune, we need to scan its objects as roots since they
    // contain references to their classes (primitive array classes), which could move even
    // though the objects don't contain any other references.
    space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
    accounting::ObjectSet* large_live_objects = large_object_space->GetLiveObjects();
    SemiSpaceScanObjectVisitor visitor(this);
    for (const Object* obj : large_live_objects->GetObjects()) {
      visitor(const_cast<Object*>(obj));
    }
  }

  // Recursively process the mark stack.
  ProcessMarkStack();
}

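// After marking is complete: process references, sweep the system weaks and the unmarked
// objects, record the amount of memory freed, and then clear and protect the from-space.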
void SemiSpace::ReclaimPhase() {
  TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
  ProcessReferences(self_);
  {
    ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    SweepSystemWeaks();
  }
  // Record freed memory.
  uint64_t from_bytes = from_space_->GetBytesAllocated();
  uint64_t to_bytes = to_space_->GetBytesAllocated();
  uint64_t from_objects = from_space_->GetObjectsAllocated();
  uint64_t to_objects = to_space_->GetObjectsAllocated();
  CHECK_LE(to_objects, from_objects);
  int64_t freed_bytes = from_bytes - to_bytes;
  int64_t freed_objects = from_objects - to_objects;
  freed_bytes_.FetchAndAdd(freed_bytes);
  freed_objects_.FetchAndAdd(freed_objects);
  // Note: Freed bytes can be negative if we copy from a compacted space to a free-list backed
  // space.
  heap_->RecordFree(freed_objects, freed_bytes);
  timings_.StartSplit("PreSweepingGcVerification");
  heap_->PreSweepingGcVerification(this);
  timings_.EndSplit();

  {
    WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_);
    // Reclaim unmarked objects.
    Sweep(false);
    // Swap the live and mark bitmaps for each space which we modified. This is an optimization
    // that enables us to not clear live bits inside of the sweep. Only swaps unbound bitmaps.
    timings_.StartSplit("SwapBitmaps");
    SwapBitmaps();
    timings_.EndSplit();
    // Unbind the live and mark bitmaps.
    TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
    GetHeap()->UnBindBitmaps();
  }
  from_space_->Clear();
  // Protect the from space.
  VLOG(heap) << "Protecting space " << *from_space_;
  if (kProtectFromSpace) {
    from_space_->GetMemMap()->Protect(PROT_NONE);
  } else {
    from_space_->GetMemMap()->Protect(PROT_READ);
  }
  if (saved_bytes_ > 0) {
    VLOG(heap) << "Avoided dirtying " << PrettySize(saved_bytes_);
  }

  if (generational_) {
    // Record the end (top) of the to space so we can distinguish between objects that were
    // allocated since the last GC and the older objects.
    last_gc_to_space_end_ = to_space_->End();
  }
}

void SemiSpace::ResizeMarkStack(size_t new_size) {
  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
  CHECK_LE(mark_stack_->Size(), new_size);
  mark_stack_->Resize(new_size);
  for (const auto& obj : temp) {
    mark_stack_->PushBack(obj);
  }
}

inline void SemiSpace::MarkStackPush(Object* obj) {
  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
    ResizeMarkStack(mark_stack_->Capacity() * 2);
  }
  // The object must be pushed on to the mark stack.
  mark_stack_->PushBack(obj);
}

// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
bool SemiSpace::MarkLargeObject(const Object* obj) {
  // TODO: support >1 discontinuous space.
  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
  DCHECK(large_object_space->Contains(obj));
  accounting::ObjectSet* large_objects = large_object_space->GetMarkObjects();
  if (UNLIKELY(!large_objects->Test(obj))) {
    large_objects->Set(obj);
    return true;
  }
  return false;
}

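// Copy |size| bytes from |src| to |dest| while trying not to dirty pages unnecessarily. The
// destination is expected to be zero-initialized (freshly mapped to-space memory, verified in
// debug builds), so for each page-sized chunk in the interior of the copy only the non-zero
// words are written; a page whose source is entirely zero receives no writes at all and stays
// clean. Returns the number of bytes saved from being dirtied this way.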
static inline size_t CopyAvoidingDirtyingPages(void* dest, const void* src, size_t size) {
  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {
    // We will dirty the current page and somewhere in the middle of the next page. This means
    // that the next object copied will also dirty that page.
    // TODO: Worth considering the last object copied? We may end up dirtying one page which is
    // not necessary per GC.
    memcpy(dest, src, size);
    return 0;
  }
  size_t saved_bytes = 0;
  byte* byte_dest = reinterpret_cast<byte*>(dest);
  if (kIsDebugBuild) {
    for (size_t i = 0; i < size; ++i) {
      CHECK_EQ(byte_dest[i], 0U);
    }
  }
  // Process the start of the page. The page must already be dirty; don't bother checking.
  const byte* byte_src = reinterpret_cast<const byte*>(src);
  const byte* limit = byte_src + size;
  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;
  // Copy the bytes until the start of the next page.
  memcpy(dest, src, page_remain);
  byte_src += page_remain;
  byte_dest += page_remain;
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), sizeof(uintptr_t));
  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_src), sizeof(uintptr_t));
  while (byte_src + kPageSize < limit) {
    bool all_zero = true;
    uintptr_t* word_dest = reinterpret_cast<uintptr_t*>(byte_dest);
    const uintptr_t* word_src = reinterpret_cast<const uintptr_t*>(byte_src);
    for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
      // Assumes the destination of the copy is all zeros.
      if (word_src[i] != 0) {
        all_zero = false;
        word_dest[i] = word_src[i];
      }
    }
    if (all_zero) {
      // Avoided copying into the page since it was all zeros.
      saved_bytes += kPageSize;
    }
    byte_src += kPageSize;
    byte_dest += kPageSize;
  }
  // Handle the part of the page at the end.
  memcpy(byte_dest, byte_src, limit - byte_src);
  return saved_bytes;
}

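// Allocate a forwarding address for |obj| and copy its contents there. In generational mode,
// objects that survived the previous GC are promoted to the main free list space (falling
// back to the to-space if that allocation fails); younger objects are copied to the to-space.
// The copied object's references are updated later, when it is popped off the mark stack and
// scanned.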
mirror::Object* SemiSpace::MarkNonForwardedObject(mirror::Object* obj) {
  size_t object_size = obj->SizeOf();
  size_t bytes_allocated;
  mirror::Object* forward_address = nullptr;
  if (generational_ && reinterpret_cast<byte*>(obj) < last_gc_to_space_end_) {
    // If it was allocated before the last GC (older), move (pseudo-promote) it to the main
    // free list space (as sort of an old generation).
    size_t bytes_promoted;
    space::MallocSpace* promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    forward_address = promo_dest_space->Alloc(self_, object_size, &bytes_promoted, nullptr);
    if (forward_address == nullptr) {
      // If out of space, fall back to the to-space.
      forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
    } else {
      GetHeap()->num_bytes_allocated_.FetchAndAdd(bytes_promoted);
      bytes_promoted_ += bytes_promoted;
      // Dirty the card at the destination as it may contain references (including the class
      // pointer) to the bump pointer space.
      GetHeap()->WriteBarrierEveryFieldOf(forward_address);
      // Handle the bitmaps marking.
      accounting::SpaceBitmap* live_bitmap = promo_dest_space->GetLiveBitmap();
      DCHECK(live_bitmap != nullptr);
      accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
      DCHECK(mark_bitmap != nullptr);
      DCHECK(!live_bitmap->Test(forward_address));
      if (!whole_heap_collection_) {
        // If collecting the bump pointer spaces only, live_bitmap == mark_bitmap.
        DCHECK_EQ(live_bitmap, mark_bitmap);

        // During a bump pointer space only collection, delay the live bitmap marking of the
        // promoted object until it's popped off the mark stack (ProcessMarkStack()). The
        // rationale: we may be in the middle of scanning the objects in the promo destination
        // space for non-moving-space-to-bump-pointer-space references by iterating over the
        // marked bits of the live bitmap (MarkReachableObjects()). If we don't delay it (and
        // instead mark the promoted object here), the above promo destination space scan could
        // encounter the just-promoted object and forward the references in the promoted
        // object's fields even though it is pushed onto the mark stack. If this happens, the
        // promoted object would be in an inconsistent state, that is, it's on the mark stack
        // (gray) but its fields are already forwarded (black), which would cause a
        // DCHECK(!to_space_->HasAddress(obj)) failure below.
      } else {
        // Mark forward_address on the live bit map.
        live_bitmap->Set(forward_address);
        // Mark forward_address on the mark bit map.
        DCHECK(!mark_bitmap->Test(forward_address));
        mark_bitmap->Set(forward_address);
      }
    }
    DCHECK(forward_address != nullptr);
  } else {
    // If it was allocated after the last GC (younger), copy it to the to-space.
    forward_address = to_space_->Alloc(self_, object_size, &bytes_allocated, nullptr);
  }
  // Copy over the object and add it to the mark stack since we still need to update its
  // references.
  saved_bytes_ +=
      CopyAvoidingDirtyingPages(reinterpret_cast<void*>(forward_address), obj, object_size);
  if (kUseBakerOrBrooksReadBarrier) {
    obj->AssertReadBarrierPointer();
    if (kUseBrooksReadBarrier) {
      DCHECK_EQ(forward_address->GetReadBarrierPointer(), obj);
      forward_address->SetReadBarrierPointer(forward_address);
    }
    forward_address->AssertReadBarrierPointer();
  }
  if (to_space_live_bitmap_ != nullptr) {
    to_space_live_bitmap_->Set(forward_address);
  }
  DCHECK(to_space_->HasAddress(forward_address) ||
         (generational_ && GetHeap()->GetPrimaryFreeListSpace()->HasAddress(forward_address)));
  return forward_address;
}

void SemiSpace::ProcessMarkStackCallback(void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->ProcessMarkStack();
}

mirror::Object* SemiSpace::MarkObjectCallback(mirror::Object* root, void* arg) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  return ref.AsMirrorPtr();
}

void SemiSpace::MarkHeapReferenceCallback(mirror::HeapReference<mirror::Object>* obj_ptr,
                                          void* arg) {
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(obj_ptr);
}

void SemiSpace::MarkRootCallback(Object** root, void* arg, uint32_t /*thread_id*/,
                                 RootType /*root_type*/) {
  auto ref = StackReference<mirror::Object>::FromMirrorPtr(*root);
  reinterpret_cast<SemiSpace*>(arg)->MarkObject(&ref);
  if (*root != ref.AsMirrorPtr()) {
    *root = ref.AsMirrorPtr();
  }
}

// Marks all objects in the root set.
void SemiSpace::MarkRoots() {
  timings_.StartSplit("MarkRoots");
  // TODO: Visit up image roots as well?
  Runtime::Current()->VisitRoots(MarkRootCallback, this);
  timings_.EndSplit();
}

mirror::Object* SemiSpace::MarkedForwardingAddressCallback(mirror::Object* object, void* arg) {
  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
}

void SemiSpace::SweepSystemWeaks() {
  timings_.StartSplit("SweepSystemWeaks");
  Runtime::Current()->SweepSystemWeaks(MarkedForwardingAddressCallback, this);
  timings_.EndSplit();
}

bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const {
  return space != from_space_ && space != to_space_ && !immune_region_.ContainsSpace(space);
}

void SemiSpace::Sweep(bool swap_bitmaps) {
  DCHECK(mark_stack_->IsEmpty());
  TimingLogger::ScopedSplit split("Sweep", &timings_);
  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
    if (space->IsContinuousMemMapAllocSpace()) {
      space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace();
      if (!ShouldSweepSpace(alloc_space)) {
        continue;
      }
      TimingLogger::ScopedSplit split2(
          alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_);
      size_t freed_objects = 0;
      size_t freed_bytes = 0;
      alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
      heap_->RecordFree(freed_objects, freed_bytes);
      freed_objects_.FetchAndAdd(freed_objects);
      freed_bytes_.FetchAndAdd(freed_bytes);
    }
  }
  if (!is_large_object_space_immune_) {
    SweepLargeObjects(swap_bitmaps);
  }
}

void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
  DCHECK(!is_large_object_space_immune_);
  TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
  size_t freed_objects = 0;
  size_t freed_bytes = 0;
  GetHeap()->GetLargeObjectsSpace()->Sweep(swap_bitmaps, &freed_objects, &freed_bytes);
  freed_large_objects_.FetchAndAdd(freed_objects);
  freed_large_object_bytes_.FetchAndAdd(freed_bytes);
  GetHeap()->RecordFree(freed_objects, freed_bytes);
}

// Process the "referent" field in a java.lang.ref.Reference. If the referent has not yet been
// marked, put it on the appropriate list in the heap for later processing.
void SemiSpace::DelayReferenceReferent(mirror::Class* klass, mirror::Reference* reference) {
  heap_->DelayReferenceReferent(klass, reference, MarkedForwardingAddressCallback, this);
}

class SemiSpaceMarkObjectVisitor {
 public:
  explicit SemiSpaceMarkObjectVisitor(SemiSpace* collector) : collector_(collector) {
  }

  void operator()(Object* obj, MemberOffset offset, bool /* is_static */) const ALWAYS_INLINE
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    // Object was already verified when we scanned it.
    collector_->MarkObject(obj->GetFieldObjectReferenceAddr<kVerifyNone>(offset));
  }

  void operator()(mirror::Class* klass, mirror::Reference* ref) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    collector_->DelayReferenceReferent(klass, ref);
  }

 private:
  SemiSpace* const collector_;
};

// Visit all of the references of an object and update them.
void SemiSpace::ScanObject(Object* obj) {
  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
  SemiSpaceMarkObjectVisitor visitor(this);
  obj->VisitReferences<kMovingClasses>(visitor, visitor);
}

// Scan anything that's on the mark stack.
void SemiSpace::ProcessMarkStack() {
  space::MallocSpace* promo_dest_space = nullptr;
  accounting::SpaceBitmap* live_bitmap = nullptr;
  if (generational_ && !whole_heap_collection_) {
    // During a bump pointer space only collection (with promotion enabled), we delay the
    // live-bitmap marking of promoted objects from MarkObject() until this function.
    promo_dest_space = GetHeap()->GetPrimaryFreeListSpace();
    live_bitmap = promo_dest_space->GetLiveBitmap();
    DCHECK(live_bitmap != nullptr);
    accounting::SpaceBitmap* mark_bitmap = promo_dest_space->GetMarkBitmap();
    DCHECK(mark_bitmap != nullptr);
    DCHECK_EQ(live_bitmap, mark_bitmap);
  }
  timings_.StartSplit("ProcessMarkStack");
  while (!mark_stack_->IsEmpty()) {
    Object* obj = mark_stack_->PopBack();
    if (generational_ && !whole_heap_collection_ && promo_dest_space->HasAddress(obj)) {
      // obj has just been promoted. Mark the live bitmap for it, which is delayed from
      // MarkObject().
      DCHECK(!live_bitmap->Test(obj));
      live_bitmap->Set(obj);
    }
    ScanObject(obj);
  }
  timings_.EndSplit();
}

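// Return the post-collection address of |obj| if it is marked, or nullptr if it is not:
// immune objects count as marked, from-space objects resolve through their forwarding
// address, to-space objects have already been moved, and everything else is looked up in the
// mark bitmaps.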
inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
  // All immune objects are assumed marked.
  if (immune_region_.ContainsObject(obj)) {
    return obj;
  }
  if (from_space_->HasAddress(obj)) {
    // Returns either the forwarding address or nullptr.
    return GetForwardingAddressInFromSpace(obj);
  } else if (to_space_->HasAddress(obj)) {
    // Should be unlikely.
    // Already forwarded, must be marked.
    return obj;
  }
  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
}

void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
  DCHECK(to_space != nullptr);
  to_space_ = to_space;
}

void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
  DCHECK(from_space != nullptr);
  from_space_ = from_space;
}

void SemiSpace::FinishPhase() {
  TimingLogger::ScopedSplit split("FinishPhase", &timings_);
  Heap* heap = GetHeap();
  timings_.NewSplit("PostGcVerification");
  heap->PostGcVerification(this);
  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
  // further action is done by the heap.
  to_space_ = nullptr;
  from_space_ = nullptr;
  CHECK(mark_stack_->IsEmpty());
  mark_stack_->Reset();
  if (generational_) {
    // Decide whether to do a whole heap collection or a bump pointer space only collection at
    // the next collection by updating whole_heap_collection_.
    if (!whole_heap_collection_) {
      if (!kUseBytesPromoted) {
        // Enable whole_heap_collection_ once every kDefaultWholeHeapCollectionInterval
        // collections.
        --whole_heap_collection_interval_counter_;
        DCHECK_GE(whole_heap_collection_interval_counter_, 0);
        if (whole_heap_collection_interval_counter_ == 0) {
          whole_heap_collection_ = true;
        }
      } else {
        // Enable whole_heap_collection_ if the bytes promoted since the last whole heap
        // collection exceed a threshold.
        bytes_promoted_since_last_whole_heap_collection_ += bytes_promoted_;
        if (bytes_promoted_since_last_whole_heap_collection_ >= kBytesPromotedThreshold) {
          whole_heap_collection_ = true;
        }
      }
    } else {
      if (!kUseBytesPromoted) {
        DCHECK_EQ(whole_heap_collection_interval_counter_, 0);
        whole_heap_collection_interval_counter_ = kDefaultWholeHeapCollectionInterval;
        whole_heap_collection_ = false;
      } else {
        // Reset it.
        bytes_promoted_since_last_whole_heap_collection_ = bytes_promoted_;
        whole_heap_collection_ = false;
      }
    }
  }
  // Clear all of the spaces' mark bitmaps.
  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
  heap_->ClearMarkedObjects();
}

void SemiSpace::RevokeAllThreadLocalBuffers() {
  timings_.StartSplit("(Paused)RevokeAllThreadLocalBuffers");
  GetHeap()->RevokeAllThreadLocalBuffers();
  timings_.EndSplit();
}

}  // namespace collector
}  // namespace gc
}  // namespace art