/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "garbage_collector.h"
#include "immune_spaces.h"
#include "jni.h"
#include "mirror/object_reference.h"
#include "offsets.h"
#include "safe_map.h"

#include <unordered_map>
#include <vector>

namespace art {
class Closure;
class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
template <size_t kAlignment> class SpaceBitmap;
typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
class HeapBitmap;
class ReadBarrierTable;
}  // namespace accounting

namespace space {
class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
  // pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;

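  // If `measure_read_barrier_slow_path` is true, the collector is assumed to time the read
  // barrier slow path (see measure_read_barrier_slow_path_ below) and report it in the
  // performance dump.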
  explicit ConcurrentCopying(Heap* heap,
                             const std::string& name_prefix = "",
                             bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

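  // Run all phases of the collection in sequence; see the phase methods declared below.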
  virtual void RunPhases() OVERRIDE
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  // Assert the to-space invariant for a heap reference `ref` held in `obj` at offset `offset`.
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Assert the to-space invariant for a GC root reference `ref`.
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
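  // Returns true if `ref` is already marked, i.e. it is a to-space reference.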
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
  // Mark object `from_ref`, copying it to the to-space if needed.
  template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
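  // Mark `from_ref` from the read barrier slow path and return the to-space reference.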
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true since concurrent graying
  // creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void PushOntoMarkStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
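  // Copy `from_ref` into the to-space and return the to-space reference. `holder` and `offset`
  // identify the field that referenced `from_ref`, when known (used for diagnostics).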
  mirror::Object* Copy(mirror::Object* from_ref,
                       mirror::Object* holder,
                       MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  // Scan the reference fields of object `to_ref`.
  void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Process the reference field of object `obj` at offset `offset`, marking the object it
  // references.
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                      ObjPtr<mirror::Reference> reference) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                                 bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                           bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
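  // Return the forwarding pointer of `from_ref`, if one has been installed in its lock word.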
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about reference `ref` and return it as a string.
  // Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, std::string indent = "")
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
  // and return it as a string.
  std::string DumpHeapReference(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about GC root `ref` and return it as a string.
  std::string DumpGcRoot(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(
      mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void PushOntoFalseGrayStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  space::RegionSpace* region_space_;  // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;

  // The read-barrier mark-bit stack. Stores object references whose
  // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
  // so that this bit can be reset at the end of the collection in
  // ConcurrentCopying::FinishPhase. The mark bit of an object can be
  // used by mutator read barrier code to quickly test whether that
  // object has already been marked.
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  // Thread-unsafe Boolean value hinting that `rb_mark_bit_stack_` is
  // full. A thread-safe test of whether the read-barrier mark-bit
  // stack is full is implemented by `rb_mark_bit_stack_->AtomicPushBack(ref)`
  // (see use case in ConcurrentCopying::MarkFromReadBarrier).
  bool rb_mark_bit_stack_full_;

  std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
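  // Size of each mark stack and the number of mark stacks maintained in the pool.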
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
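  // The thread that is running this collection.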
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark
                                // stack without a lock. Other threads won't access the mark stack.
  };
  Atomic<MarkStackMode> mark_stack_mode_;
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;
  Atomic<uint64_t> cumulative_bytes_moved_;
  Atomic<uint64_t> cumulative_objects_moved_;

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects that were unused due to lost races (CAS failures) at
  // object copy/forward pointer install. They are reused.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  // If measure_read_barrier_slow_path_ is true, we count how much time is spent in
  // MarkFromReadBarrier and log the result.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_slow_path_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillWithDummyObject can run. Not
  // ObjPtr since the GC may transition to suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

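  // Nested helper visitors, checkpoints and callbacks used during the collection; defined in
  // concurrent_copying.cc.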
  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_