/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "garbage_collector.h"
#include "immune_spaces.h"
#include "jni.h"
#include "offsets.h"
#include "mirror/object_reference.h"
#include "safe_map.h"

#include <unordered_map>
#include <vector>

namespace art {
class Closure;
class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
  template<typename T> class AtomicStack;
  typedef AtomicStack<mirror::Object> ObjectStack;
  template <size_t kAlignment> class SpaceBitmap;
  typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
  class HeapBitmap;
  class ReadBarrierTable;
}  // namespace accounting

namespace space {
  class RegionSpace;
}  // namespace space

namespace collector {

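// Concurrent copying (CC) collector. It copies live objects out of evacuated regions of the
// region space concurrently with the mutators, which keep a consistent view of the heap through
// read barriers (see Mark(), MarkFromReadBarrier(), and the read barrier entrypoint hooks below).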
class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
  // pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;

  explicit ConcurrentCopying(Heap* heap,
                             const std::string& name_prefix = "",
                             bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  virtual void RunPhases() OVERRIDE
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  virtual GcType GetGcType() const OVERRIDE {
    return kGcTypePartial;
  }
  virtual CollectorType GetCollectorType() const OVERRIDE {
    return kCollectorTypeCC;
  }
  virtual void RevokeAllThreadLocalBuffers() OVERRIDE;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
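  // Marks from_ref and returns the corresponding to-space reference (from_ref itself if the
  // object does not move). holder and offset identify the field the reference was read from and
  // are used for diagnostics.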
  template<bool kGrayImmuneObject = true, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true since concurrent graying
  // creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  virtual mirror::Object* IsMarked(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void PushOntoMarkStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  mirror::Object* Copy(mirror::Object* from_ref,
                       mirror::Object* holder,
                       MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void VisitRoots(mirror::CompressedReference<mirror::Object>** roots, size_t count,
                          const RootInfo& info)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  virtual void ProcessMarkStack() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  virtual void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                                      ObjPtr<mirror::Reference> reference) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  virtual mirror::Object* MarkObject(mirror::Object* from_ref) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  virtual void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                                 bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  virtual bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                           bool do_atomic_update) OVERRIDE
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithDummyObject(mirror::Object* dummy_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void PushOntoFalseGrayStack(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) OVERRIDE REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  space::RegionSpace* region_space_;      // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
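  // GC-internal mark stack, used when the mark stack mode is shared or GC-exclusive
  // (see MarkStackMode below).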
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  bool rb_mark_bit_stack_full_;
  std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
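  // Pool of mark stacks (at most kMarkStackPoolSize) handed out as thread-local mark stacks and
  // reused across collections.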
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
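  // The thread that is running this GC.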
  Thread* thread_running_gc_;
  bool is_marking_;                       // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;                        // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
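  // Mark bitmap covering the region space (see MarkUnevacFromSpaceRegion()).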
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
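  // Bookkeeping recorded at the pause: the size of the live stack when it was frozen
  // (see RecordLiveStackFreezeSize()) and, for the from-space accounting check
  // (kEnableFromSpaceAccountingCheck), the from-space object/byte counts at the first pause.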
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
  Atomic<MarkStackMode> mark_stack_mode_;
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. Used for accounting.
  Atomic<size_t> bytes_moved_;
  Atomic<size_t> objects_moved_;
  Atomic<uint64_t> cumulative_bytes_moved_;
  Atomic<uint64_t> cumulative_objects_moved_;

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects that went unused due to lost races (CAS failures) at
  // object copy/forwarding pointer install. They are reused.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;

  // If measure_read_barrier_slow_path_ is true, we count how much time is spent in
  // MarkFromReadBarrier and also log it.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_slow_path_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillWithDummyObject can run. Not
  // an ObjPtr since the GC may transition to suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_