/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "barrier.h"
#include "base/safe_map.h"
#include "garbage_collector.h"
#include "immune_spaces.h"
#include "jni.h"
#include "mirror/object_reference.h"
#include "offsets.h"

#include <unordered_map>
#include <vector>

namespace art {
class Closure;
class RootInfo;

namespace mirror {
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
template <size_t kAlignment> class SpaceBitmap;
typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
class HeapBitmap;
class ReadBarrierTable;
}  // namespace accounting

namespace space {
class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty objects in the GC pause to prevent dirty
  // pages.
  static constexpr bool kGrayDirtyImmuneObjects = true;

  explicit ConcurrentCopying(Heap* heap,
                             bool young_gen,
                             const std::string& name_prefix = "",
                             bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  void RunPhases() override
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  GcType GetGcType() const override {
    return (kEnableGenerationalConcurrentCopyingCollection && young_gen_)
        ? kGcTypeSticky
        : kGcTypePartial;
  }
  CollectorType GetCollectorType() const override {
    return kCollectorTypeCC;
  }
  void RevokeAllThreadLocalBuffers() override;
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  // Assert the to-space invariant for a heap reference `ref` held in `obj` at offset `offset`.
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Assert the to-space invariant for a GC root reference `ref`.
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
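  // Note (assumed from the IsMarked() contract below, which returns the
  // to-space address of a marked object): pointer equality with `ref` holds
  // exactly when `ref` is itself the canonical to-space copy. For example:
  //
  //   mirror::Object* to_ref = cc->IsMarked(ref);  // Forwarded address or nullptr.
  //   bool in_to_space = (to_ref == ref);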
  // Mark object `from_ref`, copying it to the to-space if needed.
  template<bool kGrayImmuneObject = true, bool kNoUnEvac = false, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(Thread* const self,
                                     mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true since concurrent
  // graying creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
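  // Illustrative sketch (hypothetical caller, not part of this class): a
  // mutator-side read barrier slow path consulting these flags would look
  // roughly like
  //
  //   mirror::Object* ReadBarrierSlow(ConcurrentCopying* cc, mirror::Object* ref) {
  //     return cc->IsMarking() ? cc->MarkFromReadBarrier(ref) : ref;
  //   }
  //
  // with IsUsingReadBarrierEntrypoints() indicating whether the fast-path
  // entrypoints are currently installed.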
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  mirror::Object* IsMarked(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_);

 private:
  void PushOntoMarkStack(Thread* const self, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  mirror::Object* Copy(Thread* const self,
                       mirror::Object* from_ref,
                       mirror::Object* holder,
                       MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  // Scan the reference fields of object `to_ref`.
  template <bool kNoUnEvac>
  void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Process a field.
  template <bool kNoUnEvac>
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                              ObjPtr<mirror::Reference> reference) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkObject(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                         bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                   bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  // Sweep unmarked objects to complete the garbage collection. Full GCs sweep
  // all allocation spaces (except the region space). Sticky-bit GCs just sweep
  // a subset of the heap.
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  // Sweep only pointers within an array.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithDummyObject(Thread* const self, mirror::Object* dummy_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(Thread* const self, size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about reference `ref` and return it as a string.
  // Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, const char* indent = "")
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
  // and return it as a string.
  std::string DumpHeapReference(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about GC root `ref` and return it as a string.
  std::string DumpGcRoot(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(Thread* const self,
                                mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(
      Thread* const self,
      mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
                                                mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void PushOntoFalseGrayStack(Thread* const self, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ProcessFalseGrayStack() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
                                                      mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  space::RegionSpace* region_space_;  // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;

  // The read-barrier mark-bit stack. Stores object references whose
  // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
  // so that this bit can be reset at the end of the collection in
  // ConcurrentCopying::FinishPhase. The mark bit of an object can be
  // used by mutator read barrier code to quickly test whether that
  // object has already been marked.
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  // Thread-unsafe Boolean value hinting that `rb_mark_bit_stack_` is
  // full. A thread-safe test of whether the read-barrier mark-bit
  // stack is full is implemented by `rb_mark_bit_stack_->AtomicPushBack(ref)`
  // (see use case in ConcurrentCopying::MarkFromReadBarrier).
  bool rb_mark_bit_stack_full_;
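  // Illustrative sketch (assumed shape of the use case referenced above): the
  // push either succeeds or reports fullness, so no separate locked check is
  // needed:
  //
  //   if (!rb_mark_bit_stack_->AtomicPushBack(ref)) {
  //     rb_mark_bit_stack_full_ = true;  // Hint only; the failed push is the real signal.
  //   }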

  std::vector<mirror::Object*> false_gray_stack_ GUARDED_BY(mark_stack_lock_);
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  Thread* thread_running_gc_;
  bool is_marking_;  // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;  // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;
  size_t from_space_num_bytes_at_first_pause_;
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark
                                // stack without a lock. Other threads won't access the mark stack.
  };
  Atomic<MarkStackMode> mark_stack_mode_;
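  // Illustrative sketch (hypothetical helper, not part of this class): a push
  // respecting the current mode would branch roughly as
  //
  //   MarkStackMode mode = mark_stack_mode_.load(std::memory_order_relaxed);
  //   if (mode == kMarkStackModeThreadLocal) {
  //     /* push onto the caller's thread-local stack (GC thread: gc_mark_stack_) */
  //   } else if (mode == kMarkStackModeShared) {
  //     MutexLock mu(self, mark_stack_lock_);  /* then push onto gc_mark_stack_ */
  //   }  // kMarkStackModeGcExclusive: the GC-running thread pushes without a lock.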
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. Used for accounting. The GC thread
  // moves many more objects than the mutators do, so we keep separate counters
  // to let the GC thread update its own without CAS.
  Atomic<size_t> bytes_moved_;      // Used by mutators.
  Atomic<size_t> objects_moved_;    // Used by mutators.
  size_t bytes_moved_gc_thread_;    // Used by the GC thread.
  size_t objects_moved_gc_thread_;  // Used by the GC thread.
  Atomic<uint64_t> cumulative_bytes_moved_;
  Atomic<uint64_t> cumulative_objects_moved_;
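  // Illustrative sketch (assumed usage of the split counters above): mutators
  // bump the atomic counters, while the GC-running thread bumps its plain
  // counters without synchronization:
  //
  //   if (self == thread_running_gc_) {
  //     bytes_moved_gc_thread_ += bytes_copied;  // No CAS needed.
  //   } else {
  //     bytes_moved_.fetch_add(bytes_copied, std::memory_order_relaxed);
  //   }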

  // copied_live_bytes_ratio_sum_ and gc_count_ are read and written by CC per
  // GC, in ReclaimPhase, and are read by DumpPerformanceInfo (potentially from
  // another thread). However, at present, DumpPerformanceInfo is only called
  // when the runtime shuts down, so there is no concurrent access.
  // The sum of all copied live bytes ratios (to_bytes/from_bytes).
  float copied_live_bytes_ratio_sum_;
  // The number of GCs counted, used to calculate the average above. (It
  // doesn't include GCs where from_bytes is zero, i.e. where the from-space
  // was empty, which is possible for a minor GC if all allocated objects are
  // in the non-moving space.)
  size_t gc_count_;
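  // Reporting note (derived from the description above): the average copied
  // live bytes ratio is copied_live_bytes_ratio_sum_ / gc_count_, valid only
  // when gc_count_ > 0.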

  // Generational "sticky" GC: only trace through objects dirtied in the region
  // space since the last GC.
  const bool young_gen_;
  // If true, the GC thread is done scanning marked objects on dirty and aged
  // cards (see ConcurrentCopying::MarkingPhase).
  Atomic<bool> done_scanning_;

  // The skipped blocks are memory blocks/chunks that were copies of
  // objects that went unused due to lost races (CAS failures) when
  // installing the object copy/forwarding pointer. They are reused.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;
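  // Illustrative sketch (assumed shape of AllocateInSkippedBlock): reuse picks
  // the smallest recorded block that fits the request, roughly
  //
  //   MutexLock mu(self, skipped_blocks_lock_);
  //   auto it = skipped_blocks_map_.lower_bound(alloc_size);
  //   if (it != skipped_blocks_map_.end()) {
  //     uint8_t* block = it->second;    // Block of size it->first >= alloc_size.
  //     skipped_blocks_map_.erase(it);  // Any unused remainder can be re-recorded.
  //   }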

  // If measure_read_barrier_slow_path_ is true, we count the time spent in
  // MarkFromReadBarrier and log the measurements.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_slow_path_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillWithDummyObject can run. Not
  // ObjPtr since the GC may transition between suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

  // Sweep array free buffer, used to sweep the spaces based on an array more
  // efficiently, by recording dead objects to be freed in batches (see
  // ConcurrentCopying::SweepArray).
  MemMap sweep_array_free_buffer_mem_map_;
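  // Illustrative sketch (assumed mechanics of SweepArray): dead objects found
  // while walking the allocation stack are batched into this buffer and freed
  // in bulk, roughly
  //
  //   mirror::Object** free_buffer =
  //       reinterpret_cast<mirror::Object**>(sweep_array_free_buffer_mem_map_.Begin());
  //   // ... append unmarked objects, then free the batch in one call.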

  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  template <bool kNoUnEvac> class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_