/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_
#define ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_

#include "garbage_collector.h"
#include "gc/accounting/space_bitmap.h"
#include "immune_spaces.h"
#include "offsets.h"

#include <map>
#include <memory>
#include <unordered_map>
#include <vector>

namespace art {
class Barrier;
class Closure;
class RootInfo;

namespace mirror {
template<class MirrorType> class CompressedReference;
template<class MirrorType> class HeapReference;
class Object;
}  // namespace mirror

namespace gc {

namespace accounting {
template<typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
template <size_t kAlignment> class SpaceBitmap;
typedef SpaceBitmap<kObjectAlignment> ContinuousSpaceBitmap;
class HeapBitmap;
class ReadBarrierTable;
}  // namespace accounting

namespace space {
class RegionSpace;
}  // namespace space

namespace collector {

class ConcurrentCopying : public GarbageCollector {
 public:
  // Enable the no-from-space-refs verification at the pause.
  static constexpr bool kEnableNoFromSpaceRefsVerification = kIsDebugBuild;
  // Enable the from-space bytes/objects check.
  static constexpr bool kEnableFromSpaceAccountingCheck = kIsDebugBuild;
  // Enable verbose mode.
  static constexpr bool kVerboseMode = false;
  // If kGrayDirtyImmuneObjects is true then we gray dirty immune-space objects in the GC pause to
  // avoid dirtying additional pages during the concurrent marking phase.
  static constexpr bool kGrayDirtyImmuneObjects = true;
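  //
  // A rough sketch (not the exact implementation) of how this flag is meant to be used: during
  // the flip pause, GrayAllDirtyImmuneObjects() visits only the already-dirty objects of the
  // immune (image/zygote) spaces and sets their read-barrier state to gray, e.g.
  //
  //   if (kGrayDirtyImmuneObjects) {
  //     GrayAllDirtyImmuneObjects();  // pause work; touches only pages that are already dirty
  //   }
  //
  // so the concurrent phase does not have to gray (and thereby dirty) every immune-space page.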

  ConcurrentCopying(Heap* heap,
                    bool young_gen,
                    bool use_generational_cc,
                    const std::string& name_prefix = "",
                    bool measure_read_barrier_slow_path = false);
  ~ConcurrentCopying();

  void RunPhases() override
      REQUIRES(!immune_gray_stack_lock_,
               !mark_stack_lock_,
               !rb_slow_path_histogram_lock_,
               !skipped_blocks_lock_);
  void InitializePhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !immune_gray_stack_lock_);
  void MarkingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void CopyingPhase() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void ReclaimPhase() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void FinishPhase() REQUIRES(!mark_stack_lock_,
                              !rb_slow_path_histogram_lock_,
                              !skipped_blocks_lock_);

  void CaptureRssAtPeak() REQUIRES(!mark_stack_lock_);
  void BindBitmaps() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_);
  GcType GetGcType() const override {
    return (use_generational_cc_ && young_gen_)
        ? kGcTypeSticky
        : kGcTypePartial;
  }
  CollectorType GetCollectorType() const override {
    return kCollectorTypeCC;
  }
  void RevokeAllThreadLocalBuffers() override;
  // Creates inter-region ref bitmaps for region-space and non-moving-space.
  // Gets called in Heap construction after the two spaces are created.
  void CreateInterRegionRefBitmaps();
  void SetRegionSpace(space::RegionSpace* region_space) {
    DCHECK(region_space != nullptr);
    region_space_ = region_space;
  }
  space::RegionSpace* RegionSpace() {
    return region_space_;
  }
  // Assert the to-space invariant for a heap reference `ref` held in `obj` at offset `offset`.
  void AssertToSpaceInvariant(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Assert the to-space invariant for a GC root reference `ref`.
  void AssertToSpaceInvariant(GcRootSource* gc_root_source, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsInToSpace(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_) {
    DCHECK(ref != nullptr);
    return IsMarked(ref) == ref;
  }
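  // Informally, the to-space invariant says that a reference read out of a live (to-space or
  // marked) object must not point into from-space. A minimal sketch of how a debug check might
  // use the helpers above (illustrative only, not the real call sites):
  //
  //   mirror::Object* field_ref = obj->GetFieldObject<mirror::Object>(offset);
  //   if (field_ref != nullptr) {
  //     AssertToSpaceInvariant(obj, offset, field_ref);  // aborts if field_ref is a from-space ref
  //   }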
  // Mark object `from_ref`, copying it to the to-space if needed.
  template<bool kGrayImmuneObject = true, bool kNoUnEvac = false, bool kFromGCThread = false>
  ALWAYS_INLINE mirror::Object* Mark(Thread* const self,
                                     mirror::Object* from_ref,
                                     mirror::Object* holder = nullptr,
                                     MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  ALWAYS_INLINE mirror::Object* MarkFromReadBarrier(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
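  //
  // A hedged sketch of how the two functions above are typically reached (the real entrypoints
  // live in the read-barrier code, not in this class): when a mutator loads a reference while
  // marking is active, the read-barrier slow path hands the loaded value to the collector,
  // roughly
  //
  //   mirror::Object* ref = ...;                    // value loaded from a field or root
  //   if (read_barrier_slow_path_needed) {          // hypothetical condition for the sketch
  //     ref = collector->MarkFromReadBarrier(ref);  // returns the to-space address
  //   }
  //
  // so mutators only ever act on to-space (or immune/marked) references afterwards.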
  bool IsMarking() const {
    return is_marking_;
  }
  // We may want to use read barrier entrypoints before is_marking_ is true since concurrent graying
  // creates a small window where we might dispatch on these entrypoints.
  bool IsUsingReadBarrierEntrypoints() const {
    return is_using_read_barrier_entrypoints_;
  }
  bool IsActive() const {
    return is_active_;
  }
  Barrier& GetBarrier() {
    return *gc_barrier_;
  }
  bool IsWeakRefAccessEnabled() REQUIRES(Locks::thread_list_lock_) {
    return weak_ref_access_enabled_;
  }
  void RevokeThreadLocalMarkStack(Thread* thread) REQUIRES(!mark_stack_lock_);

  mirror::Object* IsMarked(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_);

  void AssertNoThreadMarkStackMapping(Thread* thread) REQUIRES(!mark_stack_lock_);

 private:
  void PushOntoMarkStack(Thread* const self, mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  mirror::Object* Copy(Thread* const self,
                       mirror::Object* from_ref,
                       mirror::Object* holder,
                       MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  // Scan the reference fields of object `to_ref`.
  template <bool kNoUnEvac>
  void Scan(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  // Scan the reference fields of object `obj` found in dirty cards during the
  // card-table scan. In addition to visiting the references, this also sets the
  // read-barrier state of Reference-type objects to gray so that GetReferent()
  // called on them invokes the read barrier on the referent.
  template <bool kNoUnEvac>
  void ScanDirtyObject(mirror::Object* obj) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
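  //
  // A simplified sketch (assumed, not the actual Heap/card-table API) of where this is used
  // during the generational (sticky) copying phase:
  //
  //   // for each card marked dirty or aged since the last GC:
  //   //   for each object starting on that card:
  //   //     ScanDirtyObject</*kNoUnEvac=*/ true>(obj);
  //
  // i.e. only objects on dirty/aged cards are re-scanned instead of the whole heap.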
  // Process a field.
  template <bool kNoUnEvac>
  void Process(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  template<bool kGrayImmuneObject>
  void MarkRoot(Thread* const self, mirror::CompressedReference<mirror::Object>* root)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VisitRoots(mirror::CompressedReference<mirror::Object>** roots,
                  size_t count,
                  const RootInfo& info) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void VerifyNoFromSpaceReferences() REQUIRES(Locks::mutator_lock_);
  accounting::ObjectStack* GetAllocationStack();
  accounting::ObjectStack* GetLiveStack();
  void ProcessMarkStack() override REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  bool ProcessMarkStackOnce() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void ProcessMarkStackRef(mirror::Object* to_ref) REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void GrayAllNewlyDirtyImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyGrayImmuneObjects()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void VerifyNoMissingCardMarks()
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  template <typename Processor>
  size_t ProcessThreadLocalMarkStacks(bool disable_weak_ref_access,
                                      Closure* checkpoint_callback,
                                      const Processor& processor)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void RevokeThreadLocalMarkStacks(bool disable_weak_ref_access, Closure* checkpoint_callback)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SwitchToSharedMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);
  void SwitchToGcExclusiveMarkStackMode() REQUIRES_SHARED(Locks::mutator_lock_);
  void DelayReferenceReferent(ObjPtr<mirror::Class> klass,
                              ObjPtr<mirror::Reference> reference) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessReferences(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkObject(mirror::Object* from_ref) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void MarkHeapReference(mirror::HeapReference<mirror::Object>* from_ref,
                         bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  bool IsMarkedInUnevacFromSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsMarkedInNonMovingSpace(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsNullOrMarkedHeapReference(mirror::HeapReference<mirror::Object>* field,
                                   bool do_atomic_update) override
      REQUIRES_SHARED(Locks::mutator_lock_);
  void SweepSystemWeaks(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Locks::heap_bitmap_lock_);
  // Sweep unmarked objects to complete the garbage collection. Full GCs sweep
  // all allocation spaces (except the region space). Sticky-bit GCs just sweep
  // a subset of the heap.
  void Sweep(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  // Sweep only pointers within an array.
  void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_, !mark_stack_lock_);
  void SweepLargeObjects(bool swap_bitmaps)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_);
  void MarkZygoteLargeObjects()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FillWithDummyObject(Thread* const self, mirror::Object* dummy_obj, size_t byte_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* AllocateInSkippedBlock(Thread* const self, size_t alloc_size)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckEmptyMarkStack() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  void IssueEmptyCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsOnAllocStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* GetFwdPtr(mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void FlipThreadRoots() REQUIRES(!Locks::mutator_lock_);
  void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordLiveStackFreezeSize(Thread* self);
  void ComputeUnevacFromSpaceLiveRatio();
  void LogFromSpaceRefHolder(mirror::Object* obj, MemberOffset offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about reference `ref` and return it as a string.
  // Use `ref_name` to name the reference in messages. Each message is prefixed with `indent`.
  std::string DumpReferenceInfo(mirror::Object* ref, const char* ref_name, const char* indent = "")
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about heap reference `ref`, referenced from object `obj` at offset `offset`,
  // and return it as a string.
  std::string DumpHeapReference(mirror::Object* obj, MemberOffset offset, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // Dump information about GC root `ref` and return it as a string.
  std::string DumpGcRoot(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void AssertToSpaceInvariantInNonMovingSpace(mirror::Object* obj, mirror::Object* ref)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ReenableWeakRefAccess(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void DisableMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void IssueDisableMarkingCheckpoint() REQUIRES_SHARED(Locks::mutator_lock_);
  void ExpandGcMarkStack() REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MarkNonMoving(Thread* const self,
                                mirror::Object* from_ref,
                                mirror::Object* holder = nullptr,
                                MemberOffset offset = MemberOffset(0))
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  ALWAYS_INLINE mirror::Object* MarkUnevacFromSpaceRegion(
      Thread* const self,
      mirror::Object* from_ref,
      accounting::SpaceBitmap<kObjectAlignment>* bitmap)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_);
  template<bool kGrayImmuneObject>
  ALWAYS_INLINE mirror::Object* MarkImmuneSpace(Thread* const self,
                                                mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!immune_gray_stack_lock_);
  void ScanImmuneObject(mirror::Object* obj)
      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!mark_stack_lock_);
  mirror::Object* MarkFromReadBarrierWithMeasurements(Thread* const self,
                                                      mirror::Object* from_ref)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_, !skipped_blocks_lock_, !immune_gray_stack_lock_);
  void DumpPerformanceInfo(std::ostream& os) override REQUIRES(!rb_slow_path_histogram_lock_);
  // Set the read barrier mark entrypoints to non-null.
  void ActivateReadBarrierEntrypoints();

  void CaptureThreadRootsForMarking() REQUIRES_SHARED(Locks::mutator_lock_);
  void AddLiveBytesAndScanRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  bool TestMarkBitmapForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  template <bool kAtomic = false>
  bool TestAndSetMarkBitForRef(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void PushOntoLocalMarkStack(mirror::Object* ref) REQUIRES_SHARED(Locks::mutator_lock_);
  void ProcessMarkStackForMarkingAndComputeLiveBytes() REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!mark_stack_lock_);

  void RemoveThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AddThreadMarkStackMapping(Thread* thread, accounting::ObjectStack* tl_mark_stack)
      REQUIRES(mark_stack_lock_);
  void AssertEmptyThreadMarkStackMap() REQUIRES(mark_stack_lock_);

  space::RegionSpace* region_space_;  // The underlying region space.
  std::unique_ptr<Barrier> gc_barrier_;
  std::unique_ptr<accounting::ObjectStack> gc_mark_stack_;

  // If true, enable generational collection when using the Concurrent Copying
  // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
  // for major collections. Generational CC collection is currently only
  // compatible with Baker read barriers. Set in Heap constructor.
  const bool use_generational_cc_;

  // Generational ("sticky") collection: if true, only trace through dirty objects in region space.
  const bool young_gen_;

  // If true, the GC thread is done scanning marked objects on dirty and aged
  // cards (see ConcurrentCopying::CopyingPhase).
  Atomic<bool> done_scanning_;

  // The read-barrier mark-bit stack. Stores object references whose
  // mark bit has been set by ConcurrentCopying::MarkFromReadBarrier,
  // so that this bit can be reset at the end of the collection in
  // ConcurrentCopying::FinishPhase. The mark bit of an object can be
  // used by mutator read barrier code to quickly test whether that
  // object has been already marked.
  std::unique_ptr<accounting::ObjectStack> rb_mark_bit_stack_;
  // Thread-unsafe Boolean value hinting that `rb_mark_bit_stack_` is
  // full. A thread-safe test of whether the read-barrier mark-bit
  // stack is full is implemented by `rb_mark_bit_stack_->AtomicPushBack(ref)`
  // (see use case in ConcurrentCopying::MarkFromReadBarrier).
  bool rb_mark_bit_stack_full_;
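  //
  // A minimal sketch (assumed, inferred from the comments above) of the push pattern used from
  // the read-barrier slow path:
  //
  //   if (!rb_mark_bit_stack_full_ && !rb_mark_bit_stack_->AtomicPushBack(ref)) {
  //     rb_mark_bit_stack_full_ = true;  // stack overflowed; stop relying on the mark bit
  //   }
  //
  // AtomicPushBack() returning false doubles as the thread-safe "stack is full" test.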

  // Guards access to the pooled_mark_stacks_ and revoked_mark_stacks_ vectors.
  // It also guards the destruction and revocation of thread-local mark stacks:
  // clearing a thread-local mark stack (by another thread or during destruction)
  // must be done while holding this lock.
  Mutex mark_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<accounting::ObjectStack*> revoked_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  static constexpr size_t kMarkStackSize = kPageSize;
  static constexpr size_t kMarkStackPoolSize = 256;
  std::vector<accounting::ObjectStack*> pooled_mark_stacks_
      GUARDED_BY(mark_stack_lock_);
  // TODO(lokeshgidra b/140119552): remove this after bug fix.
  std::unordered_map<Thread*, accounting::ObjectStack*> thread_mark_stack_map_
      GUARDED_BY(mark_stack_lock_);
  Thread* thread_running_gc_;
  bool is_marking_;  // True while marking is ongoing.
  // True while we might dispatch on the read barrier entrypoints.
  bool is_using_read_barrier_entrypoints_;
  bool is_active_;  // True while the collection is ongoing.
  bool is_asserting_to_space_invariant_;  // True while asserting the to-space invariant.
  ImmuneSpaces immune_spaces_;
  accounting::ContinuousSpaceBitmap* region_space_bitmap_;
  // A cache of Heap::GetMarkBitmap().
  accounting::HeapBitmap* heap_mark_bitmap_;
  size_t live_stack_freeze_size_;
  size_t from_space_num_objects_at_first_pause_;  // Computed if kEnableFromSpaceAccountingCheck
  size_t from_space_num_bytes_at_first_pause_;  // Computed if kEnableFromSpaceAccountingCheck
  Atomic<int> is_mark_stack_push_disallowed_;
  enum MarkStackMode {
    kMarkStackModeOff = 0,      // Mark stack is off.
    kMarkStackModeThreadLocal,  // All threads except for the GC-running thread push refs onto
                                // thread-local mark stacks. The GC-running thread pushes onto and
                                // pops off the GC mark stack without a lock.
    kMarkStackModeShared,       // All threads share the GC mark stack with a lock.
    kMarkStackModeGcExclusive   // The GC-running thread pushes onto and pops from the GC mark stack
                                // without a lock. Other threads won't access the mark stack.
  };
  Atomic<MarkStackMode> mark_stack_mode_;
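  //
  // A hedged summary of how these modes are expected to be cycled, inferred from the mode
  // comments and the SwitchTo*MarkStackMode() helpers above (not a verbatim spec):
  //
  //   kMarkStackModeOff
  //     -> kMarkStackModeThreadLocal   // concurrent marking/copying; mutators use TL stacks
  //     -> kMarkStackModeShared        // SwitchToSharedMarkStackMode(); draining remaining refs
  //     -> kMarkStackModeGcExclusive   // SwitchToGcExclusiveMarkStackMode(); only the GC thread
  //     -> kMarkStackModeOff           // collection finished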
  bool weak_ref_access_enabled_ GUARDED_BY(Locks::thread_list_lock_);

  // How many objects and bytes we moved. The GC thread moves many more objects
  // than mutators. Therefore, we separate the two to avoid CAS. bytes_moved_ and
  // bytes_moved_gc_thread_ are critical for GC triggering; the others are just informative.
  Atomic<size_t> bytes_moved_;  // Used by mutators
  Atomic<size_t> objects_moved_;  // Used by mutators
  size_t bytes_moved_gc_thread_;  // Used by GC
  size_t objects_moved_gc_thread_;  // Used by GC
  Atomic<uint64_t> cumulative_bytes_moved_;
  Atomic<uint64_t> cumulative_objects_moved_;

  // copied_live_bytes_ratio_sum_ is read and written by CC per GC, in
  // ReclaimPhase, and is read by DumpPerformanceInfo (potentially from another
  // thread). However, at present, DumpPerformanceInfo is only called when the
  // runtime shuts down, so there is no concurrent access. The same reasoning
  // applies to gc_count_ and reclaimed_bytes_ratio_sum_.

  // The sum of all copied live bytes ratios (to_bytes/from_bytes).
  float copied_live_bytes_ratio_sum_;
  // The number of GCs, used to calculate the average above. (It doesn't
  // include GCs where from_bytes is zero, in other words where from-space is
  // empty, which is possible for a minor GC if all allocated objects are in
  // the non-moving space.)
  size_t gc_count_;
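  // For reference, the average reported from these members is presumably
  //   average copied live bytes ratio = copied_live_bytes_ratio_sum_ / gc_count_
  // (a sketch inferred from the comments above; see DumpPerformanceInfo for the actual reporting).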
  // Bit is set if the corresponding object has inter-region references that
  // were found during the marking phase of the two-phase full-heap GC cycle.
  accounting::ContinuousSpaceBitmap region_space_inter_region_bitmap_;
  accounting::ContinuousSpaceBitmap non_moving_space_inter_region_bitmap_;

  // reclaimed_bytes_ratio = reclaimed_bytes / num_allocated_bytes per GC cycle.
  float reclaimed_bytes_ratio_sum_;

  // The skipped blocks are memory blocks/chunks that were allocated as copy
  // destinations for objects but went unused because of lost races (CAS
  // failures) when installing the forward pointer. They may be reused.
  // Skipped blocks are always in region space. Their size is included directly
  // in num_bytes_allocated_, i.e. they are treated as allocated, but may be directly
  // used without going through a GC cycle like other objects. They are reused only
  // if we run out of region space. TODO: Revisit this design.
  Mutex skipped_blocks_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::multimap<size_t, uint8_t*> skipped_blocks_map_ GUARDED_BY(skipped_blocks_lock_);
  Atomic<size_t> to_space_bytes_skipped_;
  Atomic<size_t> to_space_objects_skipped_;
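  //
  // A rough sketch (assumed, not the exact code) of how AllocateInSkippedBlock() can reuse these
  // blocks, given that skipped_blocks_map_ is keyed by block size:
  //
  //   MutexLock mu(self, skipped_blocks_lock_);
  //   auto it = skipped_blocks_map_.lower_bound(alloc_size);  // smallest block >= alloc_size
  //   if (it == skipped_blocks_map_.end()) {
  //     return nullptr;  // no reusable block; the caller allocates in the region space instead
  //   }
  //   uint8_t* addr = it->second;
  //   skipped_blocks_map_.erase(it);  // any leftover tail would be filled with a dummy object
  //   return reinterpret_cast<mirror::Object*>(addr);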

  // If measure_read_barrier_slow_path_ is true, we count how much time is spent in
  // MarkFromReadBarrier and also log it.
  bool measure_read_barrier_slow_path_;
  // mark_from_read_barrier_measurements_ is true if systrace is enabled or
  // measure_read_barrier_slow_path_ is true.
  bool mark_from_read_barrier_measurements_;
  Atomic<uint64_t> rb_slow_path_ns_;
  Atomic<uint64_t> rb_slow_path_count_;
  Atomic<uint64_t> rb_slow_path_count_gc_;
  mutable Mutex rb_slow_path_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  Histogram<uint64_t> rb_slow_path_time_histogram_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
  uint64_t rb_slow_path_count_gc_total_ GUARDED_BY(rb_slow_path_histogram_lock_);
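  //
  // A minimal sketch (assumed) of how MarkFromReadBarrierWithMeasurements() is expected to feed
  // the atomic counters above when measurements are enabled:
  //
  //   uint64_t start = measure_read_barrier_slow_path_ ? NanoTime() : 0u;
  //   mirror::Object* ret = Mark(self, from_ref);
  //   if (measure_read_barrier_slow_path_) {
  //     rb_slow_path_ns_.fetch_add(NanoTime() - start, std::memory_order_relaxed);
  //     rb_slow_path_count_.fetch_add(1u, std::memory_order_relaxed);
  //   }
  //   return ret;
  //
  // The totals are later folded into the GUARDED_BY(rb_slow_path_histogram_lock_) members.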

  accounting::ReadBarrierTable* rb_table_;
  bool force_evacuate_all_;  // True if all regions are evacuated.
  Atomic<bool> updated_all_immune_objects_;
  bool gc_grays_immune_objects_;
  Mutex immune_gray_stack_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  std::vector<mirror::Object*> immune_gray_stack_ GUARDED_BY(immune_gray_stack_lock_);

  // Class of java.lang.Object. Filled in from WellKnownClasses in FlipCallback. Must
  // be filled in before flipping thread roots so that FillWithDummyObject can run. Not
  // an ObjPtr since the GC may transition to suspended and runnable between phases.
  mirror::Class* java_lang_Object_;

  // Sweep array free buffer, used to sweep the spaces based on an array more
  // efficiently, by recording dead objects to be freed in batches (see
  // ConcurrentCopying::SweepArray).
  MemMap sweep_array_free_buffer_mem_map_;

  // Use signed because after_gc may be larger than before_gc.
  int64_t num_bytes_allocated_before_gc_;

  class ActivateReadBarrierEntrypointsCallback;
  class ActivateReadBarrierEntrypointsCheckpoint;
  class AssertToSpaceInvariantFieldVisitor;
  class AssertToSpaceInvariantRefsVisitor;
  class ClearBlackPtrsVisitor;
  class ComputeUnevacFromSpaceLiveRatioVisitor;
  class DisableMarkingCallback;
  class DisableMarkingCheckpoint;
  class DisableWeakRefAccessCallback;
  class FlipCallback;
  template <bool kConcurrent> class GrayImmuneObjectVisitor;
  class ImmuneSpaceScanObjVisitor;
  class LostCopyVisitor;
  template <bool kNoUnEvac> class RefFieldsVisitor;
  class RevokeThreadLocalMarkStackCheckpoint;
  class ScopedGcGraysImmuneObjects;
  class ThreadFlipVisitor;
  class VerifyGrayImmuneObjectsVisitor;
  class VerifyNoFromSpaceRefsFieldVisitor;
  class VerifyNoFromSpaceRefsVisitor;
  class VerifyNoMissingCardMarkVisitor;
  class ImmuneSpaceCaptureRefsVisitor;
  template <bool kAtomicTestAndSet = false> class CaptureRootsForMarkingVisitor;
  class CaptureThreadRootsForMarkingAndCheckpoint;
  template <bool kHandleInterRegionRefs> class ComputeLiveBytesAndMarkRefFieldsVisitor;

  DISALLOW_IMPLICIT_CONSTRUCTORS(ConcurrentCopying);
};

}  // namespace collector
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_COLLECTOR_CONCURRENT_COPYING_H_