/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <unordered_set>
#include <vector>

#include <android-base/logging.h>

#include "allocator_type.h"
#include "base/atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "base/runtime_debug.h"
#include "base/safe_map.h"
#include "base/time_utils.h"
#include "gc/collector/gc_type.h"
#include "gc/collector/iteration.h"
#include "gc/collector_type.h"
#include "gc/gc_cause.h"
#include "gc/space/image_space_loading_order.h"
#include "gc/space/large_object_space.h"
#include "handle.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "read_barrier_config.h"
#include "runtime_globals.h"
#include "verify_object.h"

namespace art {

class ConditionVariable;
enum class InstructionSet;
class IsMarkedVisitor;
class Mutex;
class RootVisitor;
class StackVisitor;
class Thread;
class ThreadPool;
class TimingLogger;
class VariableSizedHandleScope;

namespace mirror {
class Class;
class Object;
}  // namespace mirror

namespace gc {

class AllocationListener;
class AllocRecordObjectMap;
class GcPauseListener;
class ReferenceProcessor;
class TaskProcessor;
class Verification;

namespace accounting {
template <typename T> class AtomicStack;
typedef AtomicStack<mirror::Object> ObjectStack;
class CardTable;
class HeapBitmap;
class ModUnionTable;
class ReadBarrierTable;
class RememberedSet;
}  // namespace accounting

namespace collector {
class ConcurrentCopying;
class GarbageCollector;
class MarkSweep;
class SemiSpace;
}  // namespace collector

namespace allocator {
class RosAlloc;
}  // namespace allocator

namespace space {
class AllocSpace;
class BumpPointerSpace;
class ContinuousMemMapAllocSpace;
class DiscontinuousSpace;
class DlMallocSpace;
class ImageSpace;
class LargeObjectSpace;
class MallocSpace;
class RegionSpace;
class RosAllocSpace;
class Space;
class ZygoteSpace;
}  // namespace space

enum HomogeneousSpaceCompactResult {
  // Success.
  kSuccess,
  // Reject due to disabled moving GC.
  kErrorReject,
  // Unsupported due to the current configuration.
  kErrorUnsupported,
  // System is shutting down.
  kErrorVMShuttingDown,
};
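
// Illustrative sketch (not part of this header): a caller of
// Heap::PerformHomogeneousSpaceCompact(), declared further below, would
// typically dispatch on this result, e.g.:
//
//   switch (heap->PerformHomogeneousSpaceCompact()) {
//     case HomogeneousSpaceCompactResult::kSuccess:
//       break;  // Memory was compacted; retry the failed allocation.
//     case HomogeneousSpaceCompactResult::kErrorReject:
//     case HomogeneousSpaceCompactResult::kErrorUnsupported:
//     case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
//       break;  // Fall back, e.g. to throwing OutOfMemoryError.
//   }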

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace.
static constexpr bool kUseRosAlloc = true;

// If true, use thread-local allocation stack.
static constexpr bool kUseThreadLocalAllocationStack = true;

class Heap {
 public:
  static constexpr size_t kDefaultStartingSize = kPageSize;
  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 256 * MB;
  static constexpr size_t kDefaultNonMovingSpaceCapacity = 64 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 32 * KB;
  static constexpr double kDefaultTargetUtilization = 0.5;
  static constexpr double kDefaultHeapGrowthMultiplier = 2.0;
  // Primitive arrays larger than this size are put in the large object space.
  static constexpr size_t kMinLargeObjectThreshold = 3 * kPageSize;
  static constexpr size_t kDefaultLargeObjectThreshold = kMinLargeObjectThreshold;
  // Whether or not parallel GC is enabled. If not, then we never create the thread pool.
  static constexpr bool kDefaultEnableParallelGC = false;
  static uint8_t* const kPreferredAllocSpaceBegin;

  // Whether or not we use the free list large object space. Only use it if USE_ART_LOW_4G_ALLOCATOR
  // since this means that we have to use the slow msync loop in MemMap::MapAnonymous.
  static constexpr space::LargeObjectSpaceType kDefaultLargeObjectSpaceType =
      USE_ART_LOW_4G_ALLOCATOR ?
          space::LargeObjectSpaceType::kFreeList
        : space::LargeObjectSpaceType::kMap;

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // Client should call NotifyNativeAllocation every kNotifyNativeInterval allocations.
  // Should be chosen so that time_to_call_mallinfo / kNotifyNativeInterval is on the same order
  // as object allocation time. time_to_call_mallinfo seems to be on the order of 1 usec.
#ifdef __ANDROID__
  static constexpr uint32_t kNotifyNativeInterval = 32;
#else
  // Some host mallinfo() implementations are slow. And memory is less scarce.
  static constexpr uint32_t kNotifyNativeInterval = 128;
#endif

  // RegisterNativeAllocation checks immediately whether GC is needed if size exceeds the
  // following. kCheckImmediatelyThreshold * kNotifyNativeInterval should be small enough to
  // make it safe to allocate that many bytes between checks.
  static constexpr size_t kCheckImmediatelyThreshold = 300000;

  // How often we allow heap trimming to happen (nanoseconds).
  static constexpr uint64_t kHeapTrimWait = MsToNs(5000);
  // How long we wait after a transition request to perform a collector transition (nanoseconds).
  static constexpr uint64_t kCollectorTransitionWait = MsToNs(5000);
  // Whether the transition-wait applies or not. Zero wait will stress the
  // transition code and collector, but increases jank probability.
  DECLARE_RUNTIME_DEBUG_FLAG(kStressCollectorTransition);

  // Create a heap with the requested sizes. The possibly empty
  // image_file_names specify Spaces to load based on
  // ImageWriter output.
  Heap(size_t initial_size,
       size_t growth_limit,
       size_t min_free,
       size_t max_free,
       double target_utilization,
       double foreground_heap_growth_multiplier,
       size_t stop_for_native_allocs,
       size_t capacity,
       size_t non_moving_space_capacity,
       const std::vector<std::string>& boot_class_path,
       const std::vector<std::string>& boot_class_path_locations,
       const std::string& image_file_name,
       InstructionSet image_instruction_set,
       CollectorType foreground_collector_type,
       CollectorType background_collector_type,
       space::LargeObjectSpaceType large_object_space_type,
       size_t large_object_threshold,
       size_t parallel_gc_threads,
       size_t conc_gc_threads,
       bool low_memory_mode,
       size_t long_pause_threshold,
       size_t long_gc_threshold,
       bool ignore_target_footprint,
       bool use_tlab,
       bool verify_pre_gc_heap,
       bool verify_pre_sweeping_heap,
       bool verify_post_gc_heap,
       bool verify_pre_gc_rosalloc,
       bool verify_pre_sweeping_rosalloc,
       bool verify_post_gc_rosalloc,
       bool gc_stress_mode,
       bool measure_gc_performance,
       bool use_homogeneous_space_compaction,
       bool use_generational_cc,
       uint64_t min_interval_homogeneous_space_compaction_by_oom,
       bool dump_region_info_before_gc,
       bool dump_region_info_after_gc,
       space::ImageSpaceLoadingOrder image_space_loading_order);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented = true, typename PreFenceVisitor>
  mirror::Object* AllocObject(Thread* self,
                              ObjPtr<mirror::Class> klass,
                              size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented>(self,
                                                   klass,
                                                   num_bytes,
                                                   GetCurrentAllocator(),
                                                   pre_fence_visitor);
  }
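
  // Illustrative sketch (not part of the API): allocating through this entry
  // point with a no-op pre-fence visitor, assuming `self` and `klass` are a
  // valid Thread* and a resolved class. The visitor runs on the new object
  // before the allocation is published to other threads:
  //
  //   auto no_op = [](ObjPtr<mirror::Object>, size_t /* usable_size */) {};
  //   mirror::Object* obj = heap->AllocObject(self, klass, byte_count, no_op);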

  template <bool kInstrumented = true, typename PreFenceVisitor>
  mirror::Object* AllocNonMovableObject(Thread* self,
                                        ObjPtr<mirror::Class> klass,
                                        size_t num_bytes,
                                        const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !Roles::uninterruptible_) {
    return AllocObjectWithAllocator<kInstrumented>(self,
                                                   klass,
                                                   num_bytes,
                                                   GetCurrentNonMovingAllocator(),
                                                   pre_fence_visitor);
  }

  template <bool kInstrumented = true, bool kCheckLargeObject = true, typename PreFenceVisitor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(Thread* self,
                                                         ObjPtr<mirror::Class> klass,
                                                         size_t byte_count,
                                                         AllocatorType allocator,
                                                         const PreFenceVisitor& pre_fence_visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!*gc_complete_lock_,
               !*pending_task_lock_,
               !*backtrace_lock_,
               !Roles::uninterruptible_);

  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }

  // Visit all of the live objects in the heap.
  template <typename Visitor>
  ALWAYS_INLINE void VisitObjects(Visitor&& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
  template <typename Visitor>
  ALWAYS_INLINE void VisitObjectsPaused(Visitor&& visitor)
      REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
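
  // Illustrative sketch (not part of the API): counting live objects with a
  // lambda visitor; the visitor is invoked once per live object:
  //
  //   size_t count = 0;
  //   heap->VisitObjects([&count](mirror::Object*) { ++count; });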

  void CheckPreconditionsForAllocObject(ObjPtr<mirror::Class> c, size_t byte_count)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Inform the garbage collector of non-malloc-allocated native memory that might become
  // reclaimable in the future as a result of Java garbage collection.
  void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
  void RegisterNativeFree(JNIEnv* env, size_t bytes);

  // Notify the garbage collector of malloc allocations that might be reclaimable
  // as a result of Java garbage collection. Each such call represents approximately
  // kNotifyNativeInterval such allocations.
  void NotifyNativeAllocations(JNIEnv* env)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  uint32_t GetNotifyNativeInterval() {
    return kNotifyNativeInterval;
  }
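
  // Illustrative sketch (not part of the API): a native allocator wrapper
  // could batch notifications at the documented kNotifyNativeInterval cadence,
  // using a hypothetical caller-side counter:
  //
  //   static thread_local uint32_t alloc_count = 0;
  //   if (++alloc_count % heap->GetNotifyNativeInterval() == 0) {
  //     heap->NotifyNativeAllocations(env);
  //   }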

  // Change the allocator; updates entrypoints.
  void ChangeAllocator(AllocatorType allocator)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type)
      REQUIRES(Locks::mutator_lock_);

  // The given reference is believed to be to an object in the Java heap; check its soundness.
  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
  // proper lock ordering for it.
  void VerifyObjectBody(ObjPtr<mirror::Object> o) NO_THREAD_SAFETY_ANALYSIS;

  // Check sanity of all live references.
  void VerifyHeap() REQUIRES(!Locks::heap_bitmap_lock_);
  // Returns how many failures occurred.
  size_t VerifyHeapReferences(bool verify_referents = true)
      REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
  bool VerifyMissingCardMarks()
      REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsValidObjectAddress(const void* obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
  // very slow.
  bool IsNonDiscontinuousSpaceHeapAddress(const void* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(ObjPtr<mirror::Object> obj,
                          bool search_allocation_stack = true,
                          bool search_live_stack = true,
                          bool sorted = false)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(ObjPtr<mirror::Object> obj) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Temporarily disables moving (compacting) GC until the matching Decrement call; calls nest.
  void IncrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
  void DecrementDisableMovingGC(Thread* self) REQUIRES(!*gc_complete_lock_);
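
  // Illustrative sketch (not part of this header): callers must balance the
  // Increment/Decrement calls above; a hypothetical RAII guard makes that
  // explicit:
  //
  //   class ScopedDisableMovingGC {  // hypothetical helper
  //    public:
  //     ScopedDisableMovingGC(Heap* heap, Thread* self) : heap_(heap), self_(self) {
  //       heap_->IncrementDisableMovingGC(self_);
  //     }
  //     ~ScopedDisableMovingGC() { heap_->DecrementDisableMovingGC(self_); }
  //    private:
  //     Heap* const heap_;
  //     Thread* const self_;
  //   };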

  // Temporarily disable thread flip for JNI critical calls.
  void IncrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void DecrementDisableThreadFlip(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipBegin(Thread* self) REQUIRES(!*thread_flip_lock_);
  void ThreadFlipEnd(Thread* self) REQUIRES(!*thread_flip_lock_);

  // Clear all of the mark bits; doesn't clear bitmaps which have the same live bits as mark bits.
  // Mutator lock is required for GetContinuousSpaces.
  void ClearMarkedObjects()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references, GcCause cause = kGcCauseExplicit)
      REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  // Does a concurrent GC; should only be called by the GC daemon thread
  // through runtime.
  void ConcurrentGC(Thread* self, GcCause cause, bool force_full)
      REQUIRES(!Locks::runtime_shutdown_lock_, !*gc_complete_lock_, !*pending_task_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<Handle<mirror::Class>>& classes,
                      bool use_is_assignable_from,
                      uint64_t* counts)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Implements VMDebug.getInstancesOfClasses and JDWP RT_Instances.
  void GetInstances(VariableSizedHandleScope& scope,
                    Handle<mirror::Class> c,
                    bool use_is_assignable_from,
                    int32_t max_count,
                    std::vector<Handle<mirror::Object>>& instances)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(VariableSizedHandleScope& scope,
                           Handle<mirror::Object> o,
                           int32_t max_count,
                           std::vector<Handle<mirror::Object>>& referring_objects)
      REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Make the current growth limit the new maximum capacity; unmaps pages at the end of spaces
  // which will never be used. Used to implement dalvik.system.VMRuntime.clampGrowthLimit.
  void ClampGrowthLimit() REQUIRES(!Locks::heap_bitmap_lock_);

  // Target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }
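
  // Illustrative note (rough relationship, not a guarantee): after a GC the
  // heap picks its next target footprint so that live data occupies roughly
  // target_utilization_ of it, clamped by the min-free/max-free bounds:
  //
  //   target_footprint ~= bytes_allocated / target_utilization_
  //
  // e.g. 32 MB live at the default 0.5 utilization suggests a ~64 MB target.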

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set the heap's private space pointers to be the same as the space based on its type. Public
  // due to usage by tests.
  void SetSpaceAsDefault(space::ContinuousSpace* continuous_space)
      REQUIRES(!Locks::heap_bitmap_lock_);
  void AddSpace(space::Space* space)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES(Locks::mutator_lock_);
  void RemoveSpace(space::Space* space)
      REQUIRES(!Locks::heap_bitmap_lock_)
      REQUIRES(Locks::mutator_lock_);

  double GetPreGcWeightedAllocatedBytes() const {
    return pre_gc_weighted_allocated_bytes_;
  }

  double GetPostGcWeightedAllocatedBytes() const {
    return post_gc_weighted_allocated_bytes_;
  }

  void CalculatePreGcWeightedAllocatedBytes();
  void CalculatePostGcWeightedAllocatedBytes();
  uint64_t GetTotalGcCpuTime();

  uint64_t GetProcessCpuStartTime() const {
    return process_cpu_start_time_ns_;
  }

  uint64_t GetPostGCLastProcessCpuTime() const {
    return post_gc_last_process_cpu_time_ns_;
  }

  // Set target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToComplete(GcCause cause, Thread* self) REQUIRES(!*gc_complete_lock_);

  // Update the heap's process state to a new value; may cause compaction to occur.
  void UpdateProcessState(ProcessState old_process_state, ProcessState new_process_state)
      REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);

  bool HaveContinuousSpaces() const NO_THREAD_SAFETY_ANALYSIS {
    // No lock needed, since checking a vector for emptiness is thread safe.
    return !continuous_spaces_.empty();
  }

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  const collector::Iteration* GetCurrentGcIteration() const {
    return &current_gc_iteration_;
  }
  collector::Iteration* GetCurrentGcIteration() {
    return &current_gc_iteration_;
  }

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kVerifyObjectSupport;
    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kVerifyObjectModeDisabled;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return verify_object_mode_ > kVerifyObjectModeDisabled;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Returns the heap growth multiplier; this affects how much we grow the heap after a GC.
  // Scales heap growth, min free, and max free.
  double HeapGrowthMultiplier() const;

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(uint64_t freed_objects, int64_t freed_bytes);

  // Record the bytes freed by thread-local buffer revoke.
  void RecordFreeRevoke();

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  accounting::ReadBarrierTable* GetReadBarrierTable() const {
    return rb_table_.get();
  }

  void AddFinalizerReference(Thread* self, ObjPtr<mirror::Object>* object);

  // Returns the number of bytes currently allocated.
  // The result should be treated as an approximation if it is being concurrently updated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_.load(std::memory_order_relaxed);
  }

  bool GetUseGenerationalCC() const {
    return use_generational_cc_;
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const
      REQUIRES(!Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  uint64_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  uint64_t GetBytesAllocatedEver() const;

  // Returns the total number of objects freed since the heap was created.
  uint64_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  uint64_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  space::RegionSpace* GetRegionSpace() const {
    return region_space_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  size_t GetMaxMemory() const {
    // There are some race conditions in the allocation code that can cause bytes allocated to
    // become larger than growth_limit_ in rare cases.
    return std::max(GetBytesAllocated(), growth_limit_);
  }

  // Implements java.lang.Runtime.totalMemory, returning the approximate amount of memory
  // currently consumed by an application.
  size_t GetTotalMemory() const;

  // Returns approximately how much free memory we have until the next GC happens.
  size_t GetFreeMemoryUntilGC() const {
    return UnsignedDifference(target_footprint_.load(std::memory_order_relaxed),
                              GetBytesAllocated());
  }

  // Returns approximately how much free memory we have until the next OOME happens.
  size_t GetFreeMemoryUntilOOME() const {
    return UnsignedDifference(growth_limit_, GetBytesAllocated());
  }

  // Returns how much free memory we have until we need to grow the heap to perform an allocation.
  // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory.
  size_t GetFreeMemory() const {
    return UnsignedDifference(GetTotalMemory(),
                              num_bytes_allocated_.load(std::memory_order_relaxed));
  }
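
  // Illustrative sketch (not part of the API): these accessors back the
  // java.lang.Runtime memory-query trio; a monitoring hook might log them as:
  //
  //   LOG(INFO) << "used=" << heap->GetBytesAllocated()
  //             << " total=" << heap->GetTotalMemory()   // Runtime.totalMemory()
  //             << " max=" << heap->GetMaxMemory()       // Runtime.maxMemory()
  //             << " freeUntilGC=" << heap->GetFreeMemoryUntilGC();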

  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using a faster data structure such as a binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(ObjPtr<mirror::Object>, bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::ContinuousSpace* FindContinuousSpaceFromAddress(const mirror::Object* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(ObjPtr<mirror::Object>,
                                                              bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::Space* FindSpaceFromObject(ObjPtr<mirror::Object> obj, bool fail_ok) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::Space* FindSpaceFromAddress(const void* ptr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  std::string DumpSpaceNameFromAddress(const void* addr) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);

  // Do a pending collector transition.
  void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);

  // Deflate monitors, ... and trim the spaces.
  void Trim(Thread* self) REQUIRES(!*gc_complete_lock_);

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeRosAllocThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();
  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
  void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
  void RosAllocVerification(TimingLogger* timings, const char* name)
      REQUIRES(Locks::mutator_lock_);

  accounting::HeapBitmap* GetLiveBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() REQUIRES_SHARED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark and empty stack.
  void FlushAllocStack()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Revoke all the thread-local allocation stacks.
  void RevokeAllThreadLocalAllocationStacks(Thread* self)
      REQUIRES(Locks::mutator_lock_, !Locks::runtime_shutdown_lock_, !Locks::thread_list_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  // TODO: Refactor?
  void MarkAllocStack(accounting::SpaceBitmap<kObjectAlignment>* bitmap1,
                      accounting::SpaceBitmap<kObjectAlignment>* bitmap2,
                      accounting::SpaceBitmap<kLargeObjectAlignment>* large_objects,
                      accounting::ObjectStack* stack)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(Locks::heap_bitmap_lock_);

  // Unbind any bound bitmaps.
  void UnBindBitmaps()
      REQUIRES(Locks::heap_bitmap_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns the boot image spaces. There may be multiple boot image spaces.
  const std::vector<space::ImageSpace*>& GetBootImageSpaces() const {
    return boot_image_spaces_;
  }

  bool ObjectIsInBootImageSpace(ObjPtr<mirror::Object> obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool IsInBootImageOatFile(const void* p) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the start address of the boot images if any; otherwise returns 0.
  uint32_t GetBootImagesStartAddress() const {
    return boot_images_start_address_;
  }

  // Get the size of all boot images, including the heap and oat areas.
  uint32_t GetBootImagesSize() const {
    return boot_images_size_;
  }

  // Check if a pointer points to a boot image.
  bool IsBootImageAddress(const void* p) const {
    return reinterpret_cast<uintptr_t>(p) - boot_images_start_address_ < boot_images_size_;
  }
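
  // Note on the check above (a standard range-check trick, stated here for
  // clarity): the subtraction is unsigned, so an address below
  // boot_images_start_address_ wraps to a huge value and fails the single
  // comparison. Absent overflow of begin + size, it is equivalent to:
  //
  //   uintptr_t begin = boot_images_start_address_;
  //   uintptr_t addr = reinterpret_cast<uintptr_t>(p);
  //   return begin <= addr && addr < begin + boot_images_size_;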

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  // Return the corresponding rosalloc space.
  space::RosAllocSpace* GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  // Returns the free list space that may contain movable objects (the
  // one that's not the non-moving space), either rosalloc_space_ or
  // dlmalloc_space_.
  space::MallocSpace* GetPrimaryFreeListSpace() {
    if (kUseRosAlloc) {
      DCHECK(rosalloc_space_ != nullptr);
      // reinterpret_cast is necessary as the space class hierarchy
      // isn't known (#included) yet here.
      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
    } else {
      DCHECK(dlmalloc_space_ != nullptr);
      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
    }
  }

  void DumpSpaces(std::ostream& stream) const REQUIRES_SHARED(Locks::mutator_lock_);
  std::string DumpSpaces() const REQUIRES_SHARED(Locks::mutator_lock_);

  // GC performance measuring.
  void DumpGcPerformanceInfo(std::ostream& os)
      REQUIRES(!*gc_complete_lock_);
  void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  accounting::RememberedSet* FindRememberedSetFromSpace(space::Space* space);
  void AddRememberedSet(accounting::RememberedSet* remembered_set);
  // Also deletes the remembered set.
  void RemoveRememberedSet(space::Space* space);

  bool IsCompilingBoot() const;
  bool HasBootImageSpace() const {
    return !boot_image_spaces_.empty();
  }

  ReferenceProcessor* GetReferenceProcessor() {
    return reference_processor_.get();
  }
  TaskProcessor* GetTaskProcessor() {
    return task_processor_.get();
  }

  bool HasZygoteSpace() const {
    return zygote_space_ != nullptr;
  }

  // Returns the active concurrent copying collector.
  collector::ConcurrentCopying* ConcurrentCopyingCollector() {
    if (use_generational_cc_) {
      DCHECK((active_concurrent_copying_collector_ == concurrent_copying_collector_) ||
             (active_concurrent_copying_collector_ == young_concurrent_copying_collector_));
    } else {
      DCHECK_EQ(active_concurrent_copying_collector_, concurrent_copying_collector_);
    }
    return active_concurrent_copying_collector_;
  }

  CollectorType CurrentCollectorType() {
    return collector_type_;
  }

  bool IsGcConcurrentAndMoving() const {
    if (IsGcConcurrent() && IsMovingGc(collector_type_)) {
      // Assume no transition when a concurrent moving collector is used.
      DCHECK_EQ(collector_type_, foreground_collector_type_);
      return true;
    }
    return false;
  }

  bool IsMovingGCDisabled(Thread* self) REQUIRES(!*gc_complete_lock_) {
    MutexLock mu(self, *gc_complete_lock_);
    return disable_moving_gc_count_ > 0;
  }

  // Request an asynchronous trim.
  void RequestTrim(Thread* self) REQUIRES(!*pending_task_lock_);

  // Request an asynchronous GC.
  void RequestConcurrentGC(Thread* self, GcCause cause, bool force_full)
      REQUIRES(!*pending_task_lock_);

  // Whether or not we may use a garbage collector, used so that we only create collectors we need.
  bool MayUseCollector(CollectorType type) const;

  // Used by tests to reduce timing-dependent flakiness in OOME behavior.
  void SetMinIntervalHomogeneousSpaceCompactionByOom(uint64_t interval) {
    min_interval_homogeneous_space_compaction_by_oom_ = interval;
  }

  // Helpers for android.os.Debug.getRuntimeStat().
  uint64_t GetGcCount() const;
  uint64_t GetGcTime() const;
  uint64_t GetBlockingGcCount() const;
  uint64_t GetBlockingGcTime() const;
  void DumpGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);
  void DumpBlockingGcCountRateHistogram(std::ostream& os) const REQUIRES(!*gc_complete_lock_);

  // Allocation tracking support.
  // Callers to this function use double-checked locking to ensure safety on allocation_records_.
  bool IsAllocTrackingEnabled() const {
    return alloc_tracking_enabled_.load(std::memory_order_relaxed);
  }

  void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) {
    alloc_tracking_enabled_.store(enabled, std::memory_order_relaxed);
  }
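
  // Illustrative sketch (not part of the API): the double-checked locking
  // pattern the comment above refers to; the cheap unlocked flag read avoids
  // taking the lock on the common (tracking disabled) path:
  //
  //   if (heap->IsAllocTrackingEnabled()) {            // unlocked fast check
  //     MutexLock mu(self, *Locks::alloc_tracker_lock_);
  //     if (heap->IsAllocTrackingEnabled()) {          // re-check under the lock
  //       AllocRecordObjectMap* records = heap->GetAllocationRecords();
  //       // ... use records; tracking cannot be disabled while the lock is held.
  //     }
  //   }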
842
Mathieu Chartier0a206072019-03-28 12:29:22 -0700843 // Return the current stack depth of allocation records.
844 size_t GetAllocTrackerStackDepth() const {
845 return alloc_record_depth_;
846 }
847
848 // Return the current stack depth of allocation records.
849 void SetAllocTrackerStackDepth(size_t alloc_record_depth) {
850 alloc_record_depth_ = alloc_record_depth;
851 }
852
853 AllocRecordObjectMap* GetAllocationRecords() const REQUIRES(Locks::alloc_tracker_lock_) {
Man Cao8c2ff642015-05-27 17:25:30 -0700854 return allocation_records_.get();
855 }
856
857 void SetAllocationRecords(AllocRecordObjectMap* records)
Mathieu Chartier90443472015-07-16 20:32:27 -0700858 REQUIRES(Locks::alloc_tracker_lock_);
Man Cao8c2ff642015-05-27 17:25:30 -0700859
Man Cao1ed11b92015-06-11 22:47:35 -0700860 void VisitAllocationRecords(RootVisitor* visitor) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700861 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartier90443472015-07-16 20:32:27 -0700862 REQUIRES(!Locks::alloc_tracker_lock_);
Man Cao1ed11b92015-06-11 22:47:35 -0700863
Mathieu Chartier97509952015-07-13 14:35:43 -0700864 void SweepAllocationRecords(IsMarkedVisitor* visitor) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700865 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartier90443472015-07-16 20:32:27 -0700866 REQUIRES(!Locks::alloc_tracker_lock_);
Man Cao42c3c332015-06-23 16:38:25 -0700867
868 void DisallowNewAllocationRecords() const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700869 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartier90443472015-07-16 20:32:27 -0700870 REQUIRES(!Locks::alloc_tracker_lock_);
Man Cao42c3c332015-06-23 16:38:25 -0700871
872 void AllowNewAllocationRecords() const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700873 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartier90443472015-07-16 20:32:27 -0700874 REQUIRES(!Locks::alloc_tracker_lock_);
Man Cao42c3c332015-06-23 16:38:25 -0700875
Hiroshi Yamauchifdbd13c2015-09-02 16:16:58 -0700876 void BroadcastForNewAllocationRecords() const
Hiroshi Yamauchifdbd13c2015-09-02 16:16:58 -0700877 REQUIRES(!Locks::alloc_tracker_lock_);
878
Mathieu Chartier51168372015-08-12 16:40:32 -0700879 void DisableGCForShutdown() REQUIRES(!*gc_complete_lock_);
880
Mathieu Chartierdb00eaf2015-08-31 17:10:05 -0700881 // Create a new alloc space and compact default alloc space to it.
882 HomogeneousSpaceCompactResult PerformHomogeneousSpaceCompact() REQUIRES(!*gc_complete_lock_);
883 bool SupportHomogeneousSpaceCompactAndCollectorTransitions() const;
884
Andreas Gampe27fa96c2016-10-07 15:05:24 -0700885 // Install an allocation listener.
886 void SetAllocationListener(AllocationListener* l);
887 // Remove an allocation listener. Note: the listener must not be deleted, since for
888 // performance reasons we read it without a lock and assume it stays valid.
889 void RemoveAllocationListener();
890
Andreas Gampe9b8c5882016-10-21 15:27:46 -0700891 // Install a gc pause listener.
892 void SetGcPauseListener(GcPauseListener* l);
893 // Get the currently installed gc pause listener, or null.
894 GcPauseListener* GetGcPauseListener() {
Orion Hodson88591fe2018-03-06 13:35:43 +0000895 return gc_pause_listener_.load(std::memory_order_acquire);
Andreas Gampe9b8c5882016-10-21 15:27:46 -0700896 }
897 // Remove a gc pause listener. Note: the listener must not be deleted, since for
898 // performance reasons we read it without a lock and assume it stays valid.
899 void RemoveGcPauseListener();
900
Mathieu Chartier1ca68902017-04-18 11:26:22 -0700901 const Verification* GetVerification() const;
902
Mathieu Chartiera98a2822017-05-24 16:14:10 -0700903 void PostForkChildAction(Thread* self);
904
Lokesh Gidraea5b4292019-08-08 16:27:21 -0700905 void TraceHeapSize(size_t heap_size);
906
Carl Shapiro58551df2011-07-24 03:09:51 -0700907 private:
Mathieu Chartiera5eae692014-12-17 17:56:03 -0800908 class ConcurrentGCTask;
909 class CollectorTransitionTask;
910 class HeapTrimTask;
Mathieu Chartiera98a2822017-05-24 16:14:10 -0700911 class TriggerPostForkCCGcTask;
Mathieu Chartiera5eae692014-12-17 17:56:03 -0800912
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -0800913 // Compact source space to target space. Returns the collector used.
914 collector::GarbageCollector* Compact(space::ContinuousMemMapAllocSpace* target_space,
915 space::ContinuousMemMapAllocSpace* source_space,
916 GcCause gc_cause)
Mathieu Chartier90443472015-07-16 20:32:27 -0700917 REQUIRES(Locks::mutator_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -0700918
Hiroshi Yamauchie4d99872015-02-26 12:53:45 -0800919 void LogGC(GcCause gc_cause, collector::GarbageCollector* collector);
Mathieu Chartieraa516822015-10-02 15:53:37 -0700920 void StartGC(Thread* self, GcCause cause, CollectorType collector_type)
921 REQUIRES(!*gc_complete_lock_);
Mathieu Chartier90443472015-07-16 20:32:27 -0700922 void FinishGC(Thread* self, collector::GcType gc_type) REQUIRES(!*gc_complete_lock_);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800923
Albert Mingkun Yang6e0d3252018-12-10 15:22:45 +0000924 double CalculateGcWeightedAllocatedBytes(uint64_t gc_last_process_cpu_time_ns,
925 uint64_t current_process_cpu_time) const;
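  //
  // Conceptually (a sketch, not necessarily the exact implementation), this
  // weights the allocated byte count by the process CPU time elapsed since the
  // last GC:
  //
  //   uint64_t weight = current_process_cpu_time - gc_last_process_cpu_time_ns;
  //   return weight * static_cast<double>(bytes_allocated);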
926
Mathieu Chartierb363f662014-07-16 13:28:58 -0700927 // Create a mem map with a preferred base address.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100928 static MemMap MapAnonymousPreferredAddress(const char* name,
929 uint8_t* request_begin,
930 size_t capacity,
931 std::string* out_error_str);
Mathieu Chartierb363f662014-07-16 13:28:58 -0700932
Zuo Wangf37a88b2014-07-10 04:26:41 -0700933 bool SupportHSpaceCompaction() const {
Mathieu Chartierb363f662014-07-16 13:28:58 -0700934 // Returns true if we can do hspace compaction.
Zuo Wangf37a88b2014-07-10 04:26:41 -0700935 return main_space_backup_ != nullptr;
936 }
937
Hans Boehm7c73dd12019-02-06 00:20:18 +0000938 // Saturating arithmetic for size_t.
Hans Boehmc220f982018-10-12 16:15:45 -0700939 static ALWAYS_INLINE size_t UnsignedDifference(size_t x, size_t y) {
940 return x > y ? x - y : 0;
941 }
Hans Boehm7c73dd12019-02-06 00:20:18 +0000942 static ALWAYS_INLINE size_t UnsignedSum(size_t x, size_t y) {
943 return x + y >= x ? x + y : std::numeric_limits<size_t>::max();
944 }
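  //
  // For example (illustrative values):
  //
  //   UnsignedDifference(10u, 25u);  // == 0 rather than a huge wrapped value.
  //   UnsignedSum(std::numeric_limits<size_t>::max(), 1u);  // Saturates at SIZE_MAX.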
Hans Boehmc220f982018-10-12 16:15:45 -0700945
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800946 static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
947 return
Hans Boehmc220f982018-10-12 16:15:45 -0700948 allocator_type != kAllocatorTypeRegionTLAB &&
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800949 allocator_type != kAllocatorTypeBumpPointer &&
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800950 allocator_type != kAllocatorTypeTLAB &&
Hans Boehmc220f982018-10-12 16:15:45 -0700951 allocator_type != kAllocatorTypeRegion;
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800952 }
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800953 static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
Mathieu Chartier5ace2012016-11-30 10:15:41 -0800954 if (kUseReadBarrier) {
955 // The read barrier config may use the TLAB allocator, but its GC is always concurrent. TODO: clean this up.
956 return true;
957 }
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800958 return
Hans Boehmc220f982018-10-12 16:15:45 -0700959 allocator_type != kAllocatorTypeTLAB &&
960 allocator_type != kAllocatorTypeBumpPointer;
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800961 }
Mathieu Chartier31f44142014-04-08 14:40:03 -0700962 static bool IsMovingGc(CollectorType collector_type) {
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700963 return
Hans Boehmc220f982018-10-12 16:15:45 -0700964 collector_type == kCollectorTypeCC ||
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700965 collector_type == kCollectorTypeSS ||
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -0700966 collector_type == kCollectorTypeCCBackground ||
Zuo Wangf37a88b2014-07-10 04:26:41 -0700967 collector_type == kCollectorTypeHomogeneousSpaceCompact;
Mathieu Chartier9be9a7a2014-01-24 14:07:33 -0800968 }
Mathieu Chartier9d156d52016-10-06 17:44:26 -0700969 bool ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_count) const
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700970 REQUIRES_SHARED(Locks::mutator_lock_);
Hans Boehmc220f982018-10-12 16:15:45 -0700971
972 // Checks whether we should garbage collect:
973 ALWAYS_INLINE bool ShouldConcurrentGCForJava(size_t new_num_bytes_allocated);
Hans Boehm7c73dd12019-02-06 00:20:18 +0000974 float NativeMemoryOverTarget(size_t current_native_bytes, bool is_gc_concurrent);
Hans Boehmc220f982018-10-12 16:15:45 -0700975 ALWAYS_INLINE void CheckConcurrentGCForJava(Thread* self,
976 size_t new_num_bytes_allocated,
977 ObjPtr<mirror::Object>* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700978 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700979 REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
Hans Boehm7c73dd12019-02-06 00:20:18 +0000980 void CheckGCForNative(Thread* self)
Hans Boehmc220f982018-10-12 16:15:45 -0700981 REQUIRES(!*pending_task_lock_, !*gc_complete_lock_);
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700982
Mathieu Chartiereb8167a2014-05-07 15:43:14 -0700983 accounting::ObjectStack* GetMarkStack() {
984 return mark_stack_.get();
985 }
986
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800987 // We don't force this to be inlined since it is a slow path.
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800988 template <bool kInstrumented, typename PreFenceVisitor>
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700989 mirror::Object* AllocLargeObject(Thread* self,
Mathieu Chartier9d156d52016-10-06 17:44:26 -0700990 ObjPtr<mirror::Class>* klass,
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700991 size_t byte_count,
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800992 const PreFenceVisitor& pre_fence_visitor)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -0700993 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartier90443472015-07-16 20:32:27 -0700994 REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800995
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700996 // Handles Allocate()'s slow allocation path with GC involved after
997 // an initial allocation attempt failed.
Mathieu Chartiera4f6af92015-08-11 17:35:25 -0700998 mirror::Object* AllocateInternalWithGc(Thread* self,
999 AllocatorType allocator,
Mathieu Chartiereebc3af2016-02-29 18:13:38 -08001000 bool instrumented,
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001001 size_t num_bytes,
1002 size_t* bytes_allocated,
1003 size_t* usable_size,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001004 size_t* bytes_tl_bulk_allocated,
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001005 ObjPtr<mirror::Class>* klass)
Mathieu Chartier90443472015-07-16 20:32:27 -07001006 REQUIRES(!Locks::thread_suspend_count_lock_, !*gc_complete_lock_, !*pending_task_lock_)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001007 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartiera6399032012-06-11 18:49:50 -07001008
Mathieu Chartier590fee92013-09-13 13:46:47 -07001009 // Allocate into a specific space.
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001010 mirror::Object* AllocateInto(Thread* self,
1011 space::AllocSpace* space,
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001012 ObjPtr<mirror::Class> c,
Mathieu Chartier590fee92013-09-13 13:46:47 -07001013 size_t bytes)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001014 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001015
Mathieu Chartier31f44142014-04-08 14:40:03 -07001016 // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
1017 // wrong space.
Mathieu Chartier90443472015-07-16 20:32:27 -07001018 void SwapSemiSpaces() REQUIRES(Locks::mutator_lock_);
Mathieu Chartier31f44142014-04-08 14:40:03 -07001019
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001020 // Try to allocate a number of bytes; this function never does any GCs. Needs to be inlined so
1021 // that the switch statement is constant optimized in the entrypoints.
Mathieu Chartierc528dba2013-11-26 12:00:11 -08001022 template <const bool kInstrumented, const bool kGrow>
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001023 ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self,
1024 AllocatorType allocator_type,
1025 size_t alloc_size,
1026 size_t* bytes_allocated,
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001027 size_t* usable_size,
1028 size_t* bytes_tl_bulk_allocated)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001029 REQUIRES_SHARED(Locks::mutator_lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001030
Mathieu Chartier5ace2012016-11-30 10:15:41 -08001031 mirror::Object* AllocWithNewTLAB(Thread* self,
1032 size_t alloc_size,
1033 bool grow,
1034 size_t* bytes_allocated,
1035 size_t* usable_size,
1036 size_t* bytes_tl_bulk_allocated)
1037 REQUIRES_SHARED(Locks::mutator_lock_);
1038
Hiroshi Yamauchi654dd482014-07-09 12:54:32 -07001039 void ThrowOutOfMemoryError(Thread* self, size_t byte_count, AllocatorType allocator_type)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001040 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001041
Hans Boehmc220f982018-10-12 16:15:45 -07001042 // Are we out of memory, and thus should force a GC or fail?
1043 // For concurrent collectors, out of memory is defined by growth_limit_.
1044 // For nonconcurrent collectors it is defined by target_footprint_ unless grow is
1045 // set. If grow is set, the limit is growth_limit_ and we adjust target_footprint_
1046 // to accommodate the allocation.
Mathieu Chartier5ace2012016-11-30 10:15:41 -08001047 ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type,
1048 size_t alloc_size,
1049 bool grow);
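  //
  // A simplified sketch of the limit selection described above, with
  // is_gc_concurrent standing in for whether the chosen collector is concurrent
  // (the adjustment of target_footprint_ in the grow case is omitted):
  //
  //   size_t limit = (is_gc_concurrent || grow)
  //       ? growth_limit_
  //       : target_footprint_.load(std::memory_order_relaxed);
  //   return UnsignedSum(num_bytes_allocated_.load(std::memory_order_relaxed),
  //                      alloc_size) > limit;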
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -07001050
Mathieu Chartierb5de3bb2015-06-05 13:21:05 -07001051 // Run the finalizers. If timeout is non-zero, then we use the VMRuntime version.
1052 void RunFinalization(JNIEnv* env, uint64_t timeout);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001053
1054 // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
1055 // waited for.
Mathieu Chartier89a201e2014-05-02 10:27:26 -07001056 collector::GcType WaitForGcToCompleteLocked(GcCause cause, Thread* self)
Mathieu Chartier90443472015-07-16 20:32:27 -07001057 REQUIRES(gc_complete_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001058
Mathieu Chartier7bf52d22014-03-13 14:46:09 -07001059 void RequestCollectorTransition(CollectorType desired_collector_type, uint64_t delta_time)
Mathieu Chartier90443472015-07-16 20:32:27 -07001060 REQUIRES(!*pending_task_lock_);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001061
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001062 void RequestConcurrentGCAndSaveObject(Thread* self, bool force_full, ObjPtr<mirror::Object>* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001063 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001064 REQUIRES(!*pending_task_lock_);
Mathieu Chartier987ccff2013-07-08 11:05:21 -07001065 bool IsGCRequestPending() const;
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001066
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001067 // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
1068 // which type of GC was actually run.
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001069 collector::GcType CollectGarbageInternal(collector::GcType gc_plan,
1070 GcCause gc_cause,
Ian Rogers1d54e732013-05-02 21:10:01 -07001071 bool clear_soft_references)
Mathieu Chartier90443472015-07-16 20:32:27 -07001072 REQUIRES(!*gc_complete_lock_, !Locks::heap_bitmap_lock_, !Locks::thread_suspend_count_lock_,
1073 !*pending_task_lock_);
Mathieu Chartier2b82db42012-11-14 17:29:05 -08001074
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001075 void PreGcVerification(collector::GarbageCollector* gc)
Mathieu Chartier90443472015-07-16 20:32:27 -07001076 REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001077 void PreGcVerificationPaused(collector::GarbageCollector* gc)
Mathieu Chartier90443472015-07-16 20:32:27 -07001078 REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001079 void PrePauseRosAllocVerification(collector::GarbageCollector* gc)
Mathieu Chartier90443472015-07-16 20:32:27 -07001080 REQUIRES(Locks::mutator_lock_);
Ian Rogers1d54e732013-05-02 21:10:01 -07001081 void PreSweepingGcVerification(collector::GarbageCollector* gc)
Mathieu Chartier90443472015-07-16 20:32:27 -07001082 REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
Mathieu Chartierad2541a2013-10-25 10:05:23 -07001083 void PostGcVerification(collector::GarbageCollector* gc)
Mathieu Chartier90443472015-07-16 20:32:27 -07001084 REQUIRES(!Locks::mutator_lock_, !*gc_complete_lock_);
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001085 void PostGcVerificationPaused(collector::GarbageCollector* gc)
Mathieu Chartier90443472015-07-16 20:32:27 -07001086 REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001087
Mathieu Chartierafe49982014-03-27 10:55:04 -07001088 // Find a collector based on GC type.
1089 collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
1090
Zuo Wangf37a88b2014-07-10 04:26:41 -07001091 // Create the main free list malloc space, either a RosAlloc space or DlMalloc space.
Vladimir Markoc34bebf2018-08-16 16:12:49 +01001092 void CreateMainMallocSpace(MemMap&& mem_map,
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001093 size_t initial_size,
1094 size_t growth_limit,
Mathieu Chartier31f44142014-04-08 14:40:03 -07001095 size_t capacity);
1096
Zuo Wangf37a88b2014-07-10 04:26:41 -07001097 // Create a malloc space based on a mem map. Does not set the space as default.
Vladimir Markoc34bebf2018-08-16 16:12:49 +01001098 space::MallocSpace* CreateMallocSpaceFromMemMap(MemMap&& mem_map,
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001099 size_t initial_size,
1100 size_t growth_limit,
1101 size_t capacity,
1102 const char* name,
1103 bool can_move_objects);
Zuo Wangf37a88b2014-07-10 04:26:41 -07001104
Ian Rogers3bb17a62012-01-27 23:56:44 -08001105 // Given the current contents of the alloc space, increase the allowed heap footprint to match
1106 // the target utilization ratio. This should only be called immediately after a full garbage
Mathieu Chartiere2c2f6e2014-12-16 18:49:31 -08001107 // collection. bytes_allocated_before_gc is used to measure bytes / second for the period during
1108 // which the GC ran.
1109 void GrowForUtilization(collector::GarbageCollector* collector_ran,
Hans Boehmc220f982018-10-12 16:15:45 -07001110 size_t bytes_allocated_before_gc = 0);
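  //
  // Roughly (a sketch; the real computation also involves the foreground heap
  // growth multiplier and min_free_/max_free_ clamping): with
  // target_utilization_ == 0.75 and 60MB live after a full collection, the new
  // footprint target is about 60MB / 0.75 == 80MB.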
Carl Shapiro69759ea2011-07-21 18:13:35 -07001111
Mathieu Chartier637e3482012-08-17 10:41:32 -07001112 size_t GetPercentFree();
Elliott Hughesc967f782012-04-16 10:23:15 -07001113
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001114 // Swap the allocation stack with the live stack.
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001115 void SwapStacks() REQUIRES_SHARED(Locks::mutator_lock_);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001116
Lei Li4add3b42015-01-15 11:55:26 +08001117 // Clear cards and update the mod-union table. The alloc space is processed only when
1118 // process_alloc_space_cards is true; in that case we clear its cards if
1119 // clear_alloc_space_cards is true and age them otherwise.
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001120 void ProcessCards(TimingLogger* timings,
1121 bool use_rem_sets,
1122 bool process_alloc_space_cards,
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08001123 bool clear_alloc_space_cards)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001124 REQUIRES_SHARED(Locks::mutator_lock_);
Mathieu Chartier7469ebf2012-09-24 16:28:36 -07001125
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001126 // Push an object onto the allocation stack.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001127 void PushOnAllocationStack(Thread* self, ObjPtr<mirror::Object>* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001128 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001129 REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001130 void PushOnAllocationStackWithInternalGC(Thread* self, ObjPtr<mirror::Object>* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001131 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001132 REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001133 void PushOnThreadLocalAllocationStackWithInternalGC(Thread* thread, ObjPtr<mirror::Object>* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001134 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartiera4f6af92015-08-11 17:35:25 -07001135 REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
Hiroshi Yamauchif5b0e202014-02-11 17:02:22 -08001136
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001137 void ClearConcurrentGCRequest();
Mathieu Chartier90443472015-07-16 20:32:27 -07001138 void ClearPendingTrim(Thread* self) REQUIRES(!*pending_task_lock_);
1139 void ClearPendingCollectorTransition(Thread* self) REQUIRES(!*pending_task_lock_);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001140
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07001141 // Whether the configured GC runs concurrently with mutators. Currently true for the concurrent
1142 // copying and concurrent mark sweep collectors, false for other GC types.
1143 bool IsGcConcurrent() const ALWAYS_INLINE {
Hans Boehmc220f982018-10-12 16:15:45 -07001144 return collector_type_ == kCollectorTypeCC ||
1145 collector_type_ == kCollectorTypeCMS ||
Hiroshi Yamauchi60985b72016-08-24 13:53:12 -07001146 collector_type_ == kCollectorTypeCCBackground;
Hiroshi Yamauchi3e417802014-03-20 12:03:02 -07001147 }
1148
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001149 // Trim the managed and native spaces by releasing unused memory back to the OS.
Mathieu Chartier90443472015-07-16 20:32:27 -07001150 void TrimSpaces(Thread* self) REQUIRES(!*gc_complete_lock_);
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001151
1152 // Trim unused pages at the end of indirect reference tables.
1153 void TrimIndirectReferenceTables(Thread* self);
1154
Andreas Gampe351c4472017-07-12 19:32:55 -07001155 template <typename Visitor>
1156 ALWAYS_INLINE void VisitObjectsInternal(Visitor&& visitor)
1157 REQUIRES_SHARED(Locks::mutator_lock_)
1158 REQUIRES(!Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1159 template <typename Visitor>
1160 ALWAYS_INLINE void VisitObjectsInternalRegionSpace(Visitor&& visitor)
1161 REQUIRES(Locks::mutator_lock_, !Locks::heap_bitmap_lock_, !*gc_complete_lock_);
1162
Mathieu Chartier90443472015-07-16 20:32:27 -07001163 void UpdateGcCountRateHistograms() REQUIRES(gc_complete_lock_);
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001164
Mathieu Chartier31000802015-06-14 14:14:37 -07001165 // GC stress mode attempts to do one GC per unique backtrace.
Mathieu Chartier9d156d52016-10-06 17:44:26 -07001166 void CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj)
Andreas Gampebdf7f1c2016-08-30 16:38:47 -07001167 REQUIRES_SHARED(Locks::mutator_lock_)
Mathieu Chartier90443472015-07-16 20:32:27 -07001168 REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
Mathieu Chartier31000802015-06-14 14:14:37 -07001169
Richard Uhlercaaa2b02017-02-01 09:54:17 +00001170 collector::GcType NonStickyGcType() const {
1171 return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
1172 }
1173
Hans Boehmc220f982018-10-12 16:15:45 -07001174 // Return the amount of space we allow for native memory when deciding whether to
1175 // collect. We collect when a weighted sum of Java memory plus native memory exceeds
1176 // the similarly weighted sum of the Java heap size target and this value.
Richard Uhlercaaa2b02017-02-01 09:54:17 +00001177 ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
Hans Boehm15752672018-12-18 17:01:00 -08001178 // We keep the traditional limit of max_free_ in place for small heaps,
1179 // but allow it to be adjusted upward for large heaps to limit GC overhead.
1180 return target_footprint_.load(std::memory_order_relaxed) / 8 + max_free_;
Richard Uhlercaaa2b02017-02-01 09:54:17 +00001181 }
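  //
  // Worked example with illustrative numbers: a 64MiB target footprint and an
  // 8MiB max_free_ give a watermark of 64MiB / 8 + 8MiB == 16MiB.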
1182
Orion Hodson82cf9a22018-03-27 16:36:32 +01001183 ALWAYS_INLINE void IncrementNumberOfBytesFreedRevoke(size_t freed_bytes_revoke);
1184
Andreas Gampe170331f2017-12-07 18:41:03 -08001185 // Keeps a VLOG call out of heap-inl.h, which is transitively included in half the world.
1186 static void VlogHeapGrowth(size_t max_allowed_footprint, size_t new_footprint, size_t alloc_size);
1187
Hans Boehmc220f982018-10-12 16:15:45 -07001188 // Return our best approximation of the number of bytes of native memory that
1189 // are currently in use, and could possibly be reclaimed as an indirect result
1190 // of a garbage collection.
1191 size_t GetNativeBytes();
1192
Ian Rogers1d54e732013-05-02 21:10:01 -07001193 // All-known continuous spaces, where objects lie within fixed bounds.
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08001194 std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001195
Ian Rogers1d54e732013-05-02 21:10:01 -07001196 // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
Mathieu Chartiera9d82fe2016-01-25 20:06:11 -08001197 std::vector<space::DiscontinuousSpace*> discontinuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001198
Mathieu Chartier590fee92013-09-13 13:46:47 -07001199 // All-known alloc spaces, where objects may be or have been allocated.
1200 std::vector<space::AllocSpace*> alloc_spaces_;
1201
1202 // A space where non-movable objects are allocated; when compaction is enabled it contains
1203 // Classes, ArtMethods, ArtFields, and other non-moving objects.
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -07001204 space::MallocSpace* non_moving_space_;
Ian Rogers1d54e732013-05-02 21:10:01 -07001205
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001206 // Space which we use for the kAllocatorTypeROSAlloc.
1207 space::RosAllocSpace* rosalloc_space_;
1208
1209 // Space which we use for the kAllocatorTypeDlMalloc.
1210 space::DlMallocSpace* dlmalloc_space_;
1211
Mathieu Chartierfc5b5282014-01-09 16:15:36 -08001212 // The main space is the space which the GC copies to and from on process state updates. This
1213 // space is typically either the dlmalloc_space_ or the rosalloc_space_.
1214 space::MallocSpace* main_space_;
1215
Ian Rogers1d54e732013-05-02 21:10:01 -07001216 // The large object space we are currently allocating into.
1217 space::LargeObjectSpace* large_object_space_;
1218
1219 // The card table, dirtied by the write barrier.
Ian Rogers700a4022014-05-19 16:49:03 -07001220 std::unique_ptr<accounting::CardTable> card_table_;
Brian Carlstrom4a289ed2011-08-16 17:17:49 -07001221
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001222 std::unique_ptr<accounting::ReadBarrierTable> rb_table_;
1223
Mathieu Chartier11409ae2013-09-23 11:49:36 -07001224 // A mod-union table remembers all of the references from its space to other spaces.
Mathieu Chartierbad02672014-08-25 13:08:22 -07001225 AllocationTrackingSafeMap<space::Space*, accounting::ModUnionTable*, kAllocatorTagHeap>
1226 mod_union_tables_;
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001227
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001228 // A remembered set remembers all of the references from its space to the target space.
Mathieu Chartierbad02672014-08-25 13:08:22 -07001229 AllocationTrackingSafeMap<space::Space*, accounting::RememberedSet*, kAllocatorTagHeap>
1230 remembered_sets_;
Hiroshi Yamauchi38e68e92014-03-07 13:59:08 -08001231
Mathieu Chartier0de9f732013-11-22 17:58:48 -08001232 // The current collector type.
1233 CollectorType collector_type_;
Mathieu Chartier31f44142014-04-08 14:40:03 -07001234 // Which collector we use when the app is in the foreground.
1235 CollectorType foreground_collector_type_;
Mathieu Chartiere6da9af2013-12-16 11:54:42 -08001236 // Which collector we will use when the app is notified of a transition to background.
1237 CollectorType background_collector_type_;
Mathieu Chartiera5f9de02014-02-28 16:48:42 -08001238 // Desired collector type, heap trimming daemon transitions the heap if it is != collector_type_.
1239 CollectorType desired_collector_type_;
1240
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001241 // Lock which guards pending tasks.
1242 Mutex* pending_task_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001243
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001244 // How many GC threads we may use for paused parts of garbage collection.
1245 const size_t parallel_gc_threads_;
1246
1247 // How many GC threads we may use for unpaused parts of garbage collection.
1248 const size_t conc_gc_threads_;
Mathieu Chartier63a54342013-07-23 13:17:59 -07001249
Mathieu Chartiere0a53e92013-08-05 10:17:40 -07001250 // Boolean for if we are in low memory mode.
1251 const bool low_memory_mode_;
1252
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001253 // If we get a pause longer than long pause log threshold, then we print out the GC after it
1254 // finishes.
1255 const size_t long_pause_log_threshold_;
1256
1257 // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
1258 const size_t long_gc_log_threshold_;
1259
Albert Mingkun Yang2d7329b2018-11-30 19:58:18 +00001260 // Starting time of the new process; meant to be used for measuring total process CPU time.
1261 uint64_t process_cpu_start_time_ns_;
1262
Albert Mingkun Yang6e0d3252018-12-10 15:22:45 +00001263 // Process CPU time sampled just before and just after the last GC; used to measure the
1264 // duration between two GCs.
1265 uint64_t pre_gc_last_process_cpu_time_ns_;
1266 uint64_t post_gc_last_process_cpu_time_ns_;
Albert Mingkun Yang2d7329b2018-11-30 19:58:18 +00001267
Albert Mingkun Yang6e0d3252018-12-10 15:22:45 +00001268 // allocated_bytes * (current_process_cpu_time - [pre|post]_gc_last_process_cpu_time)
1269 double pre_gc_weighted_allocated_bytes_;
1270 double post_gc_weighted_allocated_bytes_;
Albert Mingkun Yang2d7329b2018-11-30 19:58:18 +00001271
Hans Boehmc220f982018-10-12 16:15:45 -07001272 // If we ignore the target footprint, the heap grows until it hits the heap capacity. This
1273 // is useful for benchmarking since it reduces time spent in GC to a low %.
1274 const bool ignore_target_footprint_;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001275
Mathieu Chartier8e4a96d2014-05-21 10:44:32 -07001276 // Lock which guards zygote space creation.
1277 Mutex zygote_creation_lock_;
1278
Mathieu Chartiere4cab172014-08-19 18:24:04 -07001279 // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
1280 // zygote space creation.
1281 space::ZygoteSpace* zygote_space_;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001282
Mathieu Chartierbd0a6532014-02-27 11:14:21 -08001283 // Minimum allocation size of a large object.
1284 size_t large_object_threshold_;
1285
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001286 // Guards access to the state of GC; the associated condition variable is used to signal when a GC
1287 // completes.
1288 Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
Ian Rogers700a4022014-05-19 16:49:03 -07001289 std::unique_ptr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001290
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -07001291 // Used to synchronize between JNI critical calls and the thread flip of the CC collector.
1292 Mutex* thread_flip_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1293 std::unique_ptr<ConditionVariable> thread_flip_cond_ GUARDED_BY(thread_flip_lock_);
Hiroshi Yamauchi20a0be02016-02-19 15:44:06 -08001294 // This counter keeps track of how many threads are currently in a JNI critical section. This is
1295 // incremented once per thread even with nested enters.
Hiroshi Yamauchi76f55b02015-08-21 16:10:39 -07001296 size_t disable_thread_flip_count_ GUARDED_BY(thread_flip_lock_);
1297 bool thread_flip_running_ GUARDED_BY(thread_flip_lock_);
1298
Mathieu Chartier78f7b4c2014-05-06 10:57:27 -07001299 // Reference processor.
Mathieu Chartier3cf22532015-07-09 15:15:09 -07001300 std::unique_ptr<ReferenceProcessor> reference_processor_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001301
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001302 // Task processor, proxies heap trim requests to the daemon threads.
1303 std::unique_ptr<TaskProcessor> task_processor_;
1304
Mathieu Chartier40112dd2017-06-26 17:49:09 -07001305 // Collector type of the running GC.
Mathieu Chartierd5a89ee2014-01-31 09:55:13 -08001306 volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_);
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001307
Mathieu Chartier40112dd2017-06-26 17:49:09 -07001308 // Cause of the last running GC.
1309 volatile GcCause last_gc_cause_ GUARDED_BY(gc_complete_lock_);
1310
Mathieu Chartier183009a2017-02-16 21:19:28 -08001311 // The thread currently running the GC.
1312 volatile Thread* thread_running_gc_ GUARDED_BY(gc_complete_lock_);
1313
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001314 // Last GC type we ran. Used by WaitForConcurrentGc to know which GC was waited on.
Ian Rogers1d54e732013-05-02 21:10:01 -07001315 volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
Mathieu Chartierbdd0fb92013-07-02 10:16:15 -07001316 collector::GcType next_gc_type_;
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -07001317
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001318 // Maximum size that the heap can reach.
Mathieu Chartier379d09f2015-01-08 11:28:13 -08001319 size_t capacity_;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001320
Ian Rogers1d54e732013-05-02 21:10:01 -07001321 // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
1322 // programs it is "cleared" making it the same as capacity.
Hans Boehmc220f982018-10-12 16:15:45 -07001323 // Only weakly enforced for simultaneous allocations.
Mathieu Chartier2fde5332012-09-14 14:51:54 -07001324 size_t growth_limit_;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001325
Hans Boehmc220f982018-10-12 16:15:45 -07001326 // Target size (as in maximum allocatable bytes) for the heap. Weakly enforced as a limit for
1327 // non-concurrent GC. Used as a guideline for computing concurrent_start_bytes_ in the
1328 // concurrent GC case.
1329 Atomic<size_t> target_footprint_;
Mathieu Chartier2775ee42013-08-20 17:43:47 -07001330
Ian Rogers1d54e732013-05-02 21:10:01 -07001331 // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
1332 // it completes ahead of an allocation failing.
Hans Boehmc220f982018-10-12 16:15:45 -07001333 // A multiple of this is also used to determine when to trigger a GC in response to native
1334 // allocation.
Mathieu Chartier0051be62012-10-12 17:47:11 -07001335 size_t concurrent_start_bytes_;
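  //
  // Sketch of the trigger (simplified): after an allocation bumps
  // num_bytes_allocated_, a concurrent GC is requested once
  //
  //   new_num_bytes_allocated >= concurrent_start_bytes_
  //
  // so that the collection can finish before target_footprint_ is reached.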
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -07001336
Ian Rogers1d54e732013-05-02 21:10:01 -07001337 // Since the heap was created, how many bytes have been freed.
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001338 uint64_t total_bytes_freed_ever_;
Ian Rogers1d54e732013-05-02 21:10:01 -07001339
1340 // Since the heap was created, how many objects have been freed.
Mathieu Chartierdd162fb2014-08-06 17:06:33 -07001341 uint64_t total_objects_freed_ever_;
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001342
Hans Boehmfb8b4e22018-09-05 16:45:42 -07001343 // Number of bytes currently allocated and not yet reclaimed. Includes active
1344 // TLABs in their entirety, even if they have not yet been parceled out.
Ian Rogersef7d42f2014-01-06 12:55:46 -08001345 Atomic<size_t> num_bytes_allocated_;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001346
Hans Boehmc220f982018-10-12 16:15:45 -07001347 // Number of registered native bytes allocated. Adjusted after each RegisterNativeAllocation and
1348 // RegisterNativeFree. Used to help determine when to trigger GC for native allocations. Should
1349 // not include bytes allocated through the system malloc, since those are implicitly included.
1350 Atomic<size_t> native_bytes_registered_;
Mathieu Chartier987ccff2013-07-08 11:05:21 -07001351
Hans Boehmc220f982018-10-12 16:15:45 -07001352 // Approximately the smallest value of GetNativeBytes() we've seen since the last GC.
Richard Uhlercaaa2b02017-02-01 09:54:17 +00001353 Atomic<size_t> old_native_bytes_allocated_;
1354
Hans Boehmc220f982018-10-12 16:15:45 -07001355 // Total number of native objects of which we were notified since the beginning of time, mod 2^32.
1356 // Allows us to check for GC only roughly every kNotifyNativeInterval allocations.
1357 Atomic<uint32_t> native_objects_notified_;
1358
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -07001359 // Number of bytes freed by thread local buffer revokes. This will
1360 // cancel out the ahead-of-time bulk counting of bytes allocated in
1361 // rosalloc thread-local buffers. It is temporarily accumulated
1362 // here to be subtracted from num_bytes_allocated_ later at the next
1363 // GC.
1364 Atomic<size_t> num_bytes_freed_revoke_;
1365
Mathieu Chartier10fb83a2014-06-15 15:15:43 -07001366 // Info related to the current or previous GC iteration.
1367 collector::Iteration current_gc_iteration_;
1368
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001369 // Heap verification flags.
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001370 const bool verify_missing_card_marks_;
1371 const bool verify_system_weaks_;
1372 const bool verify_pre_gc_heap_;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001373 const bool verify_pre_sweeping_heap_;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001374 const bool verify_post_gc_heap_;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001375 const bool verify_mod_union_table_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001376 bool verify_pre_gc_rosalloc_;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001377 bool verify_pre_sweeping_rosalloc_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001378 bool verify_post_gc_rosalloc_;
Mathieu Chartier31000802015-06-14 14:14:37 -07001379 const bool gc_stress_mode_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001380
1381 // RAII that temporarily disables the rosalloc verification during
1382 // the zygote fork.
1383 class ScopedDisableRosAllocVerification {
1384 private:
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001385 Heap* const heap_;
1386 const bool orig_verify_pre_gc_;
1387 const bool orig_verify_pre_sweeping_;
1388 const bool orig_verify_post_gc_;
1389
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001390 public:
1391 explicit ScopedDisableRosAllocVerification(Heap* heap)
1392 : heap_(heap),
1393 orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_),
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001394 orig_verify_pre_sweeping_(heap_->verify_pre_sweeping_rosalloc_),
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001395 orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) {
1396 heap_->verify_pre_gc_rosalloc_ = false;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001397 heap_->verify_pre_sweeping_rosalloc_ = false;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001398 heap_->verify_post_gc_rosalloc_ = false;
1399 }
1400 ~ScopedDisableRosAllocVerification() {
1401 heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001402 heap_->verify_pre_sweeping_rosalloc_ = orig_verify_pre_sweeping_;
Hiroshi Yamauchia4adbfd2014-02-04 18:12:17 -08001403 heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_;
1404 }
1405 };
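  //
  // Typical usage during the zygote fork (the call site shown is an
  // illustrative assumption):
  //
  //   {
  //     ScopedDisableRosAllocVerification disable_rosalloc_verification(this);
  //     // ... fork happens here with rosalloc verification turned off ...
  //   }  // Destructor restores the original verification flags.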
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001406
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001407 // Parallel GC data structures.
Ian Rogers700a4022014-05-19 16:49:03 -07001408 std::unique_ptr<ThreadPool> thread_pool_;
Mathieu Chartier02b6a782012-10-26 13:51:26 -07001409
Roland Levillain8f7ea9a2018-01-26 17:27:59 +00001410 // A bitmap with bits set for the known live objects since the last GC cycle.
Ian Rogers700a4022014-05-19 16:49:03 -07001411 std::unique_ptr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
Roland Levillain8f7ea9a2018-01-26 17:27:59 +00001412 // A bitmap with bits set for the objects marked in the current GC cycle.
Ian Rogers700a4022014-05-19 16:49:03 -07001413 std::unique_ptr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
Mathieu Chartierb062fdd2012-07-03 09:51:48 -07001414
Mathieu Chartiere0f0cb32012-08-28 11:26:00 -07001415 // Mark stack that we reuse to avoid re-allocating the mark stack.
Ian Rogers700a4022014-05-19 16:49:03 -07001416 std::unique_ptr<accounting::ObjectStack> mark_stack_;
Mathieu Chartier5301cd22012-05-31 12:11:36 -07001417
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001418 // Allocation stack; new allocations go here so that we can do sticky mark bits. This enables us
1419 // to use the live bitmap as the old mark bitmap.
Mathieu Chartierd8195f12012-10-05 12:21:28 -07001420 const size_t max_allocation_stack_size_;
Ian Rogers700a4022014-05-19 16:49:03 -07001421 std::unique_ptr<accounting::ObjectStack> allocation_stack_;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001422
1423 // Second allocation stack so that we can process allocation with the heap unlocked.
Ian Rogers700a4022014-05-19 16:49:03 -07001424 std::unique_ptr<accounting::ObjectStack> live_stack_;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001425
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001426 // Allocator type.
Mathieu Chartier50482232013-11-21 11:48:14 -08001427 AllocatorType current_allocator_;
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001428 const AllocatorType current_non_moving_allocator_;
1429
Roland Levillainef012222017-06-21 16:28:06 +01001430 // Which GCs we run in order when an allocation fails.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -08001431 std::vector<collector::GcType> gc_plan_;
1432
Mathieu Chartier590fee92013-09-13 13:46:47 -07001433 // Bump pointer spaces.
1434 space::BumpPointerSpace* bump_pointer_space_;
1435 // Temp space is the space which the semispace collector copies to.
1436 space::BumpPointerSpace* temp_space_;
1437
Roland Levillain8f7ea9a2018-01-26 17:27:59 +00001438 // Region space, used by the concurrent collector.
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001439 space::RegionSpace* region_space_;
1440
Mathieu Chartier0051be62012-10-12 17:47:11 -07001441 // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
1442 // utilization, regardless of target utilization ratio.
Hans Boehmc220f982018-10-12 16:15:45 -07001443 const size_t min_free_;
Mathieu Chartier0051be62012-10-12 17:47:11 -07001444
1445 // The ideal maximum free size, when we grow the heap for utilization.
Hans Boehmc220f982018-10-12 16:15:45 -07001446 const size_t max_free_;
Mathieu Chartier0051be62012-10-12 17:47:11 -07001447
Roland Levillain99bd16b2018-02-21 14:18:15 +00001448 // Target ideal heap utilization ratio.
Mathieu Chartier0051be62012-10-12 17:47:11 -07001449 double target_utilization_;
Brian Carlstrom395520e2011-09-25 19:35:00 -07001450
Mathieu Chartier2f8da3e2014-04-15 15:37:02 -07001451 // How much more we grow the heap when we are a foreground app instead of background.
1452 double foreground_heap_growth_multiplier_;
1453
Hans Boehmbb2467b2019-03-29 22:55:06 -07001454 // The amount of native memory allocation since the last GC required to cause us to wait for a
1455 // collection as a result of native allocation. Very large values can cause the device to run
1456 // out of memory, due to lack of finalization to reclaim native memory. Making it too small can
1457 // cause jank in apps like launcher that intentionally allocate large amounts of memory in rapid
1458 // succession. (b/122099093) 1/4 to 1/3 of physical memory seems to be a good number.
1459 const size_t stop_for_native_allocs_;
1460
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001461 // Total time for which mutators are paused or waiting for GC to complete.
Mathieu Chartier155dfe92012-10-09 14:24:49 -07001462 uint64_t total_wait_time_;
1463
Ian Rogers04d7aa92013-03-16 14:29:17 -07001464 // The current state of heap verification, may be enabled or disabled.
Mathieu Chartier4e305412014-02-19 10:54:44 -08001465 VerifyObjectMode verify_object_mode_;
Ian Rogers04d7aa92013-03-16 14:29:17 -07001466
Mathieu Chartier1d27b342014-01-28 12:51:09 -08001467 // Compacting GC disable count; prevents compacting GC from running iff > 0.
1468 size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);
Mathieu Chartier590fee92013-09-13 13:46:47 -07001469
1470 std::vector<collector::GarbageCollector*> garbage_collectors_;
1471 collector::SemiSpace* semi_space_collector_;
Mathieu Chartier8d1a9962016-08-17 16:39:45 -07001472 collector::ConcurrentCopying* active_concurrent_copying_collector_;
1473 collector::ConcurrentCopying* young_concurrent_copying_collector_;
Hiroshi Yamauchid5307ec2014-03-27 21:07:51 -07001474 collector::ConcurrentCopying* concurrent_copying_collector_;
Brian Carlstrom1f870082011-08-23 16:02:11 -07001475
Evgenii Stepanov1e133742015-05-20 12:30:59 -07001476 const bool is_running_on_memory_tool_;
Mathieu Chartier692fafd2013-11-29 17:24:40 -08001477 const bool use_tlab_;
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -07001478
Zuo Wangf37a88b2014-07-10 04:26:41 -07001479 // Pointer to the space which becomes the new main space when we do homogeneous space compaction.
Mathieu Chartierb363f662014-07-16 13:28:58 -07001480 // Use unique_ptr since the space is only added during the homogeneous compaction phase.
1481 std::unique_ptr<space::MallocSpace> main_space_backup_;
Zuo Wangf37a88b2014-07-10 04:26:41 -07001482
1483 // Minimal interval allowed between two homogeneous space compactions caused by OOM.
1484 uint64_t min_interval_homogeneous_space_compaction_by_oom_;
1485
1486 // Time of the last homogeneous space compaction caused by OOM.
1487 uint64_t last_time_homogeneous_space_compaction_by_oom_;
1488
1489 // OOMs avoided by homogeneous space compaction.
1490 Atomic<size_t> count_delayed_oom_;
1491
1492 // Count for requested homogeneous space compaction.
1493 Atomic<size_t> count_requested_homogeneous_space_compaction_;
1494
1495 // Count for ignored homogeneous space compaction.
1496 Atomic<size_t> count_ignored_homogeneous_space_compaction_;
1497
1498 // Count for performed homogeneous space compaction.
1499 Atomic<size_t> count_performed_homogeneous_space_compaction_;
1500
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001501 // Whether or not a concurrent GC is pending.
1502 Atomic<bool> concurrent_gc_pending_;
1503
1504 // Active tasks which we can modify (change target time, desired collector type, etc.).
1505 CollectorTransitionTask* pending_collector_transition_ GUARDED_BY(pending_task_lock_);
1506 HeapTrimTask* pending_heap_trim_ GUARDED_BY(pending_task_lock_);
1507
Zuo Wangf37a88b2014-07-10 04:26:41 -07001508 // Whether or not we use homogeneous space compaction to avoid OOM errors.
1509 bool use_homogeneous_space_compaction_for_oom_;
1510
Albert Mingkun Yang0b4d1462018-11-29 13:25:35 +00001511 // If true, enable generational collection when using the Concurrent Copying
1512 // (CC) collector, i.e. use sticky-bit CC for minor collections and (full) CC
1513 // for major collections. Set in Heap constructor.
1514 const bool use_generational_cc_;
1515
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001516 // True if the currently running collection has made some thread wait.
1517 bool running_collection_is_blocking_ GUARDED_BY(gc_complete_lock_);
1518 // The number of blocking GC runs.
1519 uint64_t blocking_gc_count_;
1520 // The total duration of blocking GC runs.
1521 uint64_t blocking_gc_time_;
1522 // The duration of the window for the GC count rate histograms.
1523 static constexpr uint64_t kGcCountRateHistogramWindowDuration = MsToNs(10 * 1000); // 10s.
Vincent Palomarescc17d072019-01-28 11:14:01 -08001524 // Maximum number of missed histogram windows for which statistics will be collected.
1525 static constexpr uint64_t kGcCountRateHistogramMaxNumMissedWindows = 100;
Hiroshi Yamauchia1c9f012015-04-02 10:18:12 -07001526 // The last time when the GC count rate histograms were updated.
1527 // This is rounded down to a multiple of kGcCountRateHistogramWindowDuration (10s).
1528 uint64_t last_update_time_gc_count_rate_histograms_;
1529 // The running count of GC runs in the last window.
1530 uint64_t gc_count_last_window_;
1531 // The running count of blocking GC runs in the last window.
1532 uint64_t blocking_gc_count_last_window_;
1533 // The maximum number of buckets in the GC count rate histograms.
1534 static constexpr size_t kGcCountRateMaxBucketCount = 200;
1535 // The histogram of the number of GC invocations per window duration.
1536 Histogram<uint64_t> gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
1537 // The histogram of the number of blocking GC invocations per window duration.
1538 Histogram<uint64_t> blocking_gc_count_rate_histogram_ GUARDED_BY(gc_complete_lock_);
1539
Man Cao8c2ff642015-05-27 17:25:30 -07001540 // Allocation tracking support
1541 Atomic<bool> alloc_tracking_enabled_;
Mathieu Chartier458b1052016-03-29 14:02:55 -07001542 std::unique_ptr<AllocRecordObjectMap> allocation_records_;
Mathieu Chartier0a206072019-03-28 12:29:22 -07001543 size_t alloc_record_depth_;
Man Cao8c2ff642015-05-27 17:25:30 -07001544
Mathieu Chartier31000802015-06-14 14:14:37 -07001545 // GC stress related data structures.
1546 Mutex* backtrace_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1547 // Debugging variables, seen backtraces vs unique backtraces.
1548 Atomic<uint64_t> seen_backtrace_count_;
1549 Atomic<uint64_t> unique_backtrace_count_;
1550 // Stack trace hashes that we have already seen.
1551 std::unordered_set<uint64_t> seen_backtraces_ GUARDED_BY(backtrace_lock_);
1552
Mathieu Chartier51168372015-08-12 16:40:32 -07001553 // We disable GC when we are shutting down the runtime in case there are daemon threads still
1554 // allocating.
1555 bool gc_disabled_for_shutdown_ GUARDED_BY(gc_complete_lock_);
1556
Albert Mingkun Yangde94ea72018-11-16 10:15:49 +00001557 // Turned on by -XX:DumpRegionInfoBeforeGC and -XX:DumpRegionInfoAfterGC to
1558 // emit region info before and after each GC cycle.
1559 bool dump_region_info_before_gc_;
1560 bool dump_region_info_after_gc_;
1561
Jeff Haodcdc85b2015-12-04 14:06:18 -08001562 // Boot image spaces.
1563 std::vector<space::ImageSpace*> boot_image_spaces_;
Mathieu Chartier073b16c2015-11-10 14:13:23 -08001564
Vladimir Marko7cde4582019-07-05 13:26:11 +01001565 // Boot image address range. Includes images and oat files.
1566 uint32_t boot_images_start_address_;
1567 uint32_t boot_images_size_;
1568
Andreas Gampe27fa96c2016-10-07 15:05:24 -07001569 // An installed allocation listener.
1570 Atomic<AllocationListener*> alloc_listener_;
Andreas Gampe9b8c5882016-10-21 15:27:46 -07001571 // An installed GC Pause listener.
1572 Atomic<GcPauseListener*> gc_pause_listener_;
Andreas Gampe27fa96c2016-10-07 15:05:24 -07001573
Mathieu Chartier1ca68902017-04-18 11:26:22 -07001574 std::unique_ptr<Verification> verification_;
1575
Mathieu Chartiera5eae692014-12-17 17:56:03 -08001576 friend class CollectorTransitionTask;
Mathieu Chartier6f365cc2014-04-23 12:42:27 -07001577 friend class collector::GarbageCollector;
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -08001578 friend class collector::ConcurrentCopying;
Ian Rogers1d54e732013-05-02 21:10:01 -07001579 friend class collector::MarkSweep;
Mathieu Chartier590fee92013-09-13 13:46:47 -07001580 friend class collector::SemiSpace;
Alex Light3b8aa772018-08-13 15:55:44 -07001581 friend class GCCriticalSection;
Mathieu Chartier39e32612013-11-12 16:28:05 -08001582 friend class ReferenceQueue;
Mathieu Chartieraa516822015-10-02 15:53:37 -07001583 friend class ScopedGCCriticalSection;
Mathieu Chartierc7b83a02012-09-11 18:07:39 -07001584 friend class VerifyReferenceCardVisitor;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001585 friend class VerifyReferenceVisitor;
1586 friend class VerifyObjectVisitor;
Ian Rogers30fab402012-01-23 15:43:46 -08001587
Carl Shapiro69759ea2011-07-21 18:13:35 -07001588 DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
1589};
1590
Ian Rogers1d54e732013-05-02 21:10:01 -07001591} // namespace gc
Carl Shapiro1fb86202011-06-27 17:43:13 -07001592} // namespace art
1593
Brian Carlstromfc0e3212013-07-17 14:40:12 -07001594#endif // ART_RUNTIME_GC_HEAP_H_