/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "atomic_integer.h"
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/gc_cause.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "jni.h"
#include "locks.h"
#include "offsets.h"
#include "reference_queue.h"
#include "root_visitor.h"
#include "safe_map.h"
#include "thread_pool.h"

namespace art {

class ConditionVariable;
class Mutex;
class StackVisitor;
class Thread;
class TimingLogger;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {
namespace accounting {
  class HeapBitmap;
  class ModUnionTable;
  class ObjectSet;
}  // namespace accounting

namespace collector {
  class GarbageCollector;
  class MarkSweep;
  class SemiSpace;
}  // namespace collector

namespace space {
  class AllocSpace;
  class BumpPointerSpace;
  class DiscontinuousSpace;
  class DlMallocSpace;
  class ImageSpace;
  class LargeObjectSpace;
  class MallocSpace;
  class RosAllocSpace;
  class Space;
  class SpaceTest;
  class ContinuousMemMapAllocSpace;
}  // namespace space

class AgeCardVisitor {
 public:
  byte operator()(byte card) const {
    if (card == accounting::CardTable::kCardDirty) {
      return card - 1;
    } else {
      return 0;
    }
  }
};
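
// Usage sketch (illustrative only; `age` is a hypothetical local): card aging
// decays "dirty" toward "clean" one GC cycle at a time.
//   AgeCardVisitor age;
//   byte card = accounting::CardTable::kCardDirty;
//   card = age(card);  // kCardDirty - 1: card was dirtied before the last GC.
//   card = age(card);  // 0: anything older than one aging step goes clean.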

// Different types of allocators.
enum AllocatorType {
  kAllocatorTypeBumpPointer,  // Use BumpPointer allocator, has entrypoints.
  kAllocatorTypeTLAB,  // Use TLAB allocator, has entrypoints.
  kAllocatorTypeRosAlloc,  // Use RosAlloc allocator, has entrypoints.
  kAllocatorTypeDlMalloc,  // Use dlmalloc allocator, has entrypoints.
  kAllocatorTypeNonMoving,  // Special allocator for non moving objects, doesn't have entrypoints.
  kAllocatorTypeLOS,  // Large object space, also doesn't have entrypoints.
};
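// Behavioral note (a summary, not normative): the moving collectors
// (semi-space/generational semi-space) allocate via kAllocatorTypeBumpPointer
// or kAllocatorTypeTLAB, while the mark-sweep collectors use
// kAllocatorTypeRosAlloc or kAllocatorTypeDlMalloc depending on kUseRosAlloc
// below.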

// How we want to sanity check the heap's correctness.
enum HeapVerificationMode {
  kHeapVerificationNotPermitted,  // Too early in runtime start-up for heap to be verified.
  kNoHeapVerification,  // Production default.
  kVerifyAllFast,  // Sanity check all heap accesses with quick(er) tests.
  kVerifyAll  // Sanity check all heap accesses.
};
static constexpr HeapVerificationMode kDesiredHeapVerification = kNoHeapVerification;

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace.
static constexpr bool kUseRosAlloc = true;

// The process state passed in from the activity manager, used to determine when to do trimming
// and compaction.
enum ProcessState {
  kProcessStateJankPerceptible = 0,
  kProcessStateJankImperceptible = 1,
};
std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);
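
// Behavioral note (an assumption based on the transition plumbing in Heap):
// moving to kProcessStateJankImperceptible may let UpdateProcessState() trigger
// TransitionCollector() to the background collector type, while moving back to
// kProcessStateJankPerceptible restores the foreground collector.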

class Heap {
 public:
  // If true, measure the total allocation time.
  static constexpr bool kMeasureAllocationTime = false;
  // Primitive arrays larger than this size are put in the large object space.
  static constexpr size_t kLargeObjectThreshold = 3 * kPageSize;

  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 32 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 256 * KB;

  // Default target utilization.
  static constexpr double kDefaultTargetUtilization = 0.5;
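  // Worked example (assuming the growth policy sketched at GrowForUtilization
  // below): with 8 MB live after a GC, the raw target footprint is
  // 8 MB / 0.5 = 16 MB, which the max-free clamp then caps at
  // 8 MB + kDefaultMaxFree = 10 MB.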

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // Create a heap with the requested sizes. The possibly empty
  // original_image_file_name names an image Space to load based on
  // ImageWriter output.
  explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
                size_t max_free, double target_utilization, size_t capacity,
                const std::string& original_image_file_name,
                CollectorType post_zygote_collector_type, CollectorType background_collector_type,
                size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
                size_t long_pause_threshold, size_t long_gc_threshold,
                bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap,
                bool verify_post_gc_heap);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented>
  mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentAllocator());
  }
  template <bool kInstrumented>
  mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentNonMovingAllocator());
  }
  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor = VoidFunctor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
      Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
      const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
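  // Usage sketch (hypothetical call site; assumes `self` is the current
  // attached Thread* and `klass` is a resolved, initialized class):
  //   mirror::Object* obj =
  //       heap->AllocObject<true>(self, klass, klass->GetObjectSize());
  // <kInstrumented> selects the instrumented entrypoints used when allocation
  // tracking or debug instrumentation is active.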

  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }

  // Visit all of the live objects in the heap.
  void VisitObjects(ObjectVisitorCallback callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ThrowOutOfMemoryError(size_t byte_count, bool large_object_allocation);

  void RegisterNativeAllocation(JNIEnv* env, int bytes);
  void RegisterNativeFree(JNIEnv* env, int bytes);
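  // Pairing sketch (illustrative; mirrors dalvik.system.VMRuntime usage): a
  // native buffer owned by a Java object should register its size up front and
  // unregister the same size when freed, so the native watermarks stay balanced:
  //   heap->RegisterNativeAllocation(env, size);
  //   ...  // native buffer lifetime
  //   heap->RegisterNativeFree(env, size);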

  // Change the allocator, updates entrypoints.
  void ChangeAllocator(AllocatorType allocator);

  // Transition the garbage collector during runtime, may copy objects from one space to another.
  void TransitionCollector(CollectorType collector_type);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type);

  // The given reference is believed to be to an object in the Java heap, check the soundness of it.
  void VerifyObjectImpl(const mirror::Object* o);
  void VerifyObject(const mirror::Object* o) {
    if (o != nullptr && this != nullptr && verify_object_mode_ > kNoHeapVerification) {
      VerifyObjectImpl(o);
    }
  }
  // Check that c.getClass() == c.getClass().getClass().
  bool VerifyClassClass(const mirror::Class* c) const;

  // Check sanity of all live references.
  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  bool VerifyHeapReferences()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool VerifyMissingCardMarks()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsValidObjectAddress(const mirror::Object* obj) const;

  // Returns true if the address passed in is a heap address, doesn't need to be aligned.
  bool IsHeapAddress(const mirror::Object* obj) const;

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(const mirror::Object* obj, bool search_allocation_stack = true,
                          bool search_live_stack = true, bool sorted = false)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(const mirror::Object* obj) const;

  // Returns true if an object is in the temp space; if so, it is usually indicative of
  // compaction related errors.
  bool IsInTempSpace(const mirror::Object* obj) const;

  // Enables us to temporarily prevent compacting (moving) GC until the matching Decrement call.
  void IncrementDisableMovingGC(Thread* self);
  void DecrementDisableMovingGC(Thread* self);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references) LOCKS_EXCLUDED(Locks::mutator_lock_);

  // Does a concurrent GC; should only be called by the GC daemon thread
  // through the runtime.
  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                      uint64_t* counts)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP RT_Instances.
  void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(mirror::Object* o, int32_t max_count,
                           std::vector<mirror::Object*>& referring_objects)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);

  // Update the heap's process state to a new value, may cause compaction to occur.
  void UpdateProcessState(ProcessState process_state);

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  void SetReferenceOffsets(MemberOffset reference_referent_offset,
                           MemberOffset reference_queue_offset,
                           MemberOffset reference_queueNext_offset,
                           MemberOffset reference_pendingNext_offset,
                           MemberOffset finalizer_reference_zombie_offset);
  MemberOffset GetReferenceReferentOffset() const {
    return reference_referent_offset_;
  }
  MemberOffset GetReferenceQueueOffset() const {
    return reference_queue_offset_;
  }
  MemberOffset GetReferenceQueueNextOffset() const {
    return reference_queueNext_offset_;
  }
  MemberOffset GetReferencePendingNextOffset() const {
    return reference_pendingNext_offset_;
  }
  MemberOffset GetFinalizerReferenceZombieOffset() const {
    return finalizer_reference_zombie_offset_;
  }
  static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
  void ProcessReferences(TimingLogger& timings, bool clear_soft, RootVisitor* is_marked_callback,
                         RootVisitor* recursive_mark_object_callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
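  // Processing-order sketch (a summary of the usual java.lang.ref semantics,
  // not a contract): soft references are preserved or cleared first, then weak
  // references, then finalizable objects are queued for finalization, then
  // phantom references; cleared references accumulate in cleared_references_
  // until they are pushed out to the managed heap for enqueueing.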

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kDesiredHeapVerification;
    if (verify_object_mode_ > kNoHeapVerification) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kHeapVerificationNotPermitted;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return kDesiredHeapVerification > kNoHeapVerification &&
        verify_object_mode_ > kHeapVerificationNotPermitted;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(int64_t freed_objects, int64_t freed_bytes);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field.
  void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
                         const mirror::Object* /*new_value*/) {
    card_table_->MarkCard(dst);
  }

  // Write barrier for array operations that update many field positions.
  void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
                         size_t /*length TODO: element_count or byte_count?*/) {
    card_table_->MarkCard(dst);
  }

  void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
    card_table_->MarkCard(obj);
  }
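
  // Caller sketch (illustrative; real barriers are emitted by the interpreter
  // and compiled-code entrypoints, and SetFieldObject here stands in for the
  // actual store): after writing a reference field, dirty the holder's card so
  // a concurrent or sticky GC will rescan it:
  //   dst->SetFieldObject(offset, new_value, false);
  //   heap->WriteBarrierField(dst, offset, new_value);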

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  void AddFinalizerReference(Thread* self, mirror::Object* object);

  // Returns the number of bytes currently allocated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_;
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  size_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  size_t GetBytesAllocatedEver() const;

  // Returns the total number of objects freed since the heap was created.
  size_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  size_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  int64_t GetMaxMemory() const {
    return growth_limit_;
  }

  // Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an
  // application.
  int64_t GetTotalMemory() const;

  // Implements java.lang.Runtime.freeMemory.
  int64_t GetFreeMemory() const {
    return GetTotalMemory() - num_bytes_allocated_;
  }
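
  // Relationship sketch (mirrors java.lang.Runtime): GetFreeMemory() <=
  // GetTotalMemory() <= GetMaxMemory(). Note that this is headroom within the
  // current heap footprint, not OS-level free RAM.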

  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using faster data structure like binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
                                                              bool fail_ok) const;
  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;

  void DumpForSigQuit(std::ostream& os);

  // Trim the managed and native heaps by releasing unused memory back to the OS.
  void Trim();

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();

  accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS;

  // Mark and empty stack.
  void FlushAllocStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  void MarkAllocStack(accounting::SpaceBitmap* bitmap1, accounting::SpaceBitmap* bitmap2,
                      accounting::ObjectSet* large_objects, accounting::ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark the specified allocation stack as live.
  void MarkAllocStackAsLive(accounting::ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unbind any bound bitmaps.
  void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
  // Assumes there is only one image space.
  space::ImageSpace* GetImageSpace() const;

  space::DlMallocSpace* GetDlMallocSpace() const {
    return dlmalloc_space_;
  }

  space::RosAllocSpace* GetRosAllocSpace() const {
    return rosalloc_space_;
  }

  space::MallocSpace* GetNonMovingSpace() const {
    return non_moving_space_;
  }

  space::LargeObjectSpace* GetLargeObjectsSpace() const {
    return large_object_space_;
  }

  // Returns the free list space that may contain movable objects (the
  // one that's not the non-moving space), either rosalloc_space_ or
  // dlmalloc_space_.
  space::MallocSpace* GetPrimaryFreeListSpace() {
    if (kUseRosAlloc) {
      DCHECK(rosalloc_space_ != nullptr);
      // reinterpret_cast is necessary as the space class hierarchy
      // isn't known (#included) yet here.
      return reinterpret_cast<space::MallocSpace*>(rosalloc_space_);
    } else {
      DCHECK(dlmalloc_space_ != nullptr);
      return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_);
    }
  }

  void DumpSpaces(std::ostream& stream = LOG(INFO));

  // GC performance measuring.
  void DumpGcPerformanceInfo(std::ostream& os);

  // Returns true if we currently care about pause times.
  bool CareAboutPauseTimes() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }
  size_t GetParallelGCThreadCount() const {
    return parallel_gc_threads_;
  }
  size_t GetConcGCThreadCount() const {
    return conc_gc_threads_;
  }
  accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space);
  void AddModUnionTable(accounting::ModUnionTable* mod_union_table);

  bool IsCompilingBoot() const;
  bool HasImageSpace() const;

 private:
  void Compact(space::ContinuousMemMapAllocSpace* target_space,
               space::ContinuousMemMapAllocSpace* source_space);

  bool StartGC(Thread* self, bool is_compacting) LOCKS_EXCLUDED(gc_complete_lock_);
  void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_);

  static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) {
    return
        allocator_type != kAllocatorTypeBumpPointer &&
        allocator_type != kAllocatorTypeTLAB;
  }
  static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) {
    return AllocatorHasAllocationStack(allocator_type);
  }
552 return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS;
553 }
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800554 bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const;
555 ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
556 mirror::Object* obj);
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700557
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800558 // We don't force this to be inlined since it is a slow path.
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800559 template <bool kInstrumented, typename PreFenceVisitor>
560 mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count,
561 const PreFenceVisitor& pre_fence_visitor)
562 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
563
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700564 // Handles Allocate()'s slow allocation path with GC involved after
565 // an initial allocation attempt failed.
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800566 mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes,
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800567 size_t* bytes_allocated, mirror::Class** klass)
Ian Rogersb726dcb2012-09-05 08:57:23 -0700568 LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
569 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Mathieu Chartiera6399032012-06-11 18:49:50 -0700570
Mathieu Chartier590fee92013-09-13 13:46:47 -0700571 // Allocate into a specific space.
572 mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c,
573 size_t bytes)
574 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
575
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800576 // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so
577 // that the switch statement is constant optimized in the entrypoints.
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800578 template <const bool kInstrumented, const bool kGrow>
Mathieu Chartiercbb2d202013-11-14 17:45:16 -0800579 ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type,
Mathieu Chartierc528dba2013-11-26 12:00:11 -0800580 size_t alloc_size, size_t* bytes_allocated)
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700581 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
582
Hiroshi Yamauchi3b4c1892013-09-12 21:33:12 -0700583 void ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation)
584 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
Mathieu Chartier692fafd2013-11-29 17:24:40 -0800585
586 template <bool kGrow>
587 bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -0700588
  // Helpers for reading and writing java.lang.ref.Reference fields using the offsets above.
  void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  mirror::Object* GetReferenceReferent(mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ClearReferenceReferent(mirror::Object* reference)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetReferenceReferent(reference, nullptr);
  }
  // Pushes the list of cleared references out to the managed heap.
  void EnqueueClearedReferences();
  // Returns true if the reference object has not yet been enqueued.
  bool IsEnqueuable(const mirror::Object* ref) const;
  bool IsEnqueued(mirror::Object* ref) const;
  void DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj, RootVisitor mark_visitor,
                              void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Run the finalizers.
  void RunFinalization(JNIEnv* env);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToCompleteLocked(Thread* self)
      EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_);

  void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  bool IsGCRequestPending() const;

  size_t RecordAllocationInstrumented(size_t size, mirror::Object* object)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t RecordAllocationUninstrumented(size_t size, mirror::Object* object)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns
  // which type of Gc was actually run.
  collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause,
                                           bool clear_soft_references)
      LOCKS_EXCLUDED(gc_complete_lock_,
                     Locks::heap_bitmap_lock_,
                     Locks::thread_suspend_count_lock_);

  void PreGcVerification(collector::GarbageCollector* gc);
  void PreSweepingGcVerification(collector::GarbageCollector* gc)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PostGcVerification(collector::GarbageCollector* gc)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Update the watermark for the native allocated bytes based on the current number of native
  // bytes allocated and the target utilization ratio.
  void UpdateMaxNativeFootprint();

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection.
  void GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration);
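  // Policy sketch (an inference from min_free_/max_free_/target_utilization_
  // below; heap.cc holds the authoritative version):
  //   size_t target = bytes_allocated / target_utilization_;
  //   target = std::min(std::max(target, bytes_allocated + min_free_),
  //                     bytes_allocated + max_free_);
  //   SetIdealFootprint(target);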

  size_t GetPercentFree();

  void AddSpace(space::Space* space, bool set_as_default = true)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // No thread safety analysis since we call this everywhere and it is impossible to find a proper
  // lock ordering for it.
  void VerifyObjectBody(const mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  static void VerificationCallback(mirror::Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Swap the allocation stack with the live stack.
  void SwapStacks();

  // Clear cards and update the mod union table.
  void ProcessCards(TimingLogger& timings);

  // All-known continuous spaces, where objects lie within fixed bounds.
  std::vector<space::ContinuousSpace*> continuous_spaces_;

  // All-known discontinuous spaces, where objects may be placed throughout virtual memory.
  std::vector<space::DiscontinuousSpace*> discontinuous_spaces_;

  // All-known alloc spaces, where objects may be or have been allocated.
  std::vector<space::AllocSpace*> alloc_spaces_;

  // A space where non-movable objects are allocated, when compaction is enabled it contains
  // Classes, ArtMethods, ArtFields, and non moving objects.
  space::MallocSpace* non_moving_space_;

  // Space which we use for the kAllocatorTypeRosAlloc.
  space::RosAllocSpace* rosalloc_space_;

  // Space which we use for the kAllocatorTypeDlMalloc.
  space::DlMallocSpace* dlmalloc_space_;

  // The main space is the space which the GC copies to and from on process state updates. This
  // space is typically either the dlmalloc_space_ or the rosalloc_space_.
  space::MallocSpace* main_space_;

  // The large object space we are currently allocating into.
  space::LargeObjectSpace* large_object_space_;

  // The card table, dirtied by the write barrier.
  UniquePtr<accounting::CardTable> card_table_;

  // A mod-union table remembers all of the references from its space to other spaces.
  SafeMap<space::Space*, accounting::ModUnionTable*> mod_union_tables_;

  // Keep the free list allocator mem map lying around when we transition to background so that we
  // don't have to worry about virtual address space fragmentation.
  UniquePtr<MemMap> allocator_mem_map_;

  // The mem-map which we will use for the non-moving space after the zygote is done forking.
  UniquePtr<MemMap> post_zygote_non_moving_space_mem_map_;

  // What kind of concurrency behavior is the runtime after? Currently true for concurrent mark
  // sweep GC, false for other GC types.
  bool concurrent_gc_;

  // The current collector type.
  CollectorType collector_type_;
  // Which collector we will switch to after zygote fork.
  CollectorType post_zygote_collector_type_;
  // Which collector we will use when the app is notified of a transition to background.
  CollectorType background_collector_type_;

  // How many GC threads we may use for paused parts of garbage collection.
  const size_t parallel_gc_threads_;

  // How many GC threads we may use for unpaused parts of garbage collection.
  const size_t conc_gc_threads_;

  // Boolean for if we are in low memory mode.
  const bool low_memory_mode_;

  // If we get a pause longer than long pause log threshold, then we print out the GC after it
  // finishes.
  const size_t long_pause_log_threshold_;

  // If we get a GC longer than long GC log threshold, then we print out the GC after it finishes.
  const size_t long_gc_log_threshold_;

  // If we ignore the max footprint it lets the heap grow until it hits the heap capacity, this is
  // useful for benchmarking since it reduces time spent in GC to a low %.
  const bool ignore_max_footprint_;

  // If we have a zygote space.
  bool have_zygote_space_;

  // Number of pinned primitive arrays in the movable space.
  // Block all GC until this hits zero, or we hit the timeout!
  size_t number_gc_blockers_;
  static constexpr size_t KGCBlockTimeout = 30000;

  // Guards access to the state of GC, associated conditional variable is used to signal when a GC
  // completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // Reference queues.
  ReferenceQueue soft_reference_queue_;
  ReferenceQueue weak_reference_queue_;
  ReferenceQueue finalizer_reference_queue_;
  ReferenceQueue phantom_reference_queue_;
  ReferenceQueue cleared_references_;

  // True while the garbage collector is running.
  volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);

  // Last Gc type we ran. Used by WaitForConcurrentGc to know which Gc was waited on.
  volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);
  collector::GcType next_gc_type_;

  // Maximum size that the heap can reach.
  const size_t capacity_;

  // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
  // programs it is "cleared" making it the same as capacity.
  size_t growth_limit_;

  // When the number of bytes allocated exceeds the footprint TryAllocate returns NULL indicating
  // a GC should be triggered.
  size_t max_allowed_footprint_;

  // The watermark at which a concurrent GC is requested by registerNativeAllocation.
  size_t native_footprint_gc_watermark_;

  // The watermark at which a GC is performed inside of registerNativeAllocation.
  size_t native_footprint_limit_;

  // Whether or not we need to run finalizers in the next native allocation.
  bool native_need_to_run_finalization_;

  // The current process state, used to decide whether we currently care about pause times.
  ProcessState process_state_;

  // When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
  // it completes ahead of an allocation failing.
  size_t concurrent_start_bytes_;

  // Since the heap was created, how many bytes have been freed.
  size_t total_bytes_freed_ever_;

  // Since the heap was created, how many objects have been freed.
  size_t total_objects_freed_ever_;

  // Number of bytes allocated. Adjusted after each allocation and free.
  AtomicInteger num_bytes_allocated_;

  // Bytes which are allocated and managed by native code but still need to be accounted for.
  AtomicInteger native_bytes_allocated_;

  // Data structure GC overhead.
  AtomicInteger gc_memory_overhead_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;

  // Parallel GC data structures.
  UniquePtr<ThreadPool> thread_pool_;

  // The last time a heap trim occurred.
  uint64_t last_trim_time_ms_;

  // The nanosecond time at which the last GC ended.
  uint64_t last_gc_time_ns_;

  // How many bytes were allocated at the end of the last GC.
  uint64_t last_gc_size_;

  // Estimated allocation rate (bytes / second). Computed between the time of the last GC cycle
  // and the start of the current one.
  uint64_t allocation_rate_;

  // For a GC cycle, bitmaps recording which objects are live and which have been
  // marked, respectively.
  UniquePtr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  UniquePtr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  UniquePtr<accounting::ObjectStack> mark_stack_;

  // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap.
  const size_t max_allocation_stack_size_;
  UniquePtr<accounting::ObjectStack> allocation_stack_;

  // Second allocation stack so that we can process allocation with the heap unlocked.
  UniquePtr<accounting::ObjectStack> live_stack_;

  // Allocator type.
  AllocatorType current_allocator_;
  const AllocatorType current_non_moving_allocator_;

  // Which GCs we run in order when an allocation fails.
  std::vector<collector::GcType> gc_plan_;

  // Bump pointer spaces.
  space::BumpPointerSpace* bump_pointer_space_;
  // Temp space is the space which the semispace collector copies to.
  space::BumpPointerSpace* temp_space_;

  // Offset of java.lang.ref.Reference.referent.
  MemberOffset reference_referent_offset_;
  // Offset of java.lang.ref.Reference.queue.
  MemberOffset reference_queue_offset_;
  // Offset of java.lang.ref.Reference.queueNext.
  MemberOffset reference_queueNext_offset_;
  // Offset of java.lang.ref.Reference.pendingNext.
  MemberOffset reference_pendingNext_offset_;
  // Offset of java.lang.ref.FinalizerReference.zombie.
  MemberOffset finalizer_reference_zombie_offset_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
  // utilization, regardless of target utilization ratio.
  size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  size_t max_free_;

  // Target ideal heap utilization ratio.
  double target_utilization_;

  // Total time which mutators are paused or waiting for GC to complete.
  uint64_t total_wait_time_;

  // Total time spent allocating objects, in microseconds.
  AtomicInteger total_allocation_time_;

  // The current state of heap verification, may be enabled or disabled.
  HeapVerificationMode verify_object_mode_;

  // Compacting GC disable count, prevents compacting GC from running iff > 0.
  size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_);

  std::vector<collector::GarbageCollector*> garbage_collectors_;
  collector::SemiSpace* semi_space_collector_;

  const bool running_on_valgrind_;
  const bool use_tlab_;

  friend class collector::MarkSweep;
  friend class collector::SemiSpace;
  friend class ReferenceQueue;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;
  friend class ScopedHeapLock;
  friend class space::SpaceTest;

  class AllocationTimer {
   private:
    Heap* heap_;
    mirror::Object** allocated_obj_ptr_;
    uint64_t allocation_start_time_;
   public:
    AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr);
    ~AllocationTimer();
  };

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_H_