/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_H_
#define ART_RUNTIME_GC_HEAP_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "atomic.h"
#include "base/timing_logger.h"
#include "gc/accounting/atomic_stack.h"
#include "gc/accounting/card_table.h"
#include "gc/gc_cause.h"
#include "gc/collector/gc_type.h"
#include "gc/collector_type.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "jni.h"
#include "locks.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "reference_queue.h"
#include "safe_map.h"
#include "thread_pool.h"
#include "verify_object.h"

namespace art {

class ConditionVariable;
class Mutex;
class StackVisitor;
class Thread;
class TimingLogger;

namespace mirror {
  class Class;
  class Object;
}  // namespace mirror

namespace gc {
namespace accounting {
  class HeapBitmap;
  class ModUnionTable;
  class ObjectSet;
}  // namespace accounting

namespace collector {
  class GarbageCollector;
  class MarkSweep;
  class SemiSpace;
}  // namespace collector

namespace space {
  class AllocSpace;
  class BumpPointerSpace;
  class DiscontinuousSpace;
  class DlMallocSpace;
  class ImageSpace;
  class LargeObjectSpace;
  class MallocSpace;
  class RosAllocSpace;
  class Space;
  class SpaceTest;
  class ContinuousMemMapAllocSpace;
}  // namespace space
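
// Card-aging functor for the card table: a dirty card decays by one step so that
// recently-dirtied cards can still be recognized on the next GC; any other value is
// reset to clean (zero).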
class AgeCardVisitor {
 public:
  byte operator()(byte card) const {
    if (card == accounting::CardTable::kCardDirty) {
      return card - 1;
    } else {
      return 0;
    }
  }
};

// Different types of allocators. "Has entrypoints" means compiled code can reach the
// allocator's fast path directly through the allocation entrypoints.
enum AllocatorType {
  kAllocatorTypeBumpPointer,  // Use BumpPointer allocator, has entrypoints.
  kAllocatorTypeTLAB,  // Use TLAB allocator, has entrypoints.
  kAllocatorTypeRosAlloc,  // Use RosAlloc allocator, has entrypoints.
  kAllocatorTypeDlMalloc,  // Use dlmalloc allocator, has entrypoints.
  kAllocatorTypeNonMoving,  // Special allocator for non-moving objects, doesn't have entrypoints.
  kAllocatorTypeLOS,  // Large object space, also doesn't have entrypoints.
};

// If true, use rosalloc/RosAllocSpace instead of dlmalloc/DlMallocSpace.
static constexpr bool kUseRosAlloc = true;

// If true, use thread-local allocation stacks.
static constexpr bool kUseThreadLocalAllocationStack = true;

// The process state passed in from the activity manager, used to determine when to do trimming
// and compaction.
enum ProcessState {
  kProcessStateJankPerceptible = 0,
  kProcessStateJankImperceptible = 1,
};
std::ostream& operator<<(std::ostream& os, const ProcessState& process_state);
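
// Note (assumed plumbing, for orientation only): the framework reports these states via
// dalvik.system.VMRuntime.updateProcessState(), which reaches Heap::UpdateProcessState()
// below and may trigger a background collector transition or compaction.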

class Heap {
 public:
  // If true, measure the total allocation time.
  static constexpr bool kMeasureAllocationTime = false;
  // Primitive arrays larger than this size are put in the large object space.
  static constexpr size_t kDefaultLargeObjectThreshold = 3 * kPageSize;

  static constexpr size_t kDefaultInitialSize = 2 * MB;
  static constexpr size_t kDefaultMaximumSize = 32 * MB;
  static constexpr size_t kDefaultMaxFree = 2 * MB;
  static constexpr size_t kDefaultMinFree = kDefaultMaxFree / 4;
  static constexpr size_t kDefaultLongPauseLogThreshold = MsToNs(5);
  static constexpr size_t kDefaultLongGCLogThreshold = MsToNs(100);
  static constexpr size_t kDefaultTLABSize = 256 * KB;

  // Default target utilization.
  static constexpr double kDefaultTargetUtilization = 0.5;

  // Used so that we don't overflow the allocation time atomic integer.
  static constexpr size_t kTimeAdjust = 1024;

  // Create a heap with the requested sizes. The possibly empty original_image_file_name
  // names an image Space to load based on ImageWriter output.
  explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
                size_t max_free, double target_utilization, size_t capacity,
                const std::string& original_image_file_name,
                CollectorType post_zygote_collector_type, CollectorType background_collector_type,
                size_t parallel_gc_threads, size_t conc_gc_threads, bool low_memory_mode,
                size_t long_pause_threshold, size_t long_gc_threshold,
                bool ignore_max_footprint, bool use_tlab, bool verify_pre_gc_heap,
                bool verify_post_gc_heap, bool verify_pre_gc_rosalloc,
                bool verify_post_gc_rosalloc);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
  mirror::Object* AllocObject(Thread* self, mirror::Class* klass, size_t num_bytes,
                              const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentAllocator(),
                                                         pre_fence_visitor);
  }

  template <bool kInstrumented, typename PreFenceVisitor = VoidFunctor>
  mirror::Object* AllocNonMovableObject(Thread* self, mirror::Class* klass, size_t num_bytes,
                                        const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return AllocObjectWithAllocator<kInstrumented, true>(self, klass, num_bytes,
                                                         GetCurrentNonMovingAllocator(),
                                                         pre_fence_visitor);
  }

  template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor = VoidFunctor>
  ALWAYS_INLINE mirror::Object* AllocObjectWithAllocator(
      Thread* self, mirror::Class* klass, size_t byte_count, AllocatorType allocator,
      const PreFenceVisitor& pre_fence_visitor = VoidFunctor())
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  AllocatorType GetCurrentAllocator() const {
    return current_allocator_;
  }

  AllocatorType GetCurrentNonMovingAllocator() const {
    return current_non_moving_allocator_;
  }
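
  // Illustrative sketch, not part of the API surface: a runtime-internal call site for an
  // instrumented, movable allocation would look roughly like
  //
  //   mirror::Object* obj = heap->AllocObject<true>(self, klass, byte_count);
  //
  // which routes through AllocObjectWithAllocator using GetCurrentAllocator(); the runtime
  // swaps that allocator when transitioning collectors (see ChangeAllocator() below).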

  // Visit all of the live objects in the heap.
  void VisitObjects(ObjectCallback callback, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  void SwapSemiSpaces() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void ThrowOutOfMemoryError(size_t byte_count, bool large_object_allocation);
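
  // These back dalvik.system.VMRuntime.registerNativeAllocation/registerNativeFree,
  // accounting native (e.g. malloc) memory so it can influence GC heuristics.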
  void RegisterNativeAllocation(JNIEnv* env, int bytes);
  void RegisterNativeFree(JNIEnv* env, int bytes);

  // Change the allocator; updates the entrypoints.
  void ChangeAllocator(AllocatorType allocator)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Transition the garbage collector during runtime, may copy objects from one space to another.
  void TransitionCollector(CollectorType collector_type);

  // Change the collector to be one of the possible options (MS, CMS, SS).
  void ChangeCollector(CollectorType collector_type)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);

  // The given reference is believed to point to an object in the Java heap; check its soundness.
  // TODO: NO_THREAD_SAFETY_ANALYSIS since we call this everywhere and it is impossible to find a
  // proper lock ordering for it.
  void VerifyObjectBody(mirror::Object* o) NO_THREAD_SAFETY_ANALYSIS;

  // Check sanity of all live references.
  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  bool VerifyHeapReferences()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);
  bool VerifyMissingCardMarks()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock
  // and doesn't abort on error, allowing the caller to report more meaningful diagnostics.
  bool IsValidObjectAddress(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Faster alternative to IsHeapAddress since finding if an object is in the large object space is
  // very slow.
  bool IsNonDiscontinuousSpaceHeapAddress(const mirror::Object* obj) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(mirror::Object* obj, bool search_allocation_stack = true,
                          bool search_live_stack = true, bool sorted = false)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_);

  // Returns true if there is any chance that the object (obj) will move.
  bool IsMovableObject(const mirror::Object* obj) const;

  // Returns true if an object is in the temp space; if this happens, it's usually indicative of
  // compaction-related errors.
  bool IsInTempSpace(const mirror::Object* obj) const;

  // Lets us temporarily disable moving (compacting) GC until the objects a caller depends on are
  // released.
  void IncrementDisableMovingGC(Thread* self);
  void DecrementDisableMovingGC(Thread* self);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references);

  // Does a concurrent GC; should only be called by the GC daemon thread through the runtime.
  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<mirror::Class*>& classes, bool use_is_assignable_from,
                      uint64_t* counts)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP RT_Instances.
  void GetInstances(mirror::Class* c, int32_t max_count, std::vector<mirror::Object*>& instances)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Implements JDWP OR_ReferringObjects.
  void GetReferringObjects(mirror::Object* o, int32_t max_count,
                           std::vector<mirror::Object*>& referring_objects)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Data structure memory usage tracking.
  void RegisterGCAllocation(size_t bytes);
  void RegisterGCDeAllocation(size_t bytes);

  // Set target ideal heap utilization ratio, implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns the type of GC we
  // waited for.
  collector::GcType WaitForGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);

  // Update the heap's process state to a new value, may cause compaction to occur.
  void UpdateProcessState(ProcessState process_state);

  const std::vector<space::ContinuousSpace*>& GetContinuousSpaces() const {
    return continuous_spaces_;
  }

  const std::vector<space::DiscontinuousSpace*>& GetDiscontinuousSpaces() const {
    return discontinuous_spaces_;
  }

  void SetReferenceOffsets(MemberOffset reference_referent_offset,
                           MemberOffset reference_queue_offset,
                           MemberOffset reference_queueNext_offset,
                           MemberOffset reference_pendingNext_offset,
                           MemberOffset finalizer_reference_zombie_offset);
  MemberOffset GetReferenceReferentOffset() const {
    return reference_referent_offset_;
  }
  MemberOffset GetReferenceQueueOffset() const {
    return reference_queue_offset_;
  }
  MemberOffset GetReferenceQueueNextOffset() const {
    return reference_queueNext_offset_;
  }
  MemberOffset GetReferencePendingNextOffset() const {
    return reference_pendingNext_offset_;
  }
  MemberOffset GetFinalizerReferenceZombieOffset() const {
    return finalizer_reference_zombie_offset_;
  }
  static mirror::Object* PreserveSoftReferenceCallback(mirror::Object* obj, void* arg);
  void ProcessReferences(TimingLogger& timings, bool clear_soft,
                         IsMarkedCallback* is_marked_callback,
                         MarkObjectCallback* mark_object_callback,
                         ProcessMarkStackCallback* process_mark_stack_callback,
                         void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Enable verification of object references when the runtime is sufficiently initialized.
  void EnableObjectValidation() {
    verify_object_mode_ = kVerifyObjectSupport;
    if (verify_object_mode_ > kVerifyObjectModeDisabled) {
      VerifyHeap();
    }
  }

  // Disable object reference verification for image writing.
  void DisableObjectValidation() {
    verify_object_mode_ = kVerifyObjectModeDisabled;
  }

  // Other checks may be performed if we know the heap should be in a sane state.
  bool IsObjectValidationEnabled() const {
    return verify_object_mode_ > kVerifyObjectModeDisabled;
  }

  // Returns true if low memory mode is enabled.
  bool IsLowMemoryMode() const {
    return low_memory_mode_;
  }

  // Freed bytes can be negative in cases where we copy objects from a compacted space to a
  // free-list backed space.
  void RecordFree(size_t freed_objects, size_t freed_bytes);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field.
  void WriteBarrierField(const mirror::Object* dst, MemberOffset /*offset*/,
                         const mirror::Object* /*new_value*/) {
    card_table_->MarkCard(dst);
  }

  // Write barrier for array operations that update many field positions.
  void WriteBarrierArray(const mirror::Object* dst, int /*start_offset*/,
                         size_t /*length TODO: element_count or byte_count?*/) {
    card_table_->MarkCard(dst);
  }

  void WriteBarrierEveryFieldOf(const mirror::Object* obj) {
    card_table_->MarkCard(obj);
  }
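
  // Illustrative sketch (hypothetical call site): after a reference store such as
  // "dst->field = new_value", the card covering dst must be dirtied so that a concurrent
  // or generational collector re-examines it:
  //
  //   Runtime::Current()->GetHeap()->WriteBarrierField(dst, offset, new_value);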

  accounting::CardTable* GetCardTable() const {
    return card_table_.get();
  }

  void AddFinalizerReference(Thread* self, mirror::Object* object);

  // Returns the number of bytes currently allocated.
  size_t GetBytesAllocated() const {
    return num_bytes_allocated_;
  }

  // Returns the number of objects currently allocated.
  size_t GetObjectsAllocated() const LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Returns the total number of objects allocated since the heap was created.
  size_t GetObjectsAllocatedEver() const;

  // Returns the total number of bytes allocated since the heap was created.
  size_t GetBytesAllocatedEver() const;

  // Returns the total number of objects freed since the heap was created.
  size_t GetObjectsFreedEver() const {
    return total_objects_freed_ever_;
  }

  // Returns the total number of bytes freed since the heap was created.
  size_t GetBytesFreedEver() const {
    return total_bytes_freed_ever_;
  }

  // Implements java.lang.Runtime.maxMemory, returning the maximum amount of memory a program can
  // consume. For a regular VM this would relate to the -Xmx option and would return -1 if no Xmx
  // were specified. Android apps start with a growth limit (small heap size) which is
  // cleared/extended for large apps.
  size_t GetMaxMemory() const {
    return growth_limit_;
  }

  // Implements java.lang.Runtime.totalMemory, returning the amount of memory consumed by an
  // application.
  size_t GetTotalMemory() const;

  // Implements java.lang.Runtime.freeMemory.
  size_t GetFreeMemory() const {
    return GetTotalMemory() - num_bytes_allocated_;
  }

  // Get the space that corresponds to an object's address. Current implementation searches all
  // spaces in turn. If fail_ok is false then failing to find a space will cause an abort.
  // TODO: consider using faster data structure like binary tree.
  space::ContinuousSpace* FindContinuousSpaceFromObject(const mirror::Object*, bool fail_ok) const;
  space::DiscontinuousSpace* FindDiscontinuousSpaceFromObject(const mirror::Object*,
                                                              bool fail_ok) const;
  space::Space* FindSpaceFromObject(const mirror::Object*, bool fail_ok) const;

  void DumpForSigQuit(std::ostream& os);

  // Trim the managed and native heaps by releasing unused memory back to the OS.
  void Trim();

  void RevokeThreadLocalBuffers(Thread* thread);
  void RevokeAllThreadLocalBuffers();

  void PreGcRosAllocVerification(TimingLogger* timings)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
  void PostGcRosAllocVerification(TimingLogger* timings)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
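
  // The live bitmap holds the objects that survived the previous GC; the mark bitmap is
  // populated during marking, and the two are swapped once a collection completes.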
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 451 | accounting::HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 452 | return live_bitmap_.get(); |
| 453 | } |
| 454 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 455 | accounting::HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 456 | return mark_bitmap_.get(); |
| 457 | } |
| 458 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 459 | accounting::ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) { |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 460 | return live_stack_.get(); |
| 461 | } |
| 462 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 463 | void PreZygoteFork() NO_THREAD_SAFETY_ANALYSIS; |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 464 | |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 465 | // Mark and empty stack. |
| 466 | void FlushAllocStack() |
Ian Rogers | b726dcb | 2012-09-05 08:57:23 -0700 | [diff] [blame] | 467 | EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 468 | |
Hiroshi Yamauchi | 90d7068 | 2014-02-20 16:17:30 -0800 | [diff] [blame] | 469 | // Revoke all the thread-local allocation stacks. |
Mathieu Chartier | c22c59e | 2014-02-24 15:16:06 -0800 | [diff] [blame] | 470 | void RevokeAllThreadLocalAllocationStacks(Thread* self) |
Mathieu Chartier | d889178 | 2014-03-02 13:28:37 -0800 | [diff] [blame] | 471 | EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_) |
| 472 | LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_, Locks::thread_list_lock_); |
Hiroshi Yamauchi | 90d7068 | 2014-02-20 16:17:30 -0800 | [diff] [blame] | 473 | |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 474 | // Mark all the objects in the allocation stack in the specified bitmap. |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 475 | void MarkAllocStack(accounting::SpaceBitmap* bitmap1, accounting::SpaceBitmap* bitmap2, |
Mathieu Chartier | db7f37d | 2014-01-10 11:09:06 -0800 | [diff] [blame] | 476 | accounting::ObjectSet* large_objects, accounting::ObjectStack* stack) |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 477 | EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 478 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 479 | // Mark the specified allocation stack as live. |
| 480 | void MarkAllocStackAsLive(accounting::ObjectStack* stack) |
Mathieu Chartier | ca2a24d | 2013-11-25 15:12:12 -0800 | [diff] [blame] | 481 | EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); |
Mathieu Chartier | 8235331 | 2013-07-18 10:47:51 -0700 | [diff] [blame] | 482 | |
Mathieu Chartier | a1602f2 | 2014-01-13 17:19:19 -0800 | [diff] [blame] | 483 | // Unbind any bound bitmaps. |
| 484 | void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); |
| 485 | |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 486 | // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added. |
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 487 | // Assumes there is only one image space. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 488 | space::ImageSpace* GetImageSpace() const; |
| 489 | |
Mathieu Chartier | 6dda898 | 2014-03-06 11:11:48 -0800 | [diff] [blame] | 490 | // Permenantly disable compaction. |
| 491 | void DisableCompaction(); |
| 492 | |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 493 | space::DlMallocSpace* GetDlMallocSpace() const { |
| 494 | return dlmalloc_space_; |
| 495 | } |
| 496 | |
| 497 | space::RosAllocSpace* GetRosAllocSpace() const { |
| 498 | return rosalloc_space_; |
| 499 | } |
| 500 | |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 501 | space::MallocSpace* GetNonMovingSpace() const { |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 502 | return non_moving_space_; |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 503 | } |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 504 | |
| 505 | space::LargeObjectSpace* GetLargeObjectsSpace() const { |
| 506 | return large_object_space_; |
| 507 | } |
| 508 | |
Hiroshi Yamauchi | 05e713a | 2014-01-09 13:24:51 -0800 | [diff] [blame] | 509 | // Returns the free list space that may contain movable objects (the |
| 510 | // one that's not the non-moving space), either rosalloc_space_ or |
| 511 | // dlmalloc_space_. |
| 512 | space::MallocSpace* GetPrimaryFreeListSpace() { |
| 513 | if (kUseRosAlloc) { |
| 514 | DCHECK(rosalloc_space_ != nullptr); |
| 515 | // reinterpret_cast is necessary as the space class hierarchy |
| 516 | // isn't known (#included) yet here. |
| 517 | return reinterpret_cast<space::MallocSpace*>(rosalloc_space_); |
| 518 | } else { |
| 519 | DCHECK(dlmalloc_space_ != nullptr); |
| 520 | return reinterpret_cast<space::MallocSpace*>(dlmalloc_space_); |
| 521 | } |
| 522 | } |
| 523 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 524 | void DumpSpaces(std::ostream& stream = LOG(INFO)); |
Elliott Hughes | f834936 | 2012-06-18 15:00:06 -0700 | [diff] [blame] | 525 | |
Mathieu Chartier | 15d3402 | 2014-02-26 17:16:38 -0800 | [diff] [blame] | 526 | // Dump object should only be used by the signal handler. |
| 527 | void DumpObject(std::ostream& stream, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; |
| 528 | // Safe version of pretty type of which check to make sure objects are heap addresses. |
| 529 | std::string SafeGetClassDescriptor(mirror::Class* klass) NO_THREAD_SAFETY_ANALYSIS; |
| 530 | std::string SafePrettyTypeOf(mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS; |
| 531 | |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 532 | // GC performance measuring |
Elliott Hughes | 8b788fe | 2013-04-17 15:57:01 -0700 | [diff] [blame] | 533 | void DumpGcPerformanceInfo(std::ostream& os); |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 534 | |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 535 | // Returns true if we currently care about pause times. |
| 536 | bool CareAboutPauseTimes() const { |
Mathieu Chartier | ca2a24d | 2013-11-25 15:12:12 -0800 | [diff] [blame] | 537 | return process_state_ == kProcessStateJankPerceptible; |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 538 | } |
| 539 | |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 540 | // Thread pool. |
| 541 | void CreateThreadPool(); |
| 542 | void DeleteThreadPool(); |
| 543 | ThreadPool* GetThreadPool() { |
| 544 | return thread_pool_.get(); |
| 545 | } |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 546 | size_t GetParallelGCThreadCount() const { |
| 547 | return parallel_gc_threads_; |
| 548 | } |
| 549 | size_t GetConcGCThreadCount() const { |
| 550 | return conc_gc_threads_; |
| 551 | } |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 552 | accounting::ModUnionTable* FindModUnionTableFromSpace(space::Space* space); |
| 553 | void AddModUnionTable(accounting::ModUnionTable* mod_union_table); |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 554 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 555 | bool IsCompilingBoot() const; |
| 556 | bool HasImageSpace() const; |
| 557 | |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 558 | private: |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 559 | void Compact(space::ContinuousMemMapAllocSpace* target_space, |
| 560 | space::ContinuousMemMapAllocSpace* source_space); |
| 561 | |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 562 | void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_); |
| 563 | |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 564 | static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) { |
| 565 | return |
| 566 | allocator_type != kAllocatorTypeBumpPointer && |
| 567 | allocator_type != kAllocatorTypeTLAB; |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 568 | } |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 569 | static ALWAYS_INLINE bool AllocatorMayHaveConcurrentGC(AllocatorType allocator_type) { |
| 570 | return AllocatorHasAllocationStack(allocator_type); |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 571 | } |
Mathieu Chartier | 9be9a7a | 2014-01-24 14:07:33 -0800 | [diff] [blame] | 572 | static bool IsCompactingGC(CollectorType collector_type) { |
| 573 | return collector_type == kCollectorTypeSS || collector_type == kCollectorTypeGSS; |
| 574 | } |
Ian Rogers | ef7d42f | 2014-01-06 12:55:46 -0800 | [diff] [blame] | 575 | bool ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const |
| 576 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 577 | ALWAYS_INLINE void CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated, |
Mathieu Chartier | f517f1a | 2014-03-06 15:52:27 -0800 | [diff] [blame^] | 578 | mirror::Object** obj) |
| 579 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
Hiroshi Yamauchi | 50b2928 | 2013-07-30 13:58:37 -0700 | [diff] [blame] | 580 | |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 581 | // We don't force this to be inlined since it is a slow path. |
Mathieu Chartier | c528dba | 2013-11-26 12:00:11 -0800 | [diff] [blame] | 582 | template <bool kInstrumented, typename PreFenceVisitor> |
| 583 | mirror::Object* AllocLargeObject(Thread* self, mirror::Class* klass, size_t byte_count, |
| 584 | const PreFenceVisitor& pre_fence_visitor) |
| 585 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
| 586 | |
Hiroshi Yamauchi | 50b2928 | 2013-07-30 13:58:37 -0700 | [diff] [blame] | 587 | // Handles Allocate()'s slow allocation path with GC involved after |
| 588 | // an initial allocation attempt failed. |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 589 | mirror::Object* AllocateInternalWithGc(Thread* self, AllocatorType allocator, size_t num_bytes, |
Ian Rogers | 6fac447 | 2014-02-25 17:01:10 -0800 | [diff] [blame] | 590 | size_t* bytes_allocated, size_t* usable_size, |
| 591 | mirror::Class** klass) |
Ian Rogers | b726dcb | 2012-09-05 08:57:23 -0700 | [diff] [blame] | 592 | LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) |
| 593 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
Mathieu Chartier | a639903 | 2012-06-11 18:49:50 -0700 | [diff] [blame] | 594 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 595 | // Allocate into a specific space. |
| 596 | mirror::Object* AllocateInto(Thread* self, space::AllocSpace* space, mirror::Class* c, |
| 597 | size_t bytes) |
| 598 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
| 599 | |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 600 | // Try to allocate a number of bytes, this function never does any GCs. Needs to be inlined so |
| 601 | // that the switch statement is constant optimized in the entrypoints. |
Mathieu Chartier | c528dba | 2013-11-26 12:00:11 -0800 | [diff] [blame] | 602 | template <const bool kInstrumented, const bool kGrow> |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 603 | ALWAYS_INLINE mirror::Object* TryToAllocate(Thread* self, AllocatorType allocator_type, |
Ian Rogers | 6fac447 | 2014-02-25 17:01:10 -0800 | [diff] [blame] | 604 | size_t alloc_size, size_t* bytes_allocated, |
| 605 | size_t* usable_size) |
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 606 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
| 607 | |
Hiroshi Yamauchi | 3b4c189 | 2013-09-12 21:33:12 -0700 | [diff] [blame] | 608 | void ThrowOutOfMemoryError(Thread* self, size_t byte_count, bool large_object_allocation) |
| 609 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 610 | |
| 611 | template <bool kGrow> |
| 612 | bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size); |
Hiroshi Yamauchi | 50b2928 | 2013-07-30 13:58:37 -0700 | [diff] [blame] | 613 | |
Mathieu Chartier | 15d3402 | 2014-02-26 17:16:38 -0800 | [diff] [blame] | 614 | // Returns true if the address passed in is within the address range of a continuous space. |
| 615 | bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const |
| 616 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
| 617 | |
Elliott Hughes | adb460d | 2011-10-05 17:02:34 -0700 | [diff] [blame] | 618 | // Pushes a list of cleared references out to the managed heap. |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 619 | void SetReferenceReferent(mirror::Object* reference, mirror::Object* referent) |
| 620 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
| 621 | mirror::Object* GetReferenceReferent(mirror::Object* reference) |
| 622 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
| 623 | void ClearReferenceReferent(mirror::Object* reference) |
| 624 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { |
| 625 | SetReferenceReferent(reference, nullptr); |
| 626 | } |
| 627 | void EnqueueClearedReferences(); |
| 628 | // Returns true if the reference object has not yet been enqueued. |
Ian Rogers | ef7d42f | 2014-01-06 12:55:46 -0800 | [diff] [blame] | 629 | bool IsEnqueuable(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
| 630 | bool IsEnqueued(mirror::Object* ref) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
Mathieu Chartier | 83c8ee0 | 2014-01-28 14:50:23 -0800 | [diff] [blame] | 631 | void DelayReferenceReferent(mirror::Class* klass, mirror::Object* obj, |
| 632 | IsMarkedCallback is_marked_callback, void* arg) |
| 633 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 634 | |
| 635 | // Run the finalizers. |
| 636 | void RunFinalization(JNIEnv* env); |
| 637 | |
| 638 | // Blocks the caller until the garbage collector becomes idle and returns the type of GC we |
| 639 | // waited for. |
| 640 | collector::GcType WaitForGcToCompleteLocked(Thread* self) |
| 641 | EXCLUSIVE_LOCKS_REQUIRED(gc_complete_lock_); |
| 642 | |
Ian Rogers | f0bbeab | 2012-10-10 18:26:27 -0700 | [diff] [blame] | 643 | void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_); |
| 644 | void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_); |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 645 | bool IsGCRequestPending() const; |
Elliott Hughes | 8cf5bc0 | 2012-02-02 16:32:16 -0800 | [diff] [blame] | 646 | |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 647 | // Sometimes CollectGarbageInternal decides to run a different Gc than you requested. Returns |
| 648 | // which type of Gc was actually ran. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 649 | collector::GcType CollectGarbageInternal(collector::GcType gc_plan, GcCause gc_cause, |
| 650 | bool clear_soft_references) |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 651 | LOCKS_EXCLUDED(gc_complete_lock_, |
Ian Rogers | b726dcb | 2012-09-05 08:57:23 -0700 | [diff] [blame] | 652 | Locks::heap_bitmap_lock_, |
Ian Rogers | b726dcb | 2012-09-05 08:57:23 -0700 | [diff] [blame] | 653 | Locks::thread_suspend_count_lock_); |
Mathieu Chartier | 2b82db4 | 2012-11-14 17:29:05 -0800 | [diff] [blame] | 654 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 655 | void PreGcVerification(collector::GarbageCollector* gc); |
| 656 | void PreSweepingGcVerification(collector::GarbageCollector* gc) |
Mathieu Chartier | ad2541a | 2013-10-25 10:05:23 -0700 | [diff] [blame] | 657 | EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_); |
| 658 | void PostGcVerification(collector::GarbageCollector* gc) |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 659 | SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 660 | |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 661 | // Update the watermark for the native allocated bytes based on the current number of native |
| 662 | // bytes allocated and the target utilization ratio. |
| 663 | void UpdateMaxNativeFootprint(); |
| 664 | |
Ian Rogers | 3bb17a6 | 2012-01-27 23:56:44 -0800 | [diff] [blame] | 665 | // Given the current contents of the alloc space, increase the allowed heap footprint to match |
| 666 | // the target utilization ratio. This should only be called immediately after a full garbage |
| 667 | // collection. |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 668 | void GrowForUtilization(collector::GcType gc_type, uint64_t gc_duration); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 669 | |
Mathieu Chartier | 637e348 | 2012-08-17 10:41:32 -0700 | [diff] [blame] | 670 | size_t GetPercentFree(); |
Elliott Hughes | c967f78 | 2012-04-16 10:23:15 -0700 | [diff] [blame] | 671 | |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 672 | void AddSpace(space::Space* space, bool set_as_default = true) |
| 673 | LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); |
| 674 | void RemoveSpace(space::Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_); |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 675 | |
Ian Rogers | 2dd0e2c | 2013-01-24 12:42:14 -0800 | [diff] [blame] | 676 | static void VerificationCallback(mirror::Object* obj, void* arg) |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 677 | SHARED_LOCKS_REQUIRED(GlobalSychronization::heap_bitmap_lock_); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 678 | |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 679 | // Swap the allocation stack with the live stack. |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 680 | void SwapStacks(Thread* self); |
| 681 | |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 682 | // Clear cards and update the mod union table. |
Ian Rogers | 5fe9af7 | 2013-11-14 00:17:20 -0800 | [diff] [blame] | 683 | void ProcessCards(TimingLogger& timings); |
Mathieu Chartier | 7469ebf | 2012-09-24 16:28:36 -0700 | [diff] [blame] | 684 | |
Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 685 | // Push an object onto the allocation stack. |
| 686 | void PushOnAllocationStack(Thread* self, mirror::Object* obj); |
| 687 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 688 | // All known continuous spaces, where objects lie within fixed bounds.
| 689 | std::vector<space::ContinuousSpace*> continuous_spaces_; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 690 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 691 | // All known discontinuous spaces, where objects may be placed throughout virtual memory.
| 692 | std::vector<space::DiscontinuousSpace*> discontinuous_spaces_; |
Mathieu Chartier | d8195f1 | 2012-10-05 12:21:28 -0700 | [diff] [blame] | 693 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 694 | // All known alloc spaces, where objects may be or have been allocated.
| 695 | std::vector<space::AllocSpace*> alloc_spaces_; |
| 696 | |
| 697 | // A space where non-movable objects are allocated; when compaction is enabled it contains
| 698 | // Classes, ArtMethods, ArtFields, and other non-moving objects.
Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 699 | space::MallocSpace* non_moving_space_; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 700 | |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 701 | // The space we use for kAllocatorTypeRosAlloc allocations.
| 702 | space::RosAllocSpace* rosalloc_space_; |
| 703 | |
| 704 | // The space we use for kAllocatorTypeDlMalloc allocations.
| 705 | space::DlMallocSpace* dlmalloc_space_; |
| 706 | |
Mathieu Chartier | fc5b528 | 2014-01-09 16:15:36 -0800 | [diff] [blame] | 707 | // The main space is the space which the GC copies to and from on process state updates. This |
| 708 | // space is typically either the dlmalloc_space_ or the rosalloc_space_. |
| 709 | space::MallocSpace* main_space_; |
| 710 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 711 | // The large object space we are currently allocating into. |
| 712 | space::LargeObjectSpace* large_object_space_; |
| 713 | |
| 714 | // The card table, dirtied by the write barrier. |
| 715 | UniquePtr<accounting::CardTable> card_table_; |
Brian Carlstrom | 4a289ed | 2011-08-16 17:17:49 -0700 | [diff] [blame] | 716 | |
Mathieu Chartier | 11409ae | 2013-09-23 11:49:36 -0700 | [diff] [blame] | 717 | // A mod-union table remembers all of the references from its space to other spaces.
| 718 | SafeMap<space::Space*, accounting::ModUnionTable*> mod_union_tables_; |
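// For orientation, a card-marking write barrier conceptually dirties the card that
// covers the object whose reference field was written, so that ProcessCards() can
// later find cross-space references without scanning the whole heap. A generic
// sketch; the shift and dirty values here are illustrative, not this runtime's API:
//
//   static constexpr size_t kExampleCardShift = 7;     // 128-byte cards
//   static constexpr uint8_t kExampleCardDirty = 0x70;
//   void ExampleMarkCard(uint8_t* biased_begin, const void* written_obj) {
//     biased_begin[reinterpret_cast<uintptr_t>(written_obj) >> kExampleCardShift] =
//         kExampleCardDirty;
//   }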
Mathieu Chartier | cc236d7 | 2012-07-20 10:29:05 -0700 | [diff] [blame] | 719 | |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 720 | // Keep the free list allocator mem map lying around when we transition to background so that we |
| 721 | // don't have to worry about virtual address space fragmentation. |
| 722 | UniquePtr<MemMap> allocator_mem_map_; |
| 723 | |
Mathieu Chartier | 938a03b | 2014-01-16 15:10:31 -0800 | [diff] [blame] | 724 | // The mem-map which we will use for the non-moving space after the zygote is done forking.
| 725 | UniquePtr<MemMap> post_zygote_non_moving_space_mem_map_; |
| 726 | |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 727 | // Whether the garbage collector runs concurrently with mutators. Currently true for the
| 728 | // concurrent mark sweep GC, false for other GC types.
| 729 | bool concurrent_gc_; |
Mathieu Chartier | 0de9f73 | 2013-11-22 17:58:48 -0800 | [diff] [blame] | 730 | |
| 731 | // The current collector type. |
| 732 | CollectorType collector_type_; |
Mathieu Chartier | 7bf82af | 2013-12-06 16:51:45 -0800 | [diff] [blame] | 733 | // Which collector we will switch to after zygote fork. |
| 734 | CollectorType post_zygote_collector_type_; |
Mathieu Chartier | e6da9af | 2013-12-16 11:54:42 -0800 | [diff] [blame] | 735 | // Which collector we will use when the app is notified of a transition to background. |
| 736 | CollectorType background_collector_type_; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 737 | |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 738 | // How many GC threads we may use for paused parts of garbage collection. |
| 739 | const size_t parallel_gc_threads_; |
| 740 | |
| 741 | // How many GC threads we may use for unpaused parts of garbage collection. |
| 742 | const size_t conc_gc_threads_; |
Mathieu Chartier | 63a5434 | 2013-07-23 13:17:59 -0700 | [diff] [blame] | 743 | |
Mathieu Chartier | e0a53e9 | 2013-08-05 10:17:40 -0700 | [diff] [blame] | 744 | // Whether we are in low memory mode.
| 745 | const bool low_memory_mode_; |
| 746 | |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 747 | // If a GC pause exceeds the long pause log threshold, we log the GC after it
| 748 | // finishes.
| 749 | const size_t long_pause_log_threshold_; |
| 750 | |
| 751 | // If a GC runs longer than the long GC log threshold, we log it after it finishes.
| 752 | const size_t long_gc_log_threshold_; |
| 753 | |
| 754 | // If we ignore the max footprint, the heap grows until it hits capacity; this is
| 755 | // useful for benchmarking since it keeps the percentage of time spent in GC low.
| 756 | const bool ignore_max_footprint_; |
| 757 | |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 758 | // If we have a zygote space. |
| 759 | bool have_zygote_space_; |
| 760 | |
Mathieu Chartier | bd0a653 | 2014-02-27 11:14:21 -0800 | [diff] [blame] | 761 | // The minimum allocation size for an object to be allocated in the large object space.
| 762 | size_t large_object_threshold_; |
| 763 | |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 764 | // Guards access to the state of GC; the associated condition variable is used to signal when
| 765 | // a GC completes.
| 766 | Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; |
| 767 | UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_); |
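// The usual pattern for this lock/condition-variable pair, sketched with standard
// C++ primitives rather than the runtime's Mutex/ConditionVariable types:
//
//   std::unique_lock<std::mutex> lock(gc_complete_mutex);  // hypothetical names
//   while (collector_type_running != kCollectorTypeNone) {
//     gc_complete_cond.wait(lock);  // signalled when a collection finishes
//   }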
| 768 | |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 769 | // Reference queues. |
| 770 | ReferenceQueue soft_reference_queue_; |
| 771 | ReferenceQueue weak_reference_queue_; |
| 772 | ReferenceQueue finalizer_reference_queue_; |
| 773 | ReferenceQueue phantom_reference_queue_; |
| 774 | ReferenceQueue cleared_references_; |
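// After marking, reference processing conventionally drains these queues in the
// order soft -> weak -> finalizer -> phantom, clearing each reference whose
// referent was not marked. A rough sketch; the queue operations shown are
// hypothetical stand-ins, not ReferenceQueue's actual API:
//
//   while (!QueueIsEmpty(&weak_reference_queue_)) {
//     mirror::Object* ref = Dequeue(&weak_reference_queue_);
//     if (!ReferentIsMarked(ref)) {
//       ClearReferent(ref);
//       Enqueue(&cleared_references_, ref);  // later handed back to java.lang.ref
//     }
//   }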
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 775 | |
Carl Shapiro | 58551df | 2011-07-24 03:09:51 -0700 | [diff] [blame] | 776 | // Which collector is currently running; kCollectorTypeNone while no GC is running.
Mathieu Chartier | d5a89ee | 2014-01-31 09:55:13 -0800 | [diff] [blame] | 777 | volatile CollectorType collector_type_running_ GUARDED_BY(gc_complete_lock_); |
Mathieu Chartier | 866fb2a | 2012-09-10 10:47:49 -0700 | [diff] [blame] | 778 | |
| 779 | // The last GC type we ran. Used by WaitForConcurrentGc to know which GC was waited on.
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 780 | volatile collector::GcType last_gc_type_ GUARDED_BY(gc_complete_lock_); |
Mathieu Chartier | bdd0fb9 | 2013-07-02 10:16:15 -0700 | [diff] [blame] | 781 | collector::GcType next_gc_type_; |
Mathieu Chartier | 1c23e1e | 2012-10-12 14:14:11 -0700 | [diff] [blame] | 782 | |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 783 | // Maximum size that the heap can reach. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 784 | const size_t capacity_; |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 785 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 786 | // The size the heap is limited to. This is initially smaller than capacity, but for largeHeap
| 787 | // programs it is "cleared", making it equal to capacity.
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 788 | size_t growth_limit_; |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 789 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 790 | // When the number of bytes allocated exceeds the footprint, TryAllocate returns NULL,
| 791 | // indicating that a GC should be triggered.
Mathieu Chartier | 1c23e1e | 2012-10-12 14:14:11 -0700 | [diff] [blame] | 792 | size_t max_allowed_footprint_; |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 793 | |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 794 | // The watermark at which a concurrent GC is requested by registerNativeAllocation. |
| 795 | size_t native_footprint_gc_watermark_; |
Mathieu Chartier | 2775ee4 | 2013-08-20 17:43:47 -0700 | [diff] [blame] | 796 | |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 797 | // The watermark at which a GC is performed inside of registerNativeAllocation. |
| 798 | size_t native_footprint_limit_; |
Mathieu Chartier | 2fde533 | 2012-09-14 14:51:54 -0700 | [diff] [blame] | 799 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 800 | // Whether or not we need to run finalizers on the next native allocation.
| 801 | bool native_need_to_run_finalization_; |
| 802 | |
Mathieu Chartier | c39e342 | 2013-08-07 16:41:36 -0700 | [diff] [blame] | 803 | // Whether or not we currently care about pause times. |
Mathieu Chartier | ca2a24d | 2013-11-25 15:12:12 -0800 | [diff] [blame] | 804 | ProcessState process_state_; |
Mathieu Chartier | c39e342 | 2013-08-07 16:41:36 -0700 | [diff] [blame] | 805 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 806 | // When num_bytes_allocated_ exceeds this amount, a concurrent GC should be requested so that
| 807 | // it completes before an allocation fails.
Mathieu Chartier | 0051be6 | 2012-10-12 17:47:11 -0700 | [diff] [blame] | 808 | size_t concurrent_start_bytes_; |
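// Hypothetical allocation-path check (the real check lives in the allocation fast
// path, not in this header): request the concurrent GC early enough that it can
// finish before max_allowed_footprint_ is exhausted.
//
//   if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
//     RequestConcurrentGC(self);
//   }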
Mathieu Chartier | 1c23e1e | 2012-10-12 14:14:11 -0700 | [diff] [blame] | 809 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 810 | // Since the heap was created, how many bytes have been freed. |
| 811 | size_t total_bytes_freed_ever_; |
| 812 | |
| 813 | // Since the heap was created, how many objects have been freed. |
| 814 | size_t total_objects_freed_ever_; |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 815 | |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 816 | // Number of bytes allocated. Adjusted after each allocation and free. |
Ian Rogers | ef7d42f | 2014-01-06 12:55:46 -0800 | [diff] [blame] | 817 | Atomic<size_t> num_bytes_allocated_; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 818 | |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 819 | // Bytes which are allocated and managed by native code but still need to be accounted for. |
Ian Rogers | ef7d42f | 2014-01-06 12:55:46 -0800 | [diff] [blame] | 820 | Atomic<size_t> native_bytes_allocated_; |
Mathieu Chartier | 987ccff | 2013-07-08 11:05:21 -0700 | [diff] [blame] | 821 | |
Mathieu Chartier | 0a9dc05 | 2013-07-25 11:01:28 -0700 | [diff] [blame] | 822 | // Memory overhead of the GC's internal data structures.
Ian Rogers | ef7d42f | 2014-01-06 12:55:46 -0800 | [diff] [blame] | 823 | Atomic<size_t> gc_memory_overhead_; |
Mathieu Chartier | 0a9dc05 | 2013-07-25 11:01:28 -0700 | [diff] [blame] | 824 | |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 825 | // Heap verification flags. |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 826 | const bool verify_missing_card_marks_; |
| 827 | const bool verify_system_weaks_; |
| 828 | const bool verify_pre_gc_heap_; |
| 829 | const bool verify_post_gc_heap_; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 830 | const bool verify_mod_union_table_; |
Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 831 | bool verify_pre_gc_rosalloc_; |
| 832 | bool verify_post_gc_rosalloc_; |
| 833 | |
| 834 | // RAII that temporarily disables the rosalloc verification during |
| 835 | // the zygote fork. |
| 836 | class ScopedDisableRosAllocVerification { |
| 837 | private: |
| 838 | Heap* heap_; |
| 839 | bool orig_verify_pre_gc_; |
| 840 | bool orig_verify_post_gc_; |
| 841 | public: |
| 842 | explicit ScopedDisableRosAllocVerification(Heap* heap) |
| 843 | : heap_(heap), |
| 844 | orig_verify_pre_gc_(heap_->verify_pre_gc_rosalloc_), |
| 845 | orig_verify_post_gc_(heap_->verify_post_gc_rosalloc_) { |
| 846 | heap_->verify_pre_gc_rosalloc_ = false; |
| 847 | heap_->verify_post_gc_rosalloc_ = false; |
| 848 | } |
| 849 | ~ScopedDisableRosAllocVerification() { |
| 850 | heap_->verify_pre_gc_rosalloc_ = orig_verify_pre_gc_; |
| 851 | heap_->verify_post_gc_rosalloc_ = orig_verify_post_gc_; |
| 852 | } |
| 853 | }; |
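// Typical use, sketched: suppress the expensive RosAlloc verification across the
// zygote fork, with the flags restored automatically on scope exit. PreZygoteFork
// is used here only as a plausible call site:
//
//   void Heap::PreZygoteFork() {
//     ScopedDisableRosAllocVerification disable_rosalloc_verif(this);
//     // ... collect and fork; the original flags return when the guard dies.
//   }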
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 854 | |
Mathieu Chartier | 02b6a78 | 2012-10-26 13:51:26 -0700 | [diff] [blame] | 855 | // Parallel GC data structures. |
| 856 | UniquePtr<ThreadPool> thread_pool_; |
| 857 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 858 | // The last time a heap trim occurred. |
| 859 | uint64_t last_trim_time_ms_; |
Ian Rogers | 00f7d0e | 2012-07-19 15:28:27 -0700 | [diff] [blame] | 860 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 861 | // The nanosecond time at which the last GC ended. |
| 862 | uint64_t last_gc_time_ns_; |
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 863 | |
| 864 | // How many bytes were allocated at the end of the last GC. |
| 865 | uint64_t last_gc_size_; |
| 866 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 867 | // Estimated allocation rate (bytes / second). Computed over the interval between the end of
| 868 | // the last GC and the start of the current one.
Mathieu Chartier | 65db880 | 2012-11-20 12:36:46 -0800 | [diff] [blame] | 869 | uint64_t allocation_rate_; |
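// Roughly how such a rate would be derived from the fields above (a sketch, not
// the exact update code):
//
//   uint64_t bytes_since_gc = GetBytesAllocated() - last_gc_size_;
//   uint64_t ns_since_gc = NanoTime() - last_gc_time_ns_;
//   allocation_rate_ = bytes_since_gc * UINT64_C(1000000000) / ns_since_gc;  // bytes/sec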
| 870 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 871 | // For a GC cycle, bitmaps whose bits are set for the live and marked objects, respectively.
| 872 | UniquePtr<accounting::HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_); |
| 873 | UniquePtr<accounting::HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_); |
Mathieu Chartier | b062fdd | 2012-07-03 09:51:48 -0700 | [diff] [blame] | 874 | |
Mathieu Chartier | e0f0cb3 | 2012-08-28 11:26:00 -0700 | [diff] [blame] | 875 | // Mark stack that we reuse to avoid re-allocating the mark stack. |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 876 | UniquePtr<accounting::ObjectStack> mark_stack_; |
Mathieu Chartier | 5301cd2 | 2012-05-31 12:11:36 -0700 | [diff] [blame] | 877 | |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 878 | // Allocation stack: new allocations go here so that we can do sticky mark bits. This enables us
| 879 | // to use the live bitmap as the old mark bitmap.
Mathieu Chartier | d8195f1 | 2012-10-05 12:21:28 -0700 | [diff] [blame] | 880 | const size_t max_allocation_stack_size_; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 881 | UniquePtr<accounting::ObjectStack> allocation_stack_; |
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 882 | |
| 883 | // Second allocation stack so that we can process allocations with the heap unlocked.
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 884 | UniquePtr<accounting::ObjectStack> live_stack_; |
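// Why two stacks: at a sticky collection the stacks are swapped, so everything
// allocated since the last GC is visible to the collector through live_stack_
// while mutators keep pushing new allocations onto a fresh allocation_stack_.
// Conceptually:
//
//   void Heap::SwapStacks(Thread* self) {
//     allocation_stack_.swap(live_stack_);  // conceptual body
//   }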
Mathieu Chartier | 357e9be | 2012-08-01 11:00:14 -0700 | [diff] [blame] | 885 | |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 886 | // Allocator type. |
Mathieu Chartier | 5048223 | 2013-11-21 11:48:14 -0800 | [diff] [blame] | 887 | AllocatorType current_allocator_; |
Mathieu Chartier | cbb2d20 | 2013-11-14 17:45:16 -0800 | [diff] [blame] | 888 | const AllocatorType current_non_moving_allocator_; |
| 889 | |
| 890 | // Which GCs we run, in order, when an allocation fails.
| 891 | std::vector<collector::GcType> gc_plan_; |
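// The plan is ordered cheapest-first, e.g. {kGcTypeSticky, kGcTypePartial,
// kGcTypeFull}, and allocation-failure handling escalates through it until a
// collection frees enough memory. CollectAndRetry is a hypothetical helper:
//
//   for (size_t i = 0; i < gc_plan_.size(); ++i) {
//     if (CollectAndRetry(gc_plan_[i], num_bytes)) {
//       break;  // this collection freed enough to satisfy the allocation
//     }
//   }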
| 892 | |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 893 | // Bump pointer spaces. |
| 894 | space::BumpPointerSpace* bump_pointer_space_; |
| 895 | // Temp space is the space which the semispace collector copies to. |
| 896 | space::BumpPointerSpace* temp_space_; |
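// Bump-pointer allocation is just a bounds check plus a pointer increment; a
// minimal sketch, where end_ and pos_ are illustrative fields and the real space
// also handles alignment and thread-local blocks:
//
//   mirror::Object* ExampleAlloc(size_t num_bytes) {
//     uint8_t* old_pos = pos_;
//     if (old_pos + num_bytes > end_) {
//       return NULL;  // space exhausted, caller must GC or expand
//     }
//     pos_ = old_pos + num_bytes;
//     return reinterpret_cast<mirror::Object*>(old_pos);
//   }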
| 897 | |
Brian Carlstrom | 1f87008 | 2011-08-23 16:02:11 -0700 | [diff] [blame] | 898 | // Offset of java.lang.ref.Reference.referent.
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 899 | MemberOffset reference_referent_offset_;
Brian Carlstrom | 1f87008 | 2011-08-23 16:02:11 -0700 | [diff] [blame] | 900 | // Offset of java.lang.ref.Reference.queue.
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 901 | MemberOffset reference_queue_offset_;
Brian Carlstrom | 1f87008 | 2011-08-23 16:02:11 -0700 | [diff] [blame] | 902 | // Offset of java.lang.ref.Reference.queueNext.
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 903 | MemberOffset reference_queueNext_offset_;
Brian Carlstrom | 1f87008 | 2011-08-23 16:02:11 -0700 | [diff] [blame] | 904 | // Offset of java.lang.ref.Reference.pendingNext.
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 905 | MemberOffset reference_pendingNext_offset_;
Brian Carlstrom | 1f87008 | 2011-08-23 16:02:11 -0700 | [diff] [blame] | 906 | // Offset of java.lang.ref.FinalizerReference.zombie.
Elliott Hughes | b3bd5f0 | 2012-03-08 21:05:27 -0800 | [diff] [blame] | 907 | MemberOffset finalizer_reference_zombie_offset_;
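// These offsets let the GC read java.lang.ref fields without calling into Java.
// A hedged sketch of reading a referent through the cached offset; GetFieldObject's
// exact signature varies across versions, so treat this as illustrative:
//
//   mirror::Object* referent =
//       reference->GetFieldObject<mirror::Object>(reference_referent_offset_, false);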
Ian Rogers | 0cfe1fb | 2011-08-26 03:29:44 -0700 | [diff] [blame] | 908 | |
Mathieu Chartier | 0051be6 | 2012-10-12 17:47:11 -0700 | [diff] [blame] | 909 | // Minimum free guarantees that you always have at least min_free_ free bytes after growing for |
| 910 | // utilization, regardless of target utilization ratio. |
| 911 | size_t min_free_; |
| 912 | |
| 913 | // The ideal maximum free size, when we grow the heap for utilization. |
| 914 | size_t max_free_; |
| 915 | |
Brian Carlstrom | 395520e | 2011-09-25 19:35:00 -0700 | [diff] [blame] | 916 | // Target ideal heap utilization ratio.
Mathieu Chartier | 0051be6 | 2012-10-12 17:47:11 -0700 | [diff] [blame] | 917 | double target_utilization_; |
Brian Carlstrom | 395520e | 2011-09-25 19:35:00 -0700 | [diff] [blame] | 918 | |
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 919 | // Total time for which mutators have been paused or have waited for a GC to complete.
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 920 | uint64_t total_wait_time_; |
| 921 | |
| 922 | // Total time spent allocating objects, in microseconds.
Mathieu Chartier | 155dfe9 | 2012-10-09 14:24:49 -0700 | [diff] [blame] | 923 | AtomicInteger total_allocation_time_; |
| 924 | |
Ian Rogers | 04d7aa9 | 2013-03-16 14:29:17 -0700 | [diff] [blame] | 925 | // The current state of heap verification; may be enabled or disabled.
Mathieu Chartier | 4e30541 | 2014-02-19 10:54:44 -0800 | [diff] [blame] | 926 | VerifyObjectMode verify_object_mode_; |
Ian Rogers | 04d7aa9 | 2013-03-16 14:29:17 -0700 | [diff] [blame] | 927 | |
Mathieu Chartier | 1d27b34 | 2014-01-28 12:51:09 -0800 | [diff] [blame] | 928 | // Compacting GC disable count; prevents the compacting GC from running while it is > 0.
| 929 | size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_); |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 930 | |
| 931 | std::vector<collector::GarbageCollector*> garbage_collectors_; |
| 932 | collector::SemiSpace* semi_space_collector_; |
Brian Carlstrom | 1f87008 | 2011-08-23 16:02:11 -0700 | [diff] [blame] | 933 | |
Hiroshi Yamauchi | 50b2928 | 2013-07-30 13:58:37 -0700 | [diff] [blame] | 934 | const bool running_on_valgrind_; |
Mathieu Chartier | 692fafd | 2013-11-29 17:24:40 -0800 | [diff] [blame] | 935 | const bool use_tlab_; |
Hiroshi Yamauchi | 50b2928 | 2013-07-30 13:58:37 -0700 | [diff] [blame] | 936 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 937 | friend class collector::MarkSweep; |
Mathieu Chartier | 590fee9 | 2013-09-13 13:46:47 -0700 | [diff] [blame] | 938 | friend class collector::SemiSpace; |
Mathieu Chartier | 39e3261 | 2013-11-12 16:28:05 -0800 | [diff] [blame] | 939 | friend class ReferenceQueue; |
Mathieu Chartier | c7b83a0 | 2012-09-11 18:07:39 -0700 | [diff] [blame] | 940 | friend class VerifyReferenceCardVisitor; |
Mathieu Chartier | fd678be | 2012-08-30 14:50:54 -0700 | [diff] [blame] | 941 | friend class VerifyReferenceVisitor; |
| 942 | friend class VerifyObjectVisitor; |
Mathieu Chartier | b43b7d4 | 2012-06-19 13:15:09 -0700 | [diff] [blame] | 943 | friend class ScopedHeapLock; |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 944 | friend class space::SpaceTest; |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame] | 945 | |
Hiroshi Yamauchi | 3b4c189 | 2013-09-12 21:33:12 -0700 | [diff] [blame] | 946 | class AllocationTimer { |
| 947 | private: |
| 948 | Heap* heap_; |
| 949 | mirror::Object** allocated_obj_ptr_; |
| 950 | uint64_t allocation_start_time_; |
| 951 | public: |
| 952 | AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr); |
| 953 | ~AllocationTimer(); |
| 954 | }; |
| 955 | |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 956 | DISALLOW_IMPLICIT_CONSTRUCTORS(Heap); |
| 957 | }; |
| 958 | |
Ian Rogers | 1d54e73 | 2013-05-02 21:10:01 -0700 | [diff] [blame] | 959 | } // namespace gc |
Carl Shapiro | 1fb8620 | 2011-06-27 17:43:13 -0700 | [diff] [blame] | 960 | } // namespace art |
| 961 | |
Brian Carlstrom | fc0e321 | 2013-07-17 14:40:12 -0700 | [diff] [blame] | 962 | #endif // ART_RUNTIME_GC_HEAP_H_ |