/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_HEAP_H_
#define ART_SRC_HEAP_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "card_table.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "heap_bitmap.h"
#include "mutex.h"
#include "offsets.h"
#include "safe_map.h"
#include "timing_logger.h"

#define VERIFY_OBJECT_ENABLED 0

// Fast verification means we do not verify the classes of objects.
#define VERIFY_OBJECT_FAST 1

namespace art {

class AllocSpace;
class Class;
class HeapBitmap;
class ImageSpace;
class MarkStack;
class ModUnionTable;
class Object;
class Space;
class SpaceTest;
class Thread;
class TimingLogger;

typedef std::vector<Space*> Spaces;

// The ordering of the enum matters; it is used to determine which GCs are run first.
enum GcType {
  // No GC.
  kGcTypeNone,
  // Sticky mark bits "generational" GC.
  kGcTypeSticky,
  // Partial GC, over only the alloc space.
  kGcTypePartial,
  // Full GC.
  kGcTypeFull,
  // Number of different GC types.
  kGcTypeMax,
};
std::ostream& operator<<(std::ostream& os, const GcType& policy);

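// Note (illustrative only, not part of the API): because the values above run from the
// cheapest collection to the most thorough, code that escalates from one GC type to the
// next can simply iterate over the enum, e.g. roughly:
//   for (int i = requested_gc; i < kGcTypeMax; ++i) {
//     TryGc(static_cast<GcType>(i));  // hypothetical helper, for illustration
//   }
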
class LOCKABLE Heap {
 public:
  static const size_t kInitialSize = 2 * MB;

  static const size_t kMaximumSize = 32 * MB;

  typedef void (RootVisitor)(const Object* root, void* arg);
  typedef bool (IsMarkedTester)(const Object* object, void* arg);

  // Create a heap with the requested sizes. The possibly empty image_file_name specifies a
  // Space to load based on ImageWriter output.
  explicit Heap(size_t starting_size, size_t growth_limit, size_t capacity,
                const std::string& image_file_name, bool concurrent_gc);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  Object* AllocObject(Class* klass, size_t num_bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Check sanity of the given reference. Requires the heap lock.
#if VERIFY_OBJECT_ENABLED
  void VerifyObject(const Object* o);
#else
  void VerifyObject(const Object*) {}
#endif

  // Check sanity of all live references. Requires the heap lock.
  void VerifyHeap();
  static void RootMatchesObjectVisitor(const Object* root, void* arg);
  bool VerifyHeapReferences()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool VerifyMissingCardMarks()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock
  // and doesn't abort on error, allowing the caller to report more meaningful diagnostics.
  bool IsHeapAddress(const Object* obj);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(const Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references)
      LOCKS_EXCLUDED(Locks::mutator_lock_);

  // Does a concurrent GC; should only be called by the GC daemon thread through the runtime.
  void ConcurrentGC();

  // Implements java.lang.Runtime.maxMemory.
  int64_t GetMaxMemory();
  // Implements java.lang.Runtime.totalMemory.
  int64_t GetTotalMemory();
  // Implements java.lang.Runtime.freeMemory.
  int64_t GetFreeMemory();

  // Implements VMDebug.countInstancesOfClass.
  int64_t CountInstances(Class* c, bool count_assignable)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  float GetTargetHeapUtilization() {
    return target_utilization_;
  }
  // Set target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target) {
    DCHECK_GT(target, 0.0f);  // asserted in Java code
    DCHECK_LT(target, 1.0f);
    target_utilization_ = target;
  }
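
  // Illustrative sketch only (the actual policy lives in the private GrowForUtilization below):
  // a target utilization of, say, 0.5 means the ideal footprint is grown so that live data
  // occupies roughly half of it, i.e. approximately
  //   ideal_footprint = static_cast<size_t>(bytes_allocated / target_utilization_);
  // where bytes_allocated stands for the heap's current allocated byte count (assumed name).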

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle. Returns the type of GC that
  // was waited on, or kGcTypeNone if no GC needed to be waited for.
  GcType WaitForConcurrentGcToComplete();
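
  // A hedged usage sketch (not the runtime's actual retry policy): a caller that wants a
  // completed collection before proceeding might first wait out any in-flight concurrent GC
  // and only trigger its own collection if nothing was running, e.g.
  //   if (WaitForConcurrentGcToComplete() == kGcTypeNone) {
  //     CollectGarbage(false);  // nothing was running; collect without clearing soft references
  //   }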

  const Spaces& GetSpaces() {
    return spaces_;
  }

  void SetReferenceOffsets(MemberOffset reference_referent_offset,
                           MemberOffset reference_queue_offset,
                           MemberOffset reference_queueNext_offset,
                           MemberOffset reference_pendingNext_offset,
                           MemberOffset finalizer_reference_zombie_offset);

  Object* GetReferenceReferent(Object* reference);
  void ClearReferenceReferent(Object* reference);

  // Returns true if the reference object has not yet been enqueued.
  bool IsEnqueuable(const Object* ref);
  void EnqueueReference(Object* ref, Object** list);
  void EnqueuePendingReference(Object* ref, Object** list);
  Object* DequeuePendingReference(Object** list);
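
  // Minimal usage sketch (assumed, not taken verbatim from the call sites): the pending-reference
  // helpers above maintain an intrusive list threaded through Reference.pendingNext, so draining
  // such a list looks roughly like:
  //   while (list != NULL) {
  //     Object* ref = DequeuePendingReference(&list);
  //     // deliver ref to its ReferenceQueue, clear its referent, etc.
  //   }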

  MemberOffset GetReferencePendingNextOffset() {
    DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
    return reference_pendingNext_offset_;
  }

  MemberOffset GetFinalizerReferenceZombieOffset() {
    DCHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
    return finalizer_reference_zombie_offset_;
  }

  void EnableObjectValidation() {
#if VERIFY_OBJECT_ENABLED
    VerifyHeap();
#endif
    verify_objects_ = true;
  }

  void DisableObjectValidation() {
    verify_objects_ = false;
  }

  void RecordFree(size_t freed_objects, size_t freed_bytes);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field.
  void WriteBarrierField(const Object* dst, MemberOffset /*offset*/, const Object* /*new_value*/) {
    if (!card_marking_disabled_) {
      card_table_->MarkCard(dst);
    }
  }

  // Write barrier for array operations that update many field positions.
  void WriteBarrierArray(const Object* dst, int /*start_offset*/,
                         size_t /*length TODO: element_count or byte_count?*/) {
    if (UNLIKELY(!card_marking_disabled_)) {
      card_table_->MarkCard(dst);
    }
  }
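
  // Illustrative (assumed) call pattern for the write barriers above, not verbatim from the
  // runtime: after storing a reference into a field, dirty the owning object's card so that a
  // concurrent or generational collection will rescan it, e.g.
  //   dst->SetFieldObject(offset, new_value, false);  // hypothetical setter
  //   Runtime::Current()->GetHeap()->WriteBarrierField(dst, offset, new_value);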

  CardTable* GetCardTable() {
    return card_table_.get();
  }

  void DisableCardMarking() {
    // TODO: we shouldn't need to disable card marking; this is here to help the image_writer.
    card_marking_disabled_ = true;
  }

  void AddFinalizerReference(Thread* self, Object* object);

  size_t GetBytesAllocated() const;
  size_t GetObjectsAllocated() const;
  size_t GetConcurrentStartSize() const;
  size_t GetConcurrentMinFree() const;
  size_t GetUsedMemorySize() const;

  // Finds the space that corresponds to an object's address. This is probably slow;
  // TODO: use a better data structure such as a binary tree.
  Space* FindSpaceFromObject(const Object*) const;

  void DumpForSigQuit(std::ostream& os);

  void Trim();

  HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  void PreZygoteFork();

  // Mark and empty the allocation stack.
  void FlushAllocStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  void MarkAllocStack(SpaceBitmap* bitmap, MarkStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unmark all the objects in the allocation stack in the specified bitmap.
  void UnMarkAllocStack(SpaceBitmap* bitmap, MarkStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Update and mark the mod union table based on GC type.
  void UpdateAndMarkModUnion(TimingLogger& timings, GcType gc_type)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
  // Assumes there is only one image space.
  ImageSpace* GetImageSpace();
  AllocSpace* GetAllocSpace();
  void DumpSpaces();

 private:
  // Allocates uninitialized storage.
  Object* Allocate(AllocSpace* space, size_t num_bytes)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Pushes a list of cleared references out to the managed heap.
  void EnqueueClearedReferences(Object** cleared_references);

  void RequestHeapTrim();
  void RequestConcurrentGC();

  void RecordAllocation(AllocSpace* space, const Object* object)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
  // which type of GC was actually run.
  GcType CollectGarbageInternal(GcType gc_plan, bool clear_soft_references)
      LOCKS_EXCLUDED(gc_complete_lock_,
                     Locks::heap_bitmap_lock_,
                     Locks::mutator_lock_,
                     Locks::thread_suspend_count_lock_);
  void CollectGarbageMarkSweepPlan(GcType gc_plan, bool clear_soft_references)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_,
                     Locks::mutator_lock_);
  void CollectGarbageConcurrentMarkSweepPlan(GcType gc_plan, bool clear_soft_references)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_,
                     Locks::mutator_lock_);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection.
  void GrowForUtilization();

  size_t GetPercentFree();

  void AddSpace(Space* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // No thread safety analysis since we call this everywhere and it is impossible to find a proper
  // lock ordering for it.
  void VerifyObjectBody(const Object* obj)
      NO_THREAD_SAFETY_ANALYSIS;

  static void VerificationCallback(Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Swap bitmaps (if we are a full GC then we swap the zygote bitmap too).
  void SwapBitmaps();
  void SwapStacks();

  Spaces spaces_;

  // The alloc space which we are currently allocating into.
  AllocSpace* alloc_space_;

  // One cumulative logger for each type of GC.
  typedef SafeMap<GcType, CumulativeLogger*> CumulativeTimings;
  CumulativeTimings cumulative_timings_;

  // The mod-union table remembers all of the references from the image space to the alloc /
  // zygote spaces.
  UniquePtr<ModUnionTable> mod_union_table_;

  // This table holds all of the references from the zygote space to the alloc space.
  UniquePtr<ModUnionTable> zygote_mod_union_table_;

  UniquePtr<CardTable> card_table_;

  // True for concurrent mark sweep GC, false for mark sweep.
  const bool concurrent_gc_;

  // If we have a zygote space.
  bool have_zygote_space_;

  // Used by the image writer to disable card marking on copied objects.
  // TODO: remove
  bool card_marking_disabled_;

  // Guards access to the state of GC; the associated condition variable is used to signal when a
  // GC completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // True while the garbage collector is running.
  volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);

  // Last GC type we ran. Used by WaitForConcurrentGcToComplete to know which GC was waited on.
  volatile GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);

  // Bytes until concurrent GC starts.
  volatile size_t concurrent_start_bytes_;
  size_t concurrent_start_size_;
  size_t concurrent_min_free_;
  size_t sticky_gc_count_;

  // Number of bytes allocated. Adjusted after each allocation and free.
  volatile size_t num_bytes_allocated_;

  // Number of objects allocated. Adjusted after each allocation and free.
  volatile size_t num_objects_allocated_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;

  // The number of GCs after which we force a partial GC instead of a sticky mark bits GC.
  const size_t partial_gc_frequency_;

  // Sticky mark bits GC has some overhead, so if we have less than a few megabytes of AllocSpace
  // then it's probably better to just do a partial GC.
  const size_t min_alloc_space_size_for_sticky_gc_;

  // Minimum remaining size for sticky GC. Since sticky GC doesn't free up as much memory as a
  // normal GC, it is important to not use it when we are almost out of memory.
  const size_t min_remaining_space_for_sticky_gc_;

  // Last trim time.
  uint64_t last_trim_time_;

  UniquePtr<HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  UniquePtr<HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // True while the garbage collector is trying to signal the GC daemon thread.
  // This flag is needed to prevent recursion from occurring when the JNI calls
  // allocate memory and request another GC.
  bool try_running_gc_;

  // Used to ensure that we don't ever recursively request GC.
  volatile bool requesting_gc_;

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  UniquePtr<MarkStack> mark_stack_;

  // Allocation stack; new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap.
  UniquePtr<MarkStack> allocation_stack_;

  // Second allocation stack so that we can process allocations with the heap unlocked.
  UniquePtr<MarkStack> live_stack_;

  // Offset of java.lang.ref.Reference.referent.
  MemberOffset reference_referent_offset_;

  // Offset of java.lang.ref.Reference.queue.
  MemberOffset reference_queue_offset_;

  // Offset of java.lang.ref.Reference.queueNext.
  MemberOffset reference_queueNext_offset_;

  // Offset of java.lang.ref.Reference.pendingNext.
  MemberOffset reference_pendingNext_offset_;

  // Offset of java.lang.ref.FinalizerReference.zombie.
  MemberOffset finalizer_reference_zombie_offset_;

  // Target ideal heap utilization ratio.
  float target_utilization_;

  bool verify_objects_;

  friend class MarkSweep;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;
  friend class ScopedHeapLock;
  FRIEND_TEST(SpaceTest, AllocAndFree);
  FRIEND_TEST(SpaceTest, AllocAndFreeList);
  FRIEND_TEST(SpaceTest, ZygoteSpace);
  friend class SpaceTest;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace art

#endif  // ART_SRC_HEAP_H_