/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_HEAP_H_
#define ART_SRC_HEAP_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "card_table.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "heap_bitmap.h"
#include "mutex.h"
#include "offsets.h"

#define VERIFY_OBJECT_ENABLED 0

namespace art {

class AllocSpace;
class Class;
class HeapBitmap;
class ImageSpace;
class MarkStack;
class ModUnionTable;
class ModUnionTableBitmap;
class Object;
class Space;
class SpaceTest;
class Thread;

class LOCKABLE Heap {
 public:
  static const size_t kInitialSize = 2 * MB;

  static const size_t kMaximumSize = 32 * MB;

  typedef void (RootVisitor)(const Object* root, void* arg);
  typedef bool (IsMarkedTester)(const Object* object, void* arg);

  // Create a heap with the requested sizes. If image_file_name is non-empty, it specifies an
  // image Space to load, based on ImageWriter output.
  explicit Heap(size_t starting_size, size_t growth_limit, size_t capacity,
                const std::string& image_file_name);
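  // Illustrative construction (hypothetical values, not code from this file):
  //   Heap* heap = new Heap(Heap::kInitialSize, 16 * MB, Heap::kMaximumSize, image_file_name);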

  ~Heap();

  // Allocates and initializes storage for an object instance.
  Object* AllocObject(Class* klass, size_t num_bytes);
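  // Illustrative call (hypothetical caller; num_bytes is the instance size in bytes):
  //   Object* obj = heap->AllocObject(klass, num_bytes);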

  // Check sanity of given reference. Requires the heap lock.
#if VERIFY_OBJECT_ENABLED
  void VerifyObject(const Object* o);
#else
  void VerifyObject(const Object*) {}
#endif

  // Check sanity of all live references. Requires the heap lock.
  void VerifyHeap();

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
  // and doesn't abort on error, allowing the caller to report more
  // meaningful diagnostics.
  bool IsHeapAddress(const Object* obj);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(const Object* obj);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references);

  // Does a concurrent GC; should only be called by the GC daemon thread, via the runtime.
  void ConcurrentGC();

  // Implements java.lang.Runtime.maxMemory.
  int64_t GetMaxMemory();
  // Implements java.lang.Runtime.totalMemory.
  int64_t GetTotalMemory();
  // Implements java.lang.Runtime.freeMemory.
  int64_t GetFreeMemory();
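  // Orientation note (assumption, mirroring java.lang.Runtime semantics): free memory is
  // roughly total memory minus the bytes currently allocated, and total never exceeds max.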

  // Implements VMDebug.countInstancesOfClass.
  int64_t CountInstances(Class* c, bool count_assignable);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Returns the target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  float GetTargetHeapUtilization() {
    return target_utilization_;
  }
  // Sets the target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target) {
    DCHECK_GT(target, 0.0f);  // asserted in Java code
    DCHECK_LT(target, 1.0f);
    target_utilization_ = target;
  }
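  // Illustrative call (hypothetical): heap->SetTargetHeapUtilization(0.75f). Per the DCHECKs
  // above, the value must lie strictly between 0 and 1.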

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns
  // true if we waited for the GC to complete.
  bool WaitForConcurrentGcToComplete();

  pid_t GetLockOwner();  // For SignalCatcher.
  void AssertLockHeld() {
    lock_->AssertHeld();
  }
  void AssertLockNotHeld() {
    lock_->AssertNotHeld();
  }

  const std::vector<Space*>& GetSpaces() {
    return spaces_;
  }

  HeapBitmap* GetLiveBits() {
    return live_bitmap_;
  }

  HeapBitmap* GetMarkBits() {
    return mark_bitmap_;
  }

  void SetReferenceOffsets(MemberOffset reference_referent_offset,
                           MemberOffset reference_queue_offset,
                           MemberOffset reference_queueNext_offset,
                           MemberOffset reference_pendingNext_offset,
                           MemberOffset finalizer_reference_zombie_offset);

  Object* GetReferenceReferent(Object* reference);
  void ClearReferenceReferent(Object* reference);

  // Returns true if the reference object has not yet been enqueued.
  bool IsEnqueuable(const Object* ref);
  void EnqueueReference(Object* ref, Object** list);
  void EnqueuePendingReference(Object* ref, Object** list);
  Object* DequeuePendingReference(Object** list);
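  // Illustrative pending-list usage (a sketch based only on the declarations above; variable
  // names are hypothetical):
  //   Object* list = NULL;
  //   if (heap->IsEnqueuable(ref)) {
  //     heap->EnqueueReference(ref, &list);
  //   }
  //   while (list != NULL) {
  //     Object* pending = heap->DequeuePendingReference(&list);
  //     // ... process 'pending' ...
  //   }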

  MemberOffset GetReferencePendingNextOffset() {
    DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
    return reference_pendingNext_offset_;
  }

  MemberOffset GetFinalizerReferenceZombieOffset() {
    DCHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
    return finalizer_reference_zombie_offset_;
  }

  void EnableObjectValidation() {
#if VERIFY_OBJECT_ENABLED
    VerifyHeap();
#endif
    verify_objects_ = true;
  }

  void DisableObjectValidation() {
    verify_objects_ = false;
  }

  // Callers must hold the heap lock.
  void RecordFreeLocked(size_t freed_objects, size_t freed_bytes);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field.
  void WriteBarrierField(const Object* dst, MemberOffset /*offset*/, const Object* /*new_value*/) {
    if (!card_marking_disabled_) {
      card_table_->MarkCard(dst);
    }
  }
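  // Illustrative call site (hypothetical): after storing new_value into a reference field of
  // dst at 'offset', call WriteBarrierField(dst, offset, new_value) so that dst's card is
  // dirtied and the GC will rescan it.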

  // Write barrier for array operations that update many field positions
  void WriteBarrierArray(const Object* dst, int /*start_offset*/, size_t /*length TODO: element_count or byte_count?*/) {
    if (UNLIKELY(!card_marking_disabled_)) {
      card_table_->MarkCard(dst);
    }
  }
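  // Illustrative call site (hypothetical): after copying references into dst, call
  // WriteBarrierArray(dst, start_offset, length); like WriteBarrierField, it only marks
  // dst's card.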

  CardTable* GetCardTable() {
    return card_table_;
  }

  void DisableCardMarking() {
    // TODO: we shouldn't need to disable card marking, this is here to help the image_writer
    card_marking_disabled_ = true;
  }

  void AddFinalizerReference(Thread* self, Object* object);

  size_t GetBytesAllocated() { return num_bytes_allocated_; }
  size_t GetObjectsAllocated() { return num_objects_allocated_; }

  ImageSpace* GetImageSpace() {
    CHECK(image_space_ != NULL);
    return image_space_;
  }

  AllocSpace* GetAllocSpace() {
    CHECK(alloc_space_ != NULL);
    return alloc_space_;
  }

  size_t GetConcurrentStartSize() const { return concurrent_start_size_; }

  void SetConcurrentStartSize(size_t size) {
    concurrent_start_size_ = size;
  }

  size_t GetConcurrentMinFree() const { return concurrent_min_free_; }

  void SetConcurrentMinFree(size_t size) {
    concurrent_min_free_ = size;
  }

  void DumpForSigQuit(std::ostream& os);

  void Trim();

 private:
  // Allocates uninitialized storage.
  Object* AllocateLocked(size_t num_bytes);
  Object* AllocateLocked(AllocSpace* space, size_t num_bytes);

  void Lock() EXCLUSIVE_LOCK_FUNCTION();
  void Unlock() UNLOCK_FUNCTION();

  // Pushes a list of cleared references out to the managed heap.
  void EnqueueClearedReferences(Object** cleared_references);

  void RequestHeapTrim();
  void RequestConcurrentGC();

  void RecordAllocationLocked(AllocSpace* space, const Object* object);
  void RecordImageAllocations(Space* space);

  // TODO: can we teach GCC to understand the weird locking in here?
  void CollectGarbageInternal(bool concurrent, bool clear_soft_references) NO_THREAD_SAFETY_ANALYSIS;

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection.
  void GrowForUtilization();
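  // Worked example (illustrative, assuming utilization == live bytes / footprint): with
  // target_utilization_ == 0.5 and 12 MB live after a full collection, the ideal footprint is
  // 12 MB / 0.5 == 24 MB, clamped by the alloc space's growth limit.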

  size_t GetPercentFree();

  void AddSpace(Space* space);

  void VerifyObjectLocked(const Object* obj);

  void VerifyHeapLocked();

  static void VerificationCallback(Object* obj, void* arg);

  Mutex* lock_;
  ConditionVariable* condition_;

  std::vector<Space*> spaces_;

  ImageSpace* image_space_;

  // default Space for allocations
  AllocSpace* alloc_space_;

  HeapBitmap* mark_bitmap_;

  HeapBitmap* live_bitmap_;

  // TODO: Reduce memory usage, this bitmap currently takes 1 bit per 8 bytes
  // of image space.
  ModUnionTable* mod_union_table_;

  CardTable* card_table_;

  // Used by the image writer to disable card marking on copied objects
  // TODO: remove
  bool card_marking_disabled_;

  // True while the garbage collector is running.
  volatile bool is_gc_running_;

  // Bytes until concurrent GC
  size_t concurrent_start_bytes_;
  size_t concurrent_start_size_;
  size_t concurrent_min_free_;
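  // (Assumption, for orientation only: allocation presumably triggers RequestConcurrentGC()
  // once num_bytes_allocated_ crosses concurrent_start_bytes_, which appears to be derived
  // from concurrent_start_size_ and concurrent_min_free_.)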

  // True while the garbage collector is trying to signal the GC daemon thread.
  // This flag is needed to prevent recursion from occurring when the JNI calls
  // allocate memory and request another GC.
  bool try_running_gc_;

  // Used to ensure that we don't ever recursively request GC.
  bool requesting_gc_;

  // Mark stack that we reuse to avoid re-allocating the mark stack
  MarkStack* mark_stack_;

  // Number of bytes allocated.  Adjusted after each allocation and free.
  size_t num_bytes_allocated_;

  // Number of objects allocated.  Adjusted after each allocation and free.
  size_t num_objects_allocated_;

  // Last trim time
  uint64_t last_trim_time_;

  // offset of java.lang.ref.Reference.referent
  MemberOffset reference_referent_offset_;

  // offset of java.lang.ref.Reference.queue
  MemberOffset reference_queue_offset_;

  // offset of java.lang.ref.Reference.queueNext
  MemberOffset reference_queueNext_offset_;

  // offset of java.lang.ref.Reference.pendingNext
  MemberOffset reference_pendingNext_offset_;

  // offset of java.lang.ref.FinalizerReference.zombie
  MemberOffset finalizer_reference_zombie_offset_;

  // Target ideal heap utilization ratio
  float target_utilization_;

  bool verify_objects_;

  friend class ScopedHeapLock;
  FRIEND_TEST(SpaceTest, AllocAndFree);
  FRIEND_TEST(SpaceTest, AllocAndFreeList);
  friend class SpaceTest;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace art

#endif  // ART_SRC_HEAP_H_