/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_HEAP_H_
#define ART_SRC_HEAP_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "card_table.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "heap_bitmap.h"
#include "mutex.h"
#include "offsets.h"
#include "safe_map.h"

#define VERIFY_OBJECT_ENABLED 0

// Fast verification means we do not verify the classes of objects.
#define VERIFY_OBJECT_FAST 1

namespace art {

class AllocSpace;
class Class;
class HeapBitmap;
class ImageSpace;
class MarkStack;
class ModUnionTable;
class Object;
class Space;
class SpaceTest;
class Thread;

typedef std::vector<Space*> Spaces;

class LOCKABLE Heap {
 public:
  static const size_t kInitialSize = 2 * MB;

  static const size_t kMaximumSize = 32 * MB;

  typedef void (RootVisitor)(const Object* root, void* arg);
  typedef bool (IsMarkedTester)(const Object* object, void* arg);

  // Create a heap with the requested sizes. The possibly empty
  // image_file_name names an image Space to load based on
  // ImageWriter output.
  explicit Heap(size_t starting_size, size_t growth_limit, size_t capacity,
                const std::string& image_file_name, bool concurrent_gc);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  Object* AllocObject(Class* klass, size_t num_bytes)
      LOCKS_EXCLUDED(statistics_lock_)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
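
  // A minimal usage sketch (illustrative only, not part of this header;
  // Runtime::Current()->GetHeap() and Class::GetObjectSize() are assumed
  // helpers from elsewhere in the runtime):
  //   Heap* heap = Runtime::Current()->GetHeap();
  //   Object* obj = heap->AllocObject(klass, klass->GetObjectSize());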

  // Check sanity of given reference. Requires the heap lock.
#if VERIFY_OBJECT_ENABLED
  void VerifyObject(const Object* o);
#else
  void VerifyObject(const Object*) {}
#endif

  // Check sanity of all live references. Requires the heap lock.
  void VerifyHeap();

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock,
  // and doesn't abort on error, allowing the caller to report more meaningful diagnostics.
  bool IsHeapAddress(const Object* obj);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(const Object* obj)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references)
      LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_);

  // Does a concurrent GC; should only be called by the GC daemon thread
  // through the runtime.
  void ConcurrentGC();

  // Implements java.lang.Runtime.maxMemory.
  int64_t GetMaxMemory();
  // Implements java.lang.Runtime.totalMemory.
  int64_t GetTotalMemory();
  // Implements java.lang.Runtime.freeMemory.
  int64_t GetFreeMemory() LOCKS_EXCLUDED(statistics_lock_);

  // Implements VMDebug.countInstancesOfClass.
  int64_t CountInstances(Class* c, bool count_assignable)
      LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  float GetTargetHeapUtilization() {
    return target_utilization_;
  }
  // Set target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target) {
    DCHECK_GT(target, 0.0f);  // asserted in Java code
    DCHECK_LT(target, 1.0f);
    target_utilization_ = target;
  }
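
  // Worked example (illustrative numbers only): a target utilization of 0.5
  // means the heap aims to be about half occupied after a collection, so
  // 8 MB of live objects implies an ideal footprint of roughly
  // 8 MB / 0.5 = 16 MB. See GrowForUtilization() below.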

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns
  // true if we waited for the GC to complete.
  bool WaitForConcurrentGcToComplete();

  const Spaces& GetSpaces() {
    return spaces_;
  }

  void SetReferenceOffsets(MemberOffset reference_referent_offset,
                           MemberOffset reference_queue_offset,
                           MemberOffset reference_queueNext_offset,
                           MemberOffset reference_pendingNext_offset,
                           MemberOffset finalizer_reference_zombie_offset);

  Object* GetReferenceReferent(Object* reference);
  void ClearReferenceReferent(Object* reference);

  // Returns true if the reference object has not yet been enqueued.
  bool IsEnqueuable(const Object* ref);
  void EnqueueReference(Object* ref, Object** list);
  void EnqueuePendingReference(Object* ref, Object** list);
  Object* DequeuePendingReference(Object** list);
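
  // Sketch of how the pending-reference operations compose (assumed usage;
  // the real callers live in the GC and java.lang.ref support code):
  //   Object* list = NULL;
  //   heap->EnqueuePendingReference(ref, &list);  // link ref into the list
  //   while (list != NULL) {
  //     Object* pending = heap->DequeuePendingReference(&list);
  //     // deliver 'pending' to its java.lang.ref.ReferenceQueue, etc.
  //   }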

  MemberOffset GetReferencePendingNextOffset() {
    DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
    return reference_pendingNext_offset_;
  }

  MemberOffset GetFinalizerReferenceZombieOffset() {
    DCHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
    return finalizer_reference_zombie_offset_;
  }

  void EnableObjectValidation() {
#if VERIFY_OBJECT_ENABLED
    VerifyHeap();
#endif
    verify_objects_ = true;
  }

  void DisableObjectValidation() {
    verify_objects_ = false;
  }

  void RecordFree(size_t freed_objects, size_t freed_bytes) LOCKS_EXCLUDED(statistics_lock_);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field.
  void WriteBarrierField(const Object* dst, MemberOffset /*offset*/, const Object* /*new_value*/) {
    if (!card_marking_disabled_) {
      card_table_->MarkCard(dst);
    }
  }

  // Write barrier for array operations that update many field positions.
  void WriteBarrierArray(const Object* dst, int /*start_offset*/,
                         size_t /*length TODO: element_count or byte_count?*/) {
    if (UNLIKELY(!card_marking_disabled_)) {
      card_table_->MarkCard(dst);
    }
  }
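
  // Usage sketch (illustrative; SetFieldObject is assumed to be the reference
  // field-store helper used elsewhere in the runtime). A reference store is
  // followed by a barrier call so the card table records the dirty card:
  //   dst->SetFieldObject(offset, new_value, false);
  //   Runtime::Current()->GetHeap()->WriteBarrierField(dst, offset, new_value);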
196
Elliott Hughesb3bd5f02012-03-08 21:05:27 -0800197 CardTable* GetCardTable() {
Mathieu Chartiercc236d72012-07-20 10:29:05 -0700198 return card_table_.get();
Ian Rogers5d76c432011-10-31 21:42:49 -0700199 }
200
Elliott Hughesb3bd5f02012-03-08 21:05:27 -0800201 void DisableCardMarking() {
Ian Rogers5d76c432011-10-31 21:42:49 -0700202 // TODO: we shouldn't need to disable card marking, this is here to help the image_writer
203 card_marking_disabled_ = true;
Elliott Hughes3a4f8df2011-09-13 15:22:36 -0700204 }
Elliott Hughes5ea047b2011-09-13 14:38:18 -0700205
Elliott Hughesb3bd5f02012-03-08 21:05:27 -0800206 void AddFinalizerReference(Thread* self, Object* object);
Elliott Hughesadb460d2011-10-05 17:02:34 -0700207
Ian Rogers00f7d0e2012-07-19 15:28:27 -0700208 size_t GetBytesAllocated() const LOCKS_EXCLUDED(statistics_lock_);
209 size_t GetObjectsAllocated() const LOCKS_EXCLUDED(statistics_lock_);
210 size_t GetConcurrentStartSize() const LOCKS_EXCLUDED(statistics_lock_);
211 size_t GetConcurrentMinFree() const LOCKS_EXCLUDED(statistics_lock_);
Mathieu Chartier7664f5c2012-06-08 18:15:32 -0700212
  // Returns the space which contains a given object's address.
  // This is probably slow; TODO: use a better data structure such as a binary tree.
  Space* FindSpaceFromObject(const Object*) const;

  void DumpForSigQuit(std::ostream& os) LOCKS_EXCLUDED(statistics_lock_);

  void Trim(AllocSpace* alloc_space);

  HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  void PreZygoteFork();

  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
  // Assumes there is only one image space.
  ImageSpace* GetImageSpace();
  AllocSpace* GetAllocSpace();
  void DumpSpaces();

 private:
  // Allocates uninitialized storage.
  Object* Allocate(size_t num_bytes)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
  Object* Allocate(AllocSpace* space, size_t num_bytes)
      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // Pushes a list of cleared references out to the managed heap.
  void EnqueueClearedReferences(Object** cleared_references);

  void RequestHeapTrim();
  void RequestConcurrentGC();

  void RecordAllocation(AllocSpace* space, const Object* object)
      LOCKS_EXCLUDED(statistics_lock_, GlobalSynchronization::heap_bitmap_lock_);

  void CollectGarbageInternal(bool partial_gc, bool clear_soft_references)
      LOCKS_EXCLUDED(gc_complete_lock_,
                     GlobalSynchronization::heap_bitmap_lock_,
                     GlobalSynchronization::mutator_lock_,
                     GlobalSynchronization::thread_suspend_count_lock_);
  void CollectGarbageMarkSweepPlan(bool partial_gc, bool clear_soft_references)
      LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_,
                     GlobalSynchronization::mutator_lock_);
  void CollectGarbageConcurrentMarkSweepPlan(bool partial_gc, bool clear_soft_references)
      LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_,
                     GlobalSynchronization::mutator_lock_);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection.
  void GrowForUtilization();
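
  // A minimal sketch of the computation this implies (an assumption, not the
  // actual implementation): grow the ideal footprint until live bytes divided
  // by footprint matches the target ratio:
  //   size_t target_size = static_cast<size_t>(num_bytes_allocated_ / target_utilization_);
  //   SetIdealFootprint(target_size);  // the alloc space still clamps to its growth limit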

  size_t GetPercentFree() EXCLUSIVE_LOCKS_REQUIRED(statistics_lock_);

  void AddSpace(Space* space) LOCKS_EXCLUDED(GlobalSynchronization::heap_bitmap_lock_);

  void VerifyObjectLocked(const Object* obj)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_);

  static void VerificationCallback(Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::heap_bitmap_lock_);

  Spaces spaces_;

  // The alloc space which we are currently allocating into.
  AllocSpace* alloc_space_;

  // The mod-union table remembers all of the references from the image space to the alloc /
  // zygote spaces.
  UniquePtr<ModUnionTable> mod_union_table_;

  // This table holds all of the references from the zygote space to the alloc space.
  UniquePtr<ModUnionTable> zygote_mod_union_table_;

  UniquePtr<CardTable> card_table_;

  // True for concurrent mark sweep GC, false for mark sweep.
  const bool concurrent_gc_;

  // If we have a zygote space.
  bool have_zygote_space_;

  // Used by the image writer to disable card marking on copied objects.
  // TODO: remove
  bool card_marking_disabled_;

  // Guards access to the state of GC; the associated condition variable is used to signal when a
  // GC completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // True while the garbage collector is running.
  volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);

  // Guards access to heap statistics, some used to calculate when concurrent GC should occur.
  // TODO: move bytes/objects allocated to thread-locals and remove need for lock?
  Mutex* statistics_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Bytes until concurrent GC starts.
  size_t concurrent_start_bytes_ GUARDED_BY(statistics_lock_);
  size_t concurrent_start_size_;
  size_t concurrent_min_free_;

  // Number of bytes allocated. Adjusted after each allocation and free.
  size_t num_bytes_allocated_ GUARDED_BY(statistics_lock_);

  // Number of objects allocated. Adjusted after each allocation and free.
  size_t num_objects_allocated_ GUARDED_BY(statistics_lock_);

  // Last trim time.
  uint64_t last_trim_time_;

  UniquePtr<HeapBitmap> live_bitmap_ GUARDED_BY(GlobalSynchronization::heap_bitmap_lock_);
  UniquePtr<HeapBitmap> mark_bitmap_ GUARDED_BY(GlobalSynchronization::heap_bitmap_lock_);

  // True while the garbage collector is trying to signal the GC daemon thread.
  // This flag is needed to prevent recursion when JNI calls that allocate
  // memory request another GC.
  bool try_running_gc_;

  // Used to ensure that we don't ever recursively request GC.
  volatile bool requesting_gc_;

  // Mark stack that we reuse to avoid re-allocating it for each GC.
  UniquePtr<MarkStack> mark_stack_;

  // Offset of java.lang.ref.Reference.referent.
  MemberOffset reference_referent_offset_;

  // Offset of java.lang.ref.Reference.queue.
  MemberOffset reference_queue_offset_;

  // Offset of java.lang.ref.Reference.queueNext.
  MemberOffset reference_queueNext_offset_;

  // Offset of java.lang.ref.Reference.pendingNext.
  MemberOffset reference_pendingNext_offset_;

  // Offset of java.lang.ref.FinalizerReference.zombie.
  MemberOffset finalizer_reference_zombie_offset_;

  // Target ideal heap utilization ratio.
  float target_utilization_;

  bool verify_objects_;

  friend class ScopedHeapLock;
  FRIEND_TEST(SpaceTest, AllocAndFree);
  FRIEND_TEST(SpaceTest, AllocAndFreeList);
  FRIEND_TEST(SpaceTest, ZygoteSpace);
  friend class SpaceTest;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace art

#endif  // ART_SRC_HEAP_H_