/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_HEAP_H_
#define ART_SRC_HEAP_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "atomic_integer.h"
#include "gc/atomic_stack.h"
#include "gc/card_table.h"
#include "gc/heap_bitmap.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "locks.h"
#include "offsets.h"
#include "safe_map.h"
#include "timing_logger.h"
#include "thread_pool.h"

#define VERIFY_OBJECT_ENABLED 0

// Fast verification means we do not verify the classes of objects.
#define VERIFY_OBJECT_FAST 1

namespace art {

class AllocSpace;
class Class;
class ConditionVariable;
class DlMallocSpace;
class GarbageCollector;
class HeapBitmap;
class ImageSpace;
class LargeObjectSpace;
class MarkSweep;
class ModUnionTable;
class Mutex;
class Object;
class Space;
class SpaceTest;
class StackVisitor;
class Thread;
class TimingLogger;

typedef AtomicStack<Object*> ObjectStack;
typedef std::vector<ContinuousSpace*> Spaces;

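// Ages a card: a card that is exactly kCardDirty is decremented (so it is still treated as
// recently dirtied), while any other card value is reset to 0.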
class AgeCardVisitor {
 public:
  byte operator ()(byte card) const {
    if (card == CardTable::kCardDirty) {
      return card - 1;
    } else {
      return 0;
    }
  }
};

// The ordering of the enum matters; it is used to determine which GCs are run first.
enum GcType {
  // No GC performed.
  kGcTypeNone,
  // Sticky mark bits "generational" GC.
  kGcTypeSticky,
  // Partial GC, over only the alloc space.
  kGcTypePartial,
  // Full GC.
  kGcTypeFull,
  // Number of different GC types.
  kGcTypeMax,
};
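// Note: because of this ordering, CollectGarbageInternal may run a stronger GC than the one
// requested, e.g. a partial GC instead of a sticky GC (see partial_gc_frequency_ below).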
std::ostream& operator<<(std::ostream& os, const GcType& policy);

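// What triggered a GC: an allocation that could not be satisfied within the current footprint,
// background housekeeping, or an explicit request such as System.gc().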
enum GcCause {
  kGcCauseForAlloc,
  kGcCauseBackground,
  kGcCauseExplicit,
};
std::ostream& operator<<(std::ostream& os, const GcCause& policy);

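// The Heap owns all spaces as well as the card table, mod-union tables, and GC bookkeeping, and
// provides the runtime's entry points for allocation, write barriers, and garbage collection.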
class Heap {
 public:
  static const size_t kDefaultInitialSize = 2 * MB;
  static const size_t kDefaultMaximumSize = 32 * MB;
  static const size_t kDefaultMaxFree = 2 * MB;
  static const size_t kDefaultMinFree = kDefaultMaxFree / 4;

  // Default target utilization.
  static const double kDefaultTargetUtilization;

  // Used so that we don't overflow the allocation time atomic integer.
  static const size_t kTimeAdjust = 1024;

  typedef void (RootVisitor)(const Object* root, void* arg);
  typedef void (VerifyRootVisitor)(const Object* root, void* arg, size_t vreg,
                                   const StackVisitor* visitor);
  typedef bool (IsMarkedTester)(const Object* object, void* arg);

  // Create a heap with the requested sizes. The possibly empty
  // original_image_file_name names an image Space to load based on
  // ImageWriter output.
  explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
                size_t max_free, double target_utilization, size_t capacity,
                const std::string& original_image_file_name, bool concurrent_gc);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  Object* AllocObject(Thread* self, Class* klass, size_t num_bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Check sanity of given reference. Requires the heap lock.
#if VERIFY_OBJECT_ENABLED
  void VerifyObject(const Object* o);
#else
  void VerifyObject(const Object*) {}
#endif

  // Check sanity of all live references. Requires the heap lock.
  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  static void RootMatchesObjectVisitor(const Object* root, void* arg);
  bool VerifyHeapReferences()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool VerifyMissingCardMarks()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock, and
  // doesn't abort on error, allowing the caller to report more meaningful diagnostics.
  bool IsHeapAddress(const Object* obj);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(const Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references)
      LOCKS_EXCLUDED(Locks::mutator_lock_);

  // Does a concurrent GC; should only be called by the GC daemon thread, through the runtime.
  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Implements java.lang.Runtime.maxMemory.
  int64_t GetMaxMemory() const;
  // Implements java.lang.Runtime.totalMemory.
  int64_t GetTotalMemory() const;
  // Implements java.lang.Runtime.freeMemory.
  int64_t GetFreeMemory() const;

  // Implements VMDebug.countInstancesOfClass and JDWP VM_InstanceCount.
  // The boolean decides whether to use IsAssignableFrom or == when comparing classes.
  void CountInstances(const std::vector<Class*>& classes, bool use_is_assignable_from,
                      uint64_t* counts)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Set target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle. Returns the type of GC that was
  // waited on, or kGcTypeNone if no collection was in progress.
  GcType WaitForConcurrentGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);

  const Spaces& GetSpaces() const {
    return spaces_;
  }

  Spaces& GetSpaces() {
    return spaces_;
  }

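  // The following accessors cache the field offsets of java.lang.ref.Reference and
  // java.lang.ref.FinalizerReference (stored in the MemberOffset members near the end of this
  // class) so that the GC can read and update reference objects directly.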
  void SetReferenceOffsets(MemberOffset reference_referent_offset,
                           MemberOffset reference_queue_offset,
                           MemberOffset reference_queueNext_offset,
                           MemberOffset reference_pendingNext_offset,
                           MemberOffset finalizer_reference_zombie_offset);

  Object* GetReferenceReferent(Object* reference);
  void ClearReferenceReferent(Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns true if the reference object has not yet been enqueued.
  bool IsEnqueuable(const Object* ref);
  void EnqueueReference(Object* ref, Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void EnqueuePendingReference(Object* ref, Object** list)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  Object* DequeuePendingReference(Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  MemberOffset GetReferencePendingNextOffset() {
    DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
    return reference_pendingNext_offset_;
  }

  MemberOffset GetFinalizerReferenceZombieOffset() {
    DCHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
    return finalizer_reference_zombie_offset_;
  }

  void EnableObjectValidation() {
#if VERIFY_OBJECT_ENABLED
    VerifyHeap();
#endif
    verify_objects_ = true;
  }

  void DisableObjectValidation() {
    verify_objects_ = false;
  }

  bool IsObjectValidationEnabled() const {
    return verify_objects_;
  }

  void RecordFree(size_t freed_objects, size_t freed_bytes);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field.
  void WriteBarrierField(const Object* dst, MemberOffset /*offset*/, const Object* /*new_value*/) {
    card_table_->MarkCard(dst);
  }

  // Write barrier for array operations that update many field positions.
  void WriteBarrierArray(const Object* dst, int /*start_offset*/,
                         size_t /*length TODO: element_count or byte_count?*/) {
    card_table_->MarkCard(dst);
  }

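  // Illustrative (hypothetical caller): code that performs a reference store such as
  //   dst->SetFieldObject(offset, new_value, false);
  // is expected to follow it with
  //   Runtime::Current()->GetHeap()->WriteBarrierField(dst, offset, new_value);
  // so that the card covering dst is dirtied and rescanned by later partial/sticky GCs.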
  CardTable* GetCardTable() {
    return card_table_.get();
  }

  void AddFinalizerReference(Thread* self, Object* object);

  size_t GetBytesAllocated() const;
  size_t GetObjectsAllocated() const;
  size_t GetConcurrentStartSize() const;
  size_t GetConcurrentMinFree() const;
  size_t GetUsedMemorySize() const;

  // Returns the total number of objects allocated since the heap was created.
  size_t GetTotalObjectsAllocated() const;

  // Returns the total number of bytes allocated since the heap was created.
  size_t GetTotalBytesAllocated() const;

  // Returns the total number of objects freed since the heap was created.
  size_t GetTotalObjectsFreed() const;

  // Returns the total number of bytes freed since the heap was created.
  size_t GetTotalBytesFreed() const;

  // Returns the space that contains an object's address.
  // This is probably slow; TODO: use a better data structure such as a binary tree.
  ContinuousSpace* FindSpaceFromObject(const Object*) const;

  void DumpForSigQuit(std::ostream& os);

  void Trim();

  HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  ObjectStack* GetLiveStack() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_stack_.get();
  }

  void PreZygoteFork() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Mark and empty the allocation stack.
  void FlushAllocStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  void MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unmark all the objects in the allocation stack in the specified bitmap.
  void UnMarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Update and mark the mod-union table based on the GC type.
  void UpdateAndMarkModUnion(MarkSweep* mark_sweep, TimingLogger& timings, GcType gc_type)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
  // Assumes there is only one image space.
  ImageSpace* GetImageSpace();
  DlMallocSpace* GetAllocSpace();
  LargeObjectSpace* GetLargeObjectsSpace() {
    return large_object_space_.get();
  }
  void DumpSpaces();

  // Un-reserve the address range where the oat file will be placed.
  void UnReserveOatFileAddressRange();

  // GC performance measuring.
  void DumpGcPerformanceInfo();

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }

 private:
  // Allocates uninitialized storage. Passing in a null space tries to place the object in the
  // large object space.
  Object* Allocate(Thread* self, AllocSpace* space, size_t num_bytes)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Tries to allocate a number of bytes; this function never performs any GCs.
  Object* TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Pushes a list of cleared references out to the managed heap.
  void EnqueueClearedReferences(Object** cleared_references);

  void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  void RecordAllocation(size_t size, Object* object)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
  // the type of GC that was actually run.
  GcType CollectGarbageInternal(GcType gc_plan, GcCause gc_cause, bool clear_soft_references)
      LOCKS_EXCLUDED(gc_complete_lock_,
                     Locks::heap_bitmap_lock_,
                     Locks::thread_suspend_count_lock_);

  void PreGcVerification(GarbageCollector* gc);
  void PreSweepingGcVerification(GarbageCollector* gc);
  void PostGcVerification(GarbageCollector* gc);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection.
  void GrowForUtilization(uint64_t gc_duration);

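  // Illustrative example (assumed numbers): with target_utilization_ = 0.5 and 16 MB live after
  // the collection, the ideal footprint is 16 MB / 0.5 = 32 MB, which is then clamped so that at
  // least min_free_ and at most max_free_ bytes remain free.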
  size_t GetPercentFree();

  void AddSpace(ContinuousSpace* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // No thread safety analysis since we call this everywhere and it is impossible to find a proper
  // lock ordering for it.
  void VerifyObjectBody(const Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  static void VerificationCallback(Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Swap the allocation stack with the live stack.
  void SwapStacks();

  // Clear cards and update the mod-union table.
  void ProcessCards(TimingLogger& timings);

  Spaces spaces_;

  // A map that we use to temporarily reserve the address range for the oat file.
  UniquePtr<MemMap> oat_file_map_;

  // The alloc space which we are currently allocating into.
  DlMallocSpace* alloc_space_;

  // The mod-union table remembers all of the references from the image space to the alloc /
  // zygote spaces.
  UniquePtr<ModUnionTable> mod_union_table_;

  // This table holds all of the references from the zygote space to the alloc space.
  UniquePtr<ModUnionTable> zygote_mod_union_table_;

  UniquePtr<CardTable> card_table_;

  // True for concurrent mark sweep GC, false for mark sweep.
  const bool concurrent_gc_;

  // If we have a zygote space.
  bool have_zygote_space_;

  // Guards access to the state of GC; the associated condition variable is used to signal when a
  // GC completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // Reference queue lock.
  UniquePtr<Mutex> reference_queue_lock_;

  // True while the garbage collector is running.
  volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);

  // Last GC type we ran. Used by WaitForConcurrentGcToComplete to know which GC was waited on.
  volatile GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);

  // If enabled, causes a GC for alloc when the heap size reaches the current footprint limit
  // before the GC updates it.
  const bool enforce_heap_growth_rate_;

  // Maximum size that the heap can reach.
  size_t capacity_;
  size_t growth_limit_;
  size_t max_allowed_footprint_;

  // Minimum bytes before concurrent GC starts.
  size_t concurrent_start_size_;
  size_t concurrent_min_free_;
  size_t concurrent_start_bytes_;
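  // (Presumably checked against num_bytes_allocated_ after allocations: once it is exceeded, a
  // concurrent GC is requested via RequestConcurrentGC.)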

  // Number of sticky GCs run since the last partial or full GC; used to help decide when the
  // next GC should instead be a partial GC (see partial_gc_frequency_).
  size_t sticky_gc_count_;

  size_t total_bytes_freed_;
  size_t total_objects_freed_;

  // Primitive objects larger than this size are put in the large object space.
  size_t large_object_threshold_;

  // Large object space.
  UniquePtr<LargeObjectSpace> large_object_space_;

  // Number of bytes allocated. Adjusted after each allocation and free.
  AtomicInteger num_bytes_allocated_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;

  // Parallel GC data structures.
  UniquePtr<ThreadPool> thread_pool_;

  // After how many GCs we force a partial GC instead of a sticky mark bits GC.
  const size_t partial_gc_frequency_;

  // Sticky mark bits GC has some overhead, so if we have less than a few megabytes of alloc
  // space then it's probably better to just do a partial GC.
  const size_t min_alloc_space_size_for_sticky_gc_;

  // Minimum remaining size for sticky GC. Since sticky GC doesn't free up as much memory as a
  // normal GC, it is important to not use it when we are almost out of memory.
  const size_t min_remaining_space_for_sticky_gc_;

  // Last time the heap was trimmed.
  uint64_t last_trim_time_;

  // The time at which the last GC ended.
  uint64_t last_gc_time_;

  // How many bytes were allocated at the end of the last GC.
  uint64_t last_gc_size_;

  // Estimated allocation rate (bytes / second).
  uint64_t allocation_rate_;

  UniquePtr<HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  UniquePtr<HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  UniquePtr<ObjectStack> mark_stack_;

  // Allocation stack, new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap.
  const size_t max_allocation_stack_size_;
  UniquePtr<ObjectStack> allocation_stack_;

  // Second allocation stack so that we can process allocations with the heap unlocked.
  UniquePtr<ObjectStack> live_stack_;

  // Offset of java.lang.ref.Reference.referent.
  MemberOffset reference_referent_offset_;

  // Offset of java.lang.ref.Reference.queue.
  MemberOffset reference_queue_offset_;

  // Offset of java.lang.ref.Reference.queueNext.
  MemberOffset reference_queueNext_offset_;

  // Offset of java.lang.ref.Reference.pendingNext.
  MemberOffset reference_pendingNext_offset_;

  // Offset of java.lang.ref.FinalizerReference.zombie.
  MemberOffset finalizer_reference_zombie_offset_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
  // utilization, regardless of the target utilization ratio.
  size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  size_t max_free_;

  // Target ideal heap utilization ratio.
  double target_utilization_;

  // Total time for which mutators are paused or waiting for GC to complete.
  uint64_t total_wait_time_;

  // Whether to measure allocation time, and the total allocation time in microseconds.
  const bool measure_allocation_time_;
  AtomicInteger total_allocation_time_;

  bool verify_objects_;

  typedef std::vector<MarkSweep*> Collectors;
  Collectors mark_sweep_collectors_;

  friend class MarkSweep;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;
  friend class ScopedHeapLock;
  FRIEND_TEST(SpaceTest, AllocAndFree);
  FRIEND_TEST(SpaceTest, AllocAndFreeList);
  FRIEND_TEST(SpaceTest, ZygoteSpace);
  friend class SpaceTest;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace art

#endif  // ART_SRC_HEAP_H_