/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_HEAP_H_
#define ART_SRC_HEAP_H_

#include <iosfwd>
#include <string>
#include <vector>

#include "atomic_integer.h"
#include "gc/atomic_stack.h"
#include "gc/card_table.h"
#include "gc/heap_bitmap.h"
#include "globals.h"
#include "gtest/gtest.h"
#include "locks.h"
#include "offsets.h"
#include "safe_map.h"
#include "thread_pool.h"
#include "timing_logger.h"

#define VERIFY_OBJECT_ENABLED 0

// Fast verification means we do not verify the classes of objects.
#define VERIFY_OBJECT_FAST 1

namespace art {

class AllocSpace;
class Class;
class ConditionVariable;
class DlMallocSpace;
class HeapBitmap;
class ImageSpace;
class LargeObjectSpace;
class MarkSweep;
class ModUnionTable;
class Mutex;
class Object;
class Space;
class SpaceTest;
class Thread;
class TimingLogger;

typedef AtomicStack<Object*> ObjectStack;
typedef std::vector<ContinuousSpace*> Spaces;

class AgeCardVisitor {
 public:
  byte operator()(byte card) const {
    if (card == CardTable::kCardDirty) {
      return card - 1;
    } else {
      return 0;
    }
  }
};

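// A minimal sketch of how a card-aging visitor like AgeCardVisitor might be
// applied over a run of cards (hypothetical cards[] buffer; the real card
// table exposes its own scan/modify entry points):
//
//   AgeCardVisitor ager;
//   for (size_t i = 0; i < num_cards; ++i) {
//     cards[i] = ager(cards[i]);  // kCardDirty -> kCardDirty - 1, others -> 0.
//   }
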
// The ordering of the enum matters; it is used to determine which GCs are run first.
enum GcType {
  // No GC.
  kGcTypeNone,
  // Sticky mark bits "generational" GC.
  kGcTypeSticky,
  // Partial GC, over only the alloc space.
  kGcTypePartial,
  // Full GC.
  kGcTypeFull,
  // Number of different GC types.
  kGcTypeMax,
};
std::ostream& operator<<(std::ostream& os, const GcType& policy);

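// Because the values are ordered from least to most thorough, collection can
// escalate by iterating upward through the enum. A sketch (TryCollect is
// hypothetical; the real selection logic lives in CollectGarbageInternal):
//
//   for (int i = kGcTypeSticky; i < kGcTypeMax; ++i) {
//     if (TryCollect(static_cast<GcType>(i))) {
//       break;  // Stop once a GC frees enough memory.
//     }
//   }
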
enum GcCause {
  kGcCauseForAlloc,
  kGcCauseBackground,
  kGcCauseExplicit,
};
std::ostream& operator<<(std::ostream& os, const GcCause& policy);

class Heap {
 public:
  static const size_t kDefaultInitialSize = 2 * MB;
  static const size_t kDefaultMaximumSize = 32 * MB;
  static const size_t kDefaultMaxFree = 2 * MB;
  static const size_t kDefaultMinFree = kDefaultMaxFree / 4;

  // Default target utilization.
  static const double kDefaultTargetUtilization;

  // Used so that we don't overflow the allocation time atomic integer.
  static const size_t kTimeAdjust = 1024;

  typedef void (RootVisitor)(const Object* root, void* arg);
  typedef void (VerifyRootVisitor)(const Object* root, void* arg, size_t vreg,
                                   const AbstractMethod* method);
  typedef bool (IsMarkedTester)(const Object* object, void* arg);

  // Create a heap with the requested sizes. The possibly empty
  // original_image_file_name specifies an image Space to load based on
  // ImageWriter output.
  explicit Heap(size_t initial_size, size_t growth_limit, size_t min_free,
                size_t max_free, double target_utilization, size_t capacity,
                const std::string& original_image_file_name, bool concurrent_gc);

  ~Heap();

  // Allocates and initializes storage for an object instance.
  Object* AllocObject(Thread* self, Class* klass, size_t num_bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

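  // A hypothetical caller sketch (assumes a resolved Class* whose instance
  // size is known; on failure an OutOfMemoryError is pending on the thread):
  //
  //   Thread* self = Thread::Current();
  //   Object* obj = heap->AllocObject(self, klass, klass->GetObjectSize());
  //   if (obj == NULL) {
  //     return NULL;  // OOME pending on self.
  //   }
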
  // Check sanity of given reference. Requires the heap lock.
#if VERIFY_OBJECT_ENABLED
  void VerifyObject(const Object* o);
#else
  void VerifyObject(const Object*) {}
#endif

  // Check sanity of all live references. Requires the heap lock.
  void VerifyHeap() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);
  static void RootMatchesObjectVisitor(const Object* root, void* arg);
  bool VerifyHeapReferences()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool VerifyMissingCardMarks()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // A weaker test than IsLiveObject or VerifyObject that doesn't require the heap lock
  // and doesn't abort on error, allowing the caller to report more meaningful diagnostics.
  bool IsHeapAddress(const Object* obj);

  // Returns true if 'obj' is a live heap object, false otherwise (including for invalid addresses).
  // Requires the heap lock to be held.
  bool IsLiveObjectLocked(const Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Initiates an explicit garbage collection.
  void CollectGarbage(bool clear_soft_references)
      LOCKS_EXCLUDED(Locks::mutator_lock_);

  // Does a concurrent GC; should only be called by the GC daemon thread
  // through the runtime.
  void ConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Implements java.lang.Runtime.maxMemory.
  int64_t GetMaxMemory() const;
  // Implements java.lang.Runtime.totalMemory.
  int64_t GetTotalMemory() const;
  // Implements java.lang.Runtime.freeMemory.
  int64_t GetFreeMemory() const;

  // Implements VMDebug.countInstancesOfClass.
  int64_t CountInstances(Class* c, bool count_assignable)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Removes the growth limit on the alloc space so it may grow to its maximum capacity. Used to
  // implement dalvik.system.VMRuntime.clearGrowthLimit.
  void ClearGrowthLimit();

  // Target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.getTargetHeapUtilization.
  double GetTargetHeapUtilization() const {
    return target_utilization_;
  }

  // Set target ideal heap utilization ratio; implements
  // dalvik.system.VMRuntime.setTargetHeapUtilization.
  void SetTargetHeapUtilization(float target);

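  // For example, with a target utilization of 0.5 and roughly 16 MB of bytes
  // allocated after a GC, the heap footprint is grown toward 16 MB / 0.5 = 32 MB,
  // leaving about half the footprint free (subject to min_free_/max_free_
  // clamping; see GrowForUtilization below).
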
  // For the alloc space, sets the maximum number of bytes that the heap is allowed to allocate
  // from the system. Doesn't allow the space to exceed its growth limit.
  void SetIdealFootprint(size_t max_allowed_footprint);

  // Blocks the caller until the garbage collector becomes idle and returns
  // which type of GC, if any, was waited on (kGcTypeNone if none was running).
  GcType WaitForConcurrentGcToComplete(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_);

  const Spaces& GetSpaces() {
    return spaces_;
  }

  void SetReferenceOffsets(MemberOffset reference_referent_offset,
                           MemberOffset reference_queue_offset,
                           MemberOffset reference_queueNext_offset,
                           MemberOffset reference_pendingNext_offset,
                           MemberOffset finalizer_reference_zombie_offset);

  Object* GetReferenceReferent(Object* reference);
  void ClearReferenceReferent(Object* reference) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns true if the reference object has not yet been enqueued.
  bool IsEnqueuable(const Object* ref);
  void EnqueueReference(Object* ref, Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void EnqueuePendingReference(Object* ref, Object** list)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  Object* DequeuePendingReference(Object** list) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

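  // A sketch of the enqueue/dequeue pattern these methods support, assuming
  // the pending list is threaded through the pendingNext field
  // (ProcessReference is a hypothetical consumer):
  //
  //   Object* list = NULL;
  //   heap->EnqueuePendingReference(ref, &list);
  //   while (list != NULL) {
  //     Object* pending = heap->DequeuePendingReference(&list);
  //     ProcessReference(pending);
  //   }
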
  MemberOffset GetReferencePendingNextOffset() {
    DCHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
    return reference_pendingNext_offset_;
  }

  MemberOffset GetFinalizerReferenceZombieOffset() {
    DCHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
    return finalizer_reference_zombie_offset_;
  }

  void EnableObjectValidation() {
#if VERIFY_OBJECT_ENABLED
    VerifyHeap();
#endif
    verify_objects_ = true;
  }

  void DisableObjectValidation() {
    verify_objects_ = false;
  }

  bool IsObjectValidationEnabled() const {
    return verify_objects_;
  }

  void RecordFree(size_t freed_objects, size_t freed_bytes);

  // Must be called if a field of an Object in the heap changes, and before any GC safe-point.
  // The call is not needed if NULL is stored in the field.
  void WriteBarrierField(const Object* dst, MemberOffset /*offset*/, const Object* /*new_value*/) {
    card_table_->MarkCard(dst);
  }

  // Write barrier for array operations that update many field positions.
  void WriteBarrierArray(const Object* dst, int /*start_offset*/,
                         size_t /*length TODO: element_count or byte_count?*/) {
    card_table_->MarkCard(dst);
  }

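  // Typical use: after storing a reference into a heap object, mark the card
  // so a later GC rescans that object. A sketch (SetFieldObject stands in for
  // whatever store path the runtime actually uses):
  //
  //   dst->SetFieldObject(offset, new_value, false);
  //   heap->WriteBarrierField(dst, offset, new_value);
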
  CardTable* GetCardTable() {
    return card_table_.get();
  }

  void AddFinalizerReference(Thread* self, Object* object);

  size_t GetBytesAllocated() const;
  size_t GetObjectsAllocated() const;
  size_t GetConcurrentStartSize() const;
  size_t GetConcurrentMinFree() const;
  size_t GetUsedMemorySize() const;

  // Returns the total number of objects allocated since the heap was created.
  size_t GetTotalObjectsAllocated() const;

  // Returns the total number of bytes allocated since the heap was created.
  size_t GetTotalBytesAllocated() const;

  // Returns the total number of objects freed since the heap was created.
  size_t GetTotalObjectsFreed() const;

  // Returns the total number of bytes freed since the heap was created.
  size_t GetTotalBytesFreed() const;

  // Returns the continuous space that corresponds to an object's address.
  // This is probably slow; TODO: use a better data structure such as a binary tree.
  ContinuousSpace* FindSpaceFromObject(const Object*) const;

  void DumpForSigQuit(std::ostream& os);

  void Trim();

  HeapBitmap* GetLiveBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return live_bitmap_.get();
  }

  HeapBitmap* GetMarkBitmap() SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
    return mark_bitmap_.get();
  }

  void PreZygoteFork() LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // Mark the contents of the allocation stack and then empty it.
  void FlushAllocStack()
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Mark all the objects in the allocation stack in the specified bitmap.
  void MarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Unmark all the objects in the allocation stack in the specified bitmap.
  void UnMarkAllocStack(SpaceBitmap* bitmap, SpaceSetMap* large_objects, ObjectStack* stack)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Update and mark mod union table based on GC type.
  void UpdateAndMarkModUnion(MarkSweep* mark_sweep, TimingLogger& timings, GcType gc_type)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added.
  // Assumes there is only one image space.
  ImageSpace* GetImageSpace();
  DlMallocSpace* GetAllocSpace();
  LargeObjectSpace* GetLargeObjectsSpace() {
    return large_object_space_.get();
  }
  void DumpSpaces();

  // UnReserve the address range where the oat file will be placed.
  void UnReserveOatFileAddressRange();

  // GC performance measuring.
  void DumpGcPerformanceInfo();

  // Thread pool.
  void CreateThreadPool();
  void DeleteThreadPool();
  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }

 private:
  // Allocates uninitialized storage. Passing in a null space tries to place the object in the
  // large object space.
  Object* Allocate(Thread* self, AllocSpace* space, size_t num_bytes)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Try to allocate a number of bytes; this function never does any GCs.
  Object* TryToAllocate(Thread* self, AllocSpace* space, size_t alloc_size, bool grow)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Pushes a list of cleared references out to the managed heap.
  void EnqueueClearedReferences(Object** cleared_references);

  void RequestHeapTrim() LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);
  void RequestConcurrentGC(Thread* self) LOCKS_EXCLUDED(Locks::runtime_shutdown_lock_);

  // Swap bitmaps (if we are a full GC then we swap the zygote bitmap too).
  void SwapBitmaps(GcType gc_type) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
  void SwapLargeObjects() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  void RecordAllocation(size_t size, Object* object)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sometimes CollectGarbageInternal decides to run a different GC than you requested. Returns
  // which type of GC was actually run.
  GcType CollectGarbageInternal(GcType gc_plan, GcCause gc_cause, bool clear_soft_references)
      LOCKS_EXCLUDED(gc_complete_lock_,
                     Locks::heap_bitmap_lock_,
                     Locks::mutator_lock_,
                     Locks::thread_suspend_count_lock_);
  void CollectGarbageMarkSweepPlan(Thread* self, GcType gc_plan, GcCause gc_cause,
                                   bool clear_soft_references)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_,
                     Locks::mutator_lock_);
  void CollectGarbageConcurrentMarkSweepPlan(Thread* self, GcType gc_plan, GcCause gc_cause,
                                             bool clear_soft_references)
      LOCKS_EXCLUDED(Locks::heap_bitmap_lock_,
                     Locks::mutator_lock_);

  // Given the current contents of the alloc space, increase the allowed heap footprint to match
  // the target utilization ratio. This should only be called immediately after a full garbage
  // collection.
  void GrowForUtilization();

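  // A sketch of the growth computation implied by target_utilization_, min_free_
  // and max_free_ (the exact arithmetic and clamping live in heap.cc; this is
  // illustrative only):
  //
  //   size_t target_size = num_bytes_allocated_ / target_utilization_;
  //   target_size = std::min(target_size, num_bytes_allocated_ + max_free_);
  //   target_size = std::max(target_size, num_bytes_allocated_ + min_free_);
  //   SetIdealFootprint(target_size);
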
  size_t GetPercentFree();

  void AddSpace(ContinuousSpace* space) LOCKS_EXCLUDED(Locks::heap_bitmap_lock_);

  // No thread safety analysis since we call this everywhere and it is impossible to find a proper
  // lock ordering for it.
  void VerifyObjectBody(const Object* obj) NO_THREAD_SAFETY_ANALYSIS;

  static void VerificationCallback(Object* obj, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);

  // Swap the allocation stack with the live stack.
  void SwapStacks();

  // Clear cards and update the mod union table.
  void ProcessCards(TimingLogger& timings);

  Spaces spaces_;

  // A memory mapping used to temporarily reserve the address range where the oat file
  // will be placed.
  UniquePtr<MemMap> oat_file_map_;

  // The alloc space which we are currently allocating into.
  DlMallocSpace* alloc_space_;

  // One cumulative logger for each type of GC.
  typedef SafeMap<GcType, CumulativeLogger*> CumulativeTimings;
  CumulativeTimings cumulative_timings_;

  // The mod-union table remembers all of the references from the image space to the alloc /
  // zygote spaces.
  UniquePtr<ModUnionTable> mod_union_table_;

  // This table holds all of the references from the zygote space to the alloc space.
  UniquePtr<ModUnionTable> zygote_mod_union_table_;

  UniquePtr<CardTable> card_table_;

  // True for concurrent mark sweep GC, false for mark sweep.
  const bool concurrent_gc_;

  // If we have a zygote space.
  bool have_zygote_space_;

  // Guards access to the state of GC; the associated condition variable is used to signal when
  // a GC completes.
  Mutex* gc_complete_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  UniquePtr<ConditionVariable> gc_complete_cond_ GUARDED_BY(gc_complete_lock_);

  // Reference queue lock.
  UniquePtr<Mutex> reference_queue_lock_;

  // True while the garbage collector is running.
  volatile bool is_gc_running_ GUARDED_BY(gc_complete_lock_);

  // Last GC type we ran. Used by WaitForConcurrentGcToComplete to know which GC was waited on.
  volatile GcType last_gc_type_ GUARDED_BY(gc_complete_lock_);

  // If enabled, causes a GC for alloc when the heap size reaches the current footprint limit
  // before the GC updates it.
  const bool enforce_heap_growth_rate_;

  // Maximum size that the heap can reach.
  size_t growth_limit_;
  size_t max_allowed_footprint_;

  // Bytes until concurrent GC starts.
  size_t concurrent_start_size_;
  size_t concurrent_min_free_;
  size_t concurrent_start_bytes_;

  // Number of bytes allocated since the last GC; we use this to help determine when to schedule
  // concurrent GCs.
  size_t bytes_since_last_gc_;
  size_t sticky_gc_count_;

  size_t total_bytes_freed_;
  size_t total_objects_freed_;

  // Primitive objects larger than this size are put in the large object space.
  size_t large_object_threshold_;

  // Large object space.
  UniquePtr<LargeObjectSpace> large_object_space_;

  // Number of bytes allocated. Adjusted after each allocation and free.
  AtomicInteger num_bytes_allocated_;

  // Heap verification flags.
  const bool verify_missing_card_marks_;
  const bool verify_system_weaks_;
  const bool verify_pre_gc_heap_;
  const bool verify_post_gc_heap_;
  const bool verify_mod_union_table_;

  // Parallel GC data structures.
  UniquePtr<ThreadPool> thread_pool_;

  // After how many GCs we force to do a partial GC instead of sticky mark bits GC.
  const size_t partial_gc_frequency_;

  // Sticky mark bits GC has some overhead, so if we have less than a few megabytes of AllocSpace
  // then it's probably better to just do a partial GC.
  const size_t min_alloc_space_size_for_sticky_gc_;

  // Minimum remaining size for sticky GC. Since sticky GC doesn't free up as much memory as a
  // normal GC, it is important to not use it when we are almost out of memory.
  const size_t min_remaining_space_for_sticky_gc_;

  // Last trim time.
  uint64_t last_trim_time_;

  UniquePtr<HeapBitmap> live_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);
  UniquePtr<HeapBitmap> mark_bitmap_ GUARDED_BY(Locks::heap_bitmap_lock_);

  // Used to ensure that we don't ever recursively request GC.
  volatile bool requesting_gc_;

  // Mark stack that we reuse to avoid re-allocating the mark stack.
  UniquePtr<ObjectStack> mark_stack_;

  // Allocation stack; new allocations go here so that we can do sticky mark bits. This enables us
  // to use the live bitmap as the old mark bitmap.
  const size_t max_allocation_stack_size_;
  UniquePtr<ObjectStack> allocation_stack_;

  // Second allocation stack so that we can process allocations with the heap unlocked.
  UniquePtr<ObjectStack> live_stack_;

  // Offset of java.lang.ref.Reference.referent.
  MemberOffset reference_referent_offset_;

  // Offset of java.lang.ref.Reference.queue.
  MemberOffset reference_queue_offset_;

  // Offset of java.lang.ref.Reference.queueNext.
  MemberOffset reference_queueNext_offset_;

  // Offset of java.lang.ref.Reference.pendingNext.
  MemberOffset reference_pendingNext_offset_;

  // Offset of java.lang.ref.FinalizerReference.zombie.
  MemberOffset finalizer_reference_zombie_offset_;

  // Minimum free guarantees that you always have at least min_free_ free bytes after growing for
  // utilization, regardless of target utilization ratio.
  size_t min_free_;

  // The ideal maximum free size, when we grow the heap for utilization.
  size_t max_free_;

  // Target ideal heap utilization ratio.
  double target_utilization_;

  // Total time for which mutators are paused or waiting for GC to complete.
  uint64_t total_paused_time_;
  uint64_t total_wait_time_;

  // Allocation timing: whether to measure allocation time, and the cumulative allocation time
  // (scaled by kTimeAdjust so that the atomic integer doesn't overflow).
  const bool measure_allocation_time_;
  AtomicInteger total_allocation_time_;

  bool verify_objects_;

  friend class MarkSweep;
  friend class VerifyReferenceCardVisitor;
  friend class VerifyReferenceVisitor;
  friend class VerifyObjectVisitor;
  friend class ScopedHeapLock;
  FRIEND_TEST(SpaceTest, AllocAndFree);
  FRIEND_TEST(SpaceTest, AllocAndFreeList);
  FRIEND_TEST(SpaceTest, ZygoteSpace);
  friend class SpaceTest;

  DISALLOW_IMPLICIT_CONSTRUCTORS(Heap);
};

}  // namespace art

#endif  // ART_SRC_HEAP_H_