blob: bffd19dd05460774386e407a08e21964b7e8ca5e [file] [log] [blame]
Carl Shapiro69759ea2011-07-21 18:13:35 -07001// Copyright 2011 Google Inc. All Rights Reserved.
Carl Shapiro69759ea2011-07-21 18:13:35 -07002
Brian Carlstrom578bbdc2011-07-21 14:07:47 -07003#include "heap.h"
Carl Shapiro58551df2011-07-24 03:09:51 -07004
5#include <vector>
6
Elliott Hughes90a33692011-08-30 13:27:07 -07007#include "UniquePtr.h"
Brian Carlstrom9cff8e12011-08-18 16:47:29 -07008#include "image.h"
Carl Shapiro58551df2011-07-24 03:09:51 -07009#include "mark_sweep.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070010#include "object.h"
11#include "space.h"
Carl Shapiro58551df2011-07-24 03:09:51 -070012#include "stl_util.h"
Carl Shapiro69759ea2011-07-21 18:13:35 -070013
14namespace art {
15
// All spaces managed by the heap, including the boot image space (if any)
// and the allocation space.  Owned: deleted in Heap::Destroy().
std::vector<Space*> Heap::spaces_;

// The space backing the boot image, or NULL if no image was loaded.
// Non-owning alias of an entry in spaces_.
Space* Heap::boot_space_ = NULL;

// The space new objects are allocated from.  Non-owning alias of an
// entry in spaces_.
Space* Heap::alloc_space_ = NULL;

// Upper bound, in bytes, used to fail impossible allocations early.
size_t Heap::maximum_size_ = 0;

// Running totals maintained by RecordAllocation()/RecordFree().
size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

// True while a (future) concurrent GC is tracing; see Allocate().
bool Heap::is_gc_running_ = false;

// Bitmap of objects marked during the current GC.
HeapBitmap* Heap::mark_bitmap_ = NULL;

// Bitmap of all live objects; updated on every allocation and free.
HeapBitmap* Heap::live_bitmap_ = NULL;

// Field offsets into java.lang.ref.Reference, not yet wired up.
size_t Heap::reference_referent_offset_ = 0;  // TODO
size_t Heap::reference_queue_offset_ = 0;  // TODO
size_t Heap::reference_queueNext_offset_ = 0;  // TODO
size_t Heap::reference_pendingNext_offset_ = 0;  // TODO
size_t Heap::finalizer_reference_zombie_offset_ = 0;  // TODO
39
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070040bool Heap::Init(size_t initial_size, size_t maximum_size, const char* boot_image_file_name) {
41 Space* boot_space;
42 byte* requested_base;
43 if (boot_image_file_name == NULL) {
44 boot_space = NULL;
45 requested_base = NULL;
46 } else {
47 boot_space = Space::Create(boot_image_file_name);
48 if (boot_space == NULL) {
49 return false;
50 }
51 spaces_.push_back(boot_space);
52 requested_base = boot_space->GetBase() + RoundUp(boot_space->Size(), kPageSize);
53 }
54
55 Space* space = Space::Create(initial_size, maximum_size, requested_base);
Carl Shapiro58551df2011-07-24 03:09:51 -070056 if (space == NULL) {
Carl Shapiro69759ea2011-07-21 18:13:35 -070057 return false;
58 }
59
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070060 if (boot_space == NULL) {
61 boot_space = space;
62 }
63 byte* base = std::min(boot_space->GetBase(), space->GetBase());
64 byte* limit = std::max(boot_space->GetLimit(), space->GetLimit());
65 DCHECK_LT(base, limit);
66 size_t num_bytes = limit - base;
Carl Shapiro69759ea2011-07-21 18:13:35 -070067
68 // Allocate the initial live bitmap.
Elliott Hughes90a33692011-08-30 13:27:07 -070069 UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
70 if (live_bitmap.get() == NULL) {
Carl Shapiro69759ea2011-07-21 18:13:35 -070071 return false;
72 }
73
74 // Allocate the initial mark bitmap.
Elliott Hughes90a33692011-08-30 13:27:07 -070075 UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
76 if (mark_bitmap.get() == NULL) {
Carl Shapiro69759ea2011-07-21 18:13:35 -070077 return false;
78 }
79
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070080 alloc_space_ = space;
Carl Shapiro58551df2011-07-24 03:09:51 -070081 spaces_.push_back(space);
Carl Shapiro69759ea2011-07-21 18:13:35 -070082 maximum_size_ = maximum_size;
83 live_bitmap_ = live_bitmap.release();
84 mark_bitmap_ = mark_bitmap.release();
85
86 // TODO: allocate the card table
87
Brian Carlstrom9cff8e12011-08-18 16:47:29 -070088 // Make objects in boot_space live (after live_bitmap_ is set)
89 if (boot_image_file_name != NULL) {
Brian Carlstroma663ea52011-08-19 23:33:41 -070090 boot_space_ = boot_space;
Brian Carlstrom9cff8e12011-08-18 16:47:29 -070091 RecordImageAllocations(boot_space);
92 }
93
Carl Shapiro69759ea2011-07-21 18:13:35 -070094 return true;
95}
96
97void Heap::Destroy() {
Carl Shapiro58551df2011-07-24 03:09:51 -070098 STLDeleteElements(&spaces_);
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070099 if (mark_bitmap_ != NULL) {
100 delete mark_bitmap_;
101 mark_bitmap_ = NULL;
102 }
103 if (live_bitmap_ != NULL) {
104 delete live_bitmap_;
105 }
106 live_bitmap_ = NULL;
Carl Shapiro69759ea2011-07-21 18:13:35 -0700107}
108
Carl Shapiro58551df2011-07-24 03:09:51 -0700109Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
Brian Carlstrom9cff8e12011-08-18 16:47:29 -0700110 DCHECK(klass == NULL
Brian Carlstrom74eb46a2011-08-02 20:10:14 -0700111 || klass->descriptor_ == NULL
Brian Carlstrom4873d462011-08-21 15:23:39 -0700112 || (klass->IsClassClass() && num_bytes >= sizeof(Class))
Brian Carlstromb63ec392011-08-27 17:38:27 -0700113 || (klass->object_size_ == (klass->IsArrayClass() ? 0 : num_bytes)));
Carl Shapiro58551df2011-07-24 03:09:51 -0700114 Object* obj = Allocate(num_bytes);
115 if (obj != NULL) {
116 obj->klass_ = klass;
117 }
118 return obj;
119}
120
Elliott Hughesa2501992011-08-26 19:39:54 -0700121void Heap::VerifyObject(Object* obj) {
Ian Rogers408f79a2011-08-23 18:22:33 -0700122 if (!IsAligned(obj, kObjectAlignment)) {
123 LOG(FATAL) << "Object isn't aligned: " << obj;
124 } else if (!live_bitmap_->Test(obj)) {
125 // TODO: we don't hold a lock here as it is assumed the live bit map
126 // isn't changing if the mutator is running.
127 LOG(FATAL) << "Object is dead: " << obj;
128 } else if(obj->GetClass() == NULL) {
129 LOG(FATAL) << "Object has no class: " << obj;
130 }
131}
132
Elliott Hughesa2501992011-08-26 19:39:54 -0700133bool Heap::IsHeapAddress(Object* obj) {
134 if (!IsAligned(obj, kObjectAlignment)) {
135 return false;
136 }
137 // TODO
138 return true;
139}
140
Carl Shapiro58551df2011-07-24 03:09:51 -0700141void Heap::RecordAllocation(Space* space, const Object* obj) {
142 size_t size = space->AllocationSize(obj);
143 DCHECK_NE(size, 0u);
144 num_bytes_allocated_ += size;
145 num_objects_allocated_ += 1;
146 live_bitmap_->Set(obj);
147}
148
149void Heap::RecordFree(Space* space, const Object* obj) {
150 size_t size = space->AllocationSize(obj);
151 DCHECK_NE(size, 0u);
152 if (size < num_bytes_allocated_) {
153 num_bytes_allocated_ -= size;
154 } else {
155 num_bytes_allocated_ = 0;
156 }
157 live_bitmap_->Clear(obj);
158 if (num_objects_allocated_ > 0) {
159 num_objects_allocated_ -= 1;
160 }
161}
162
Brian Carlstrom9cff8e12011-08-18 16:47:29 -0700163void Heap::RecordImageAllocations(Space* space) {
164 CHECK(space != NULL);
165 CHECK(live_bitmap_ != NULL);
166 byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), 8);
167 while (current < space->GetLimit()) {
168 DCHECK(IsAligned(current, 8));
169 const Object* obj = reinterpret_cast<const Object*>(current);
170 live_bitmap_->Set(obj);
171 current += RoundUp(obj->SizeOf(), 8);
172 }
173}
174
Carl Shapiro69759ea2011-07-21 18:13:35 -0700175Object* Heap::Allocate(size_t size) {
Brian Carlstrom4a289ed2011-08-16 17:17:49 -0700176 DCHECK(alloc_space_ != NULL);
177 Space* space = alloc_space_;
Carl Shapiro58551df2011-07-24 03:09:51 -0700178 Object* obj = Allocate(space, size);
179 if (obj != NULL) {
180 RecordAllocation(space, obj);
181 }
182 return obj;
183}
184
185Object* Heap::Allocate(Space* space, size_t size) {
Carl Shapiro69759ea2011-07-21 18:13:35 -0700186 // Fail impossible allocations. TODO: collect soft references.
187 if (size > maximum_size_) {
188 return NULL;
189 }
190
Carl Shapiro58551df2011-07-24 03:09:51 -0700191 Object* ptr = space->AllocWithoutGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700192 if (ptr != NULL) {
193 return ptr;
194 }
195
196 // The allocation failed. If the GC is running, block until it
197 // completes and retry.
198 if (is_gc_running_) {
199 // The GC is concurrently tracing the heap. Release the heap
200 // lock, wait for the GC to complete, and retrying allocating.
201 WaitForConcurrentGcToComplete();
Carl Shapiro58551df2011-07-24 03:09:51 -0700202 ptr = space->AllocWithoutGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700203 if (ptr != NULL) {
204 return ptr;
205 }
206 }
207
208 // Another failure. Our thread was starved or there may be too many
209 // live objects. Try a foreground GC. This will have no effect if
210 // the concurrent GC is already running.
Carl Shapiro58551df2011-07-24 03:09:51 -0700211 CollectGarbageInternal();
212 ptr = space->AllocWithoutGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700213 if (ptr != NULL) {
214 return ptr;
215 }
216
217 // Even that didn't work; this is an exceptional state.
218 // Try harder, growing the heap if necessary.
Carl Shapiro58551df2011-07-24 03:09:51 -0700219 ptr = space->AllocWithGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700220 if (ptr != NULL) {
221 //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
Carl Shapiro58551df2011-07-24 03:09:51 -0700222 size_t new_footprint = space->MaxAllowedFootprint();
223 // TODO: may want to grow a little bit more so that the amount of
224 // free space is equal to the old free space + the
225 // utilization slop for the new allocation.
226 LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
Carl Shapiro69759ea2011-07-21 18:13:35 -0700227 << "for " << size << "-byte allocation";
228 return ptr;
229 }
230
231 // Most allocations should have succeeded by now, so the heap is
232 // really full, really fragmented, or the requested size is really
233 // big. Do another GC, collecting SoftReferences this time. The VM
234 // spec requires that all SoftReferences have been collected and
235 // cleared before throwing an OOME.
236
Carl Shapiro58551df2011-07-24 03:09:51 -0700237 // TODO: wait for the finalizers from the previous GC to finish
Carl Shapiro69759ea2011-07-21 18:13:35 -0700238 LOG(INFO) << "Forcing collection of SoftReferences for "
239 << size << "-byte allocation";
Carl Shapiro58551df2011-07-24 03:09:51 -0700240 CollectGarbageInternal();
241 ptr = space->AllocWithGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700242 if (ptr != NULL) {
243 return ptr;
244 }
Carl Shapiro69759ea2011-07-21 18:13:35 -0700245
Carl Shapiro69759ea2011-07-21 18:13:35 -0700246 LOG(ERROR) << "Out of memory on a " << size << " byte allocation";
247
Carl Shapiro58551df2011-07-24 03:09:51 -0700248 // TODO: tell the HeapSource to dump its state
249 // TODO: dump stack traces for all threads
Carl Shapiro69759ea2011-07-21 18:13:35 -0700250
Carl Shapiro69759ea2011-07-21 18:13:35 -0700251 return NULL;
252}
253
// Public entry point for an explicit GC; simply delegates to the
// internal collection routine.
void Heap::CollectGarbage() {
  CollectGarbageInternal();
}
257
// Runs a full stop-the-world mark-sweep collection, then grows the heap
// toward the target utilization.  The concurrent phases sketched in the
// TODOs below are not implemented yet; the statement order mirrors the
// intended final design (mark roots, trace, process references, sweep).
void Heap::CollectGarbageInternal() {
  // TODO: check that heap lock is held

  // TODO: Suspend all threads
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    //   unlock heap
    //   resume threads

    // Trace everything reachable from the roots.
    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    //   lock heap
    //   suspend threads
    //   re-mark root set
    //   scan dirty objects

    // false: do not preserve soft references on this pass.
    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    // Reclaim everything left unmarked.
    mark_sweep.Sweep();
  }

  GrowForUtilization();

  // TODO: Resume all threads
}
294
// Stub: will block the caller until an in-flight concurrent GC finishes.
// Currently a no-op because the concurrent collector is not implemented.
void Heap::WaitForConcurrentGcToComplete() {
}
297
// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  // Not implemented yet; logs an error so the gap is visible at runtime.
  UNIMPLEMENTED(ERROR);
}
304
305} // namespace art