blob: 286d7061e73f2d25b7fea1a92f934ea4f83a8943 [file] [log] [blame]
Carl Shapiro69759ea2011-07-21 18:13:35 -07001// Copyright 2011 Google Inc. All Rights Reserved.
Carl Shapiro69759ea2011-07-21 18:13:35 -07002
Brian Carlstrom578bbdc2011-07-21 14:07:47 -07003#include "heap.h"
Carl Shapiro58551df2011-07-24 03:09:51 -07004
5#include <vector>
6
Elliott Hughes90a33692011-08-30 13:27:07 -07007#include "UniquePtr.h"
Brian Carlstrom9cff8e12011-08-18 16:47:29 -07008#include "image.h"
Carl Shapiro58551df2011-07-24 03:09:51 -07009#include "mark_sweep.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070010#include "object.h"
11#include "space.h"
Carl Shapiro58551df2011-07-24 03:09:51 -070012#include "stl_util.h"
Carl Shapiro69759ea2011-07-21 18:13:35 -070013
14namespace art {
15
// All spaces managed by the heap (boot image space, if any, plus the
// allocation space).  Owned here; deleted by Destroy() via STLDeleteElements.
std::vector<Space*> Heap::spaces_;

// The boot image space, if a boot image was loaded; NULL otherwise.
Space* Heap::boot_space_ = NULL;

// The space that new objects are allocated from.
Space* Heap::alloc_space_ = NULL;

// Maximum heap size in bytes, as passed to Init().
size_t Heap::maximum_size_ = 0;

// Running allocation totals, maintained by RecordAllocation()/RecordFree().
size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

// True while a (future) concurrent GC is tracing; checked by Allocate().
bool Heap::is_gc_running_ = false;

// Mark and live bitmaps covering the address range spanned by all spaces.
HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070034bool Heap::Init(size_t initial_size, size_t maximum_size, const char* boot_image_file_name) {
35 Space* boot_space;
36 byte* requested_base;
37 if (boot_image_file_name == NULL) {
38 boot_space = NULL;
39 requested_base = NULL;
40 } else {
41 boot_space = Space::Create(boot_image_file_name);
42 if (boot_space == NULL) {
43 return false;
44 }
45 spaces_.push_back(boot_space);
46 requested_base = boot_space->GetBase() + RoundUp(boot_space->Size(), kPageSize);
47 }
48
49 Space* space = Space::Create(initial_size, maximum_size, requested_base);
Carl Shapiro58551df2011-07-24 03:09:51 -070050 if (space == NULL) {
Carl Shapiro69759ea2011-07-21 18:13:35 -070051 return false;
52 }
53
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070054 if (boot_space == NULL) {
55 boot_space = space;
56 }
57 byte* base = std::min(boot_space->GetBase(), space->GetBase());
58 byte* limit = std::max(boot_space->GetLimit(), space->GetLimit());
59 DCHECK_LT(base, limit);
60 size_t num_bytes = limit - base;
Carl Shapiro69759ea2011-07-21 18:13:35 -070061
62 // Allocate the initial live bitmap.
Elliott Hughes90a33692011-08-30 13:27:07 -070063 UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
64 if (live_bitmap.get() == NULL) {
Carl Shapiro69759ea2011-07-21 18:13:35 -070065 return false;
66 }
67
68 // Allocate the initial mark bitmap.
Elliott Hughes90a33692011-08-30 13:27:07 -070069 UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
70 if (mark_bitmap.get() == NULL) {
Carl Shapiro69759ea2011-07-21 18:13:35 -070071 return false;
72 }
73
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070074 alloc_space_ = space;
Carl Shapiro58551df2011-07-24 03:09:51 -070075 spaces_.push_back(space);
Carl Shapiro69759ea2011-07-21 18:13:35 -070076 maximum_size_ = maximum_size;
77 live_bitmap_ = live_bitmap.release();
78 mark_bitmap_ = mark_bitmap.release();
79
80 // TODO: allocate the card table
81
Brian Carlstrom9cff8e12011-08-18 16:47:29 -070082 // Make objects in boot_space live (after live_bitmap_ is set)
83 if (boot_image_file_name != NULL) {
Brian Carlstroma663ea52011-08-19 23:33:41 -070084 boot_space_ = boot_space;
Brian Carlstrom9cff8e12011-08-18 16:47:29 -070085 RecordImageAllocations(boot_space);
86 }
87
Carl Shapiro69759ea2011-07-21 18:13:35 -070088 return true;
89}
90
91void Heap::Destroy() {
Carl Shapiro58551df2011-07-24 03:09:51 -070092 STLDeleteElements(&spaces_);
Brian Carlstrom4a289ed2011-08-16 17:17:49 -070093 if (mark_bitmap_ != NULL) {
94 delete mark_bitmap_;
95 mark_bitmap_ = NULL;
96 }
97 if (live_bitmap_ != NULL) {
98 delete live_bitmap_;
99 }
100 live_bitmap_ = NULL;
Carl Shapiro69759ea2011-07-21 18:13:35 -0700101}
102
Carl Shapiro58551df2011-07-24 03:09:51 -0700103Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
Brian Carlstrom9cff8e12011-08-18 16:47:29 -0700104 DCHECK(klass == NULL
Brian Carlstrom74eb46a2011-08-02 20:10:14 -0700105 || klass->descriptor_ == NULL
Brian Carlstrom4873d462011-08-21 15:23:39 -0700106 || (klass->IsClassClass() && num_bytes >= sizeof(Class))
Brian Carlstromb63ec392011-08-27 17:38:27 -0700107 || (klass->object_size_ == (klass->IsArrayClass() ? 0 : num_bytes)));
Carl Shapiro58551df2011-07-24 03:09:51 -0700108 Object* obj = Allocate(num_bytes);
109 if (obj != NULL) {
110 obj->klass_ = klass;
111 }
112 return obj;
113}
114
Elliott Hughesa2501992011-08-26 19:39:54 -0700115void Heap::VerifyObject(Object* obj) {
Ian Rogers408f79a2011-08-23 18:22:33 -0700116 if (!IsAligned(obj, kObjectAlignment)) {
117 LOG(FATAL) << "Object isn't aligned: " << obj;
118 } else if (!live_bitmap_->Test(obj)) {
119 // TODO: we don't hold a lock here as it is assumed the live bit map
120 // isn't changing if the mutator is running.
121 LOG(FATAL) << "Object is dead: " << obj;
122 } else if(obj->GetClass() == NULL) {
123 LOG(FATAL) << "Object has no class: " << obj;
124 }
125}
126
Elliott Hughesa2501992011-08-26 19:39:54 -0700127bool Heap::IsHeapAddress(Object* obj) {
128 if (!IsAligned(obj, kObjectAlignment)) {
129 return false;
130 }
131 // TODO
132 return true;
133}
134
Carl Shapiro58551df2011-07-24 03:09:51 -0700135void Heap::RecordAllocation(Space* space, const Object* obj) {
136 size_t size = space->AllocationSize(obj);
137 DCHECK_NE(size, 0u);
138 num_bytes_allocated_ += size;
139 num_objects_allocated_ += 1;
140 live_bitmap_->Set(obj);
141}
142
143void Heap::RecordFree(Space* space, const Object* obj) {
144 size_t size = space->AllocationSize(obj);
145 DCHECK_NE(size, 0u);
146 if (size < num_bytes_allocated_) {
147 num_bytes_allocated_ -= size;
148 } else {
149 num_bytes_allocated_ = 0;
150 }
151 live_bitmap_->Clear(obj);
152 if (num_objects_allocated_ > 0) {
153 num_objects_allocated_ -= 1;
154 }
155}
156
Brian Carlstrom9cff8e12011-08-18 16:47:29 -0700157void Heap::RecordImageAllocations(Space* space) {
158 CHECK(space != NULL);
159 CHECK(live_bitmap_ != NULL);
160 byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), 8);
161 while (current < space->GetLimit()) {
162 DCHECK(IsAligned(current, 8));
163 const Object* obj = reinterpret_cast<const Object*>(current);
164 live_bitmap_->Set(obj);
165 current += RoundUp(obj->SizeOf(), 8);
166 }
167}
168
Carl Shapiro69759ea2011-07-21 18:13:35 -0700169Object* Heap::Allocate(size_t size) {
Brian Carlstrom4a289ed2011-08-16 17:17:49 -0700170 DCHECK(alloc_space_ != NULL);
171 Space* space = alloc_space_;
Carl Shapiro58551df2011-07-24 03:09:51 -0700172 Object* obj = Allocate(space, size);
173 if (obj != NULL) {
174 RecordAllocation(space, obj);
175 }
176 return obj;
177}
178
179Object* Heap::Allocate(Space* space, size_t size) {
Carl Shapiro69759ea2011-07-21 18:13:35 -0700180 // Fail impossible allocations. TODO: collect soft references.
181 if (size > maximum_size_) {
182 return NULL;
183 }
184
Carl Shapiro58551df2011-07-24 03:09:51 -0700185 Object* ptr = space->AllocWithoutGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700186 if (ptr != NULL) {
187 return ptr;
188 }
189
190 // The allocation failed. If the GC is running, block until it
191 // completes and retry.
192 if (is_gc_running_) {
193 // The GC is concurrently tracing the heap. Release the heap
194 // lock, wait for the GC to complete, and retrying allocating.
195 WaitForConcurrentGcToComplete();
Carl Shapiro58551df2011-07-24 03:09:51 -0700196 ptr = space->AllocWithoutGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700197 if (ptr != NULL) {
198 return ptr;
199 }
200 }
201
202 // Another failure. Our thread was starved or there may be too many
203 // live objects. Try a foreground GC. This will have no effect if
204 // the concurrent GC is already running.
Carl Shapiro58551df2011-07-24 03:09:51 -0700205 CollectGarbageInternal();
206 ptr = space->AllocWithoutGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700207 if (ptr != NULL) {
208 return ptr;
209 }
210
211 // Even that didn't work; this is an exceptional state.
212 // Try harder, growing the heap if necessary.
Carl Shapiro58551df2011-07-24 03:09:51 -0700213 ptr = space->AllocWithGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700214 if (ptr != NULL) {
215 //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
Carl Shapiro58551df2011-07-24 03:09:51 -0700216 size_t new_footprint = space->MaxAllowedFootprint();
217 // TODO: may want to grow a little bit more so that the amount of
218 // free space is equal to the old free space + the
219 // utilization slop for the new allocation.
220 LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
Carl Shapiro69759ea2011-07-21 18:13:35 -0700221 << "for " << size << "-byte allocation";
222 return ptr;
223 }
224
225 // Most allocations should have succeeded by now, so the heap is
226 // really full, really fragmented, or the requested size is really
227 // big. Do another GC, collecting SoftReferences this time. The VM
228 // spec requires that all SoftReferences have been collected and
229 // cleared before throwing an OOME.
230
Carl Shapiro58551df2011-07-24 03:09:51 -0700231 // TODO: wait for the finalizers from the previous GC to finish
Carl Shapiro69759ea2011-07-21 18:13:35 -0700232 LOG(INFO) << "Forcing collection of SoftReferences for "
233 << size << "-byte allocation";
Carl Shapiro58551df2011-07-24 03:09:51 -0700234 CollectGarbageInternal();
235 ptr = space->AllocWithGrowth(size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700236 if (ptr != NULL) {
237 return ptr;
238 }
Carl Shapiro69759ea2011-07-21 18:13:35 -0700239
Carl Shapiro69759ea2011-07-21 18:13:35 -0700240 LOG(ERROR) << "Out of memory on a " << size << " byte allocation";
241
Carl Shapiro58551df2011-07-24 03:09:51 -0700242 // TODO: tell the HeapSource to dump its state
243 // TODO: dump stack traces for all threads
Carl Shapiro69759ea2011-07-21 18:13:35 -0700244
Carl Shapiro69759ea2011-07-21 18:13:35 -0700245 return NULL;
246}
247
// Public entry point: runs a full, synchronous garbage collection.
void Heap::CollectGarbage() {
  CollectGarbageInternal();
}
251
// Runs one full mark-sweep collection, then grows the heap toward the
// target utilization.  The phase order (Init, MarkRoots, RecursiveMark,
// ProcessReferences, Sweep) is significant; the commented TODOs mark
// where concurrent-GC hand-offs and thread suspension will eventually go.
void Heap::CollectGarbageInternal() {
  // TODO: check that heap lock is held

  // TODO: Suspend all threads
  {
    // MarkSweep's lifetime is scoped to this block; its destructor runs
    // before GrowForUtilization below.
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    //   unlock heap
    //   resume threads

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    //   lock heap
    //   suspend threads
    //   re-mark root set
    //   scan dirty objects

    // false: do not force-clear SoftReferences on this pass.
    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();

  // TODO: Resume all threads
}
288
// Blocks until the concurrent GC finishes.  Intentionally a no-op for
// now: there is no concurrent collector yet (is_gc_running_ is never
// set true in this file).
void Heap::WaitForConcurrentGcToComplete() {
}
291
// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  // Not yet implemented; logs an error so callers notice during bring-up.
  UNIMPLEMENTED(ERROR);
}
298
299} // namespace art