// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <vector>

#include "UniquePtr.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::boot_space_ = NULL;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

Mutex* Heap::lock_ = NULL;

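// RAII helper that acquires the global heap lock on construction and
// releases it when the enclosing scope exits.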
class ScopedHeapLock {
 public:
  ScopedHeapLock() {
    Heap::Lock();
  }

  ~ScopedHeapLock() {
    Heap::Unlock();
  }
};

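// Creates the boot image space (if one was given), any additional image
// spaces, and the main allocation space, allocates live and mark bitmaps over
// the resulting address range, marks all image objects as live, and finally
// creates the heap lock.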
void Heap::Init(size_t initial_size, size_t maximum_size,
                const char* boot_image_file_name,
                std::vector<const char*>& image_file_names) {
  Space* boot_space;
  byte* requested_base;
  if (boot_image_file_name == NULL) {
    boot_space = NULL;
    requested_base = NULL;
  } else {
    boot_space = Space::Create(boot_image_file_name);
    if (boot_space == NULL) {
      LOG(FATAL) << "Failed to create space from " << boot_image_file_name;
    }
    spaces_.push_back(boot_space);
    requested_base = boot_space->GetBase() + RoundUp(boot_space->Size(), kPageSize);
  }

  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::Create(image_file_names[i]);
    if (space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_names[i];
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    requested_base = space->GetBase() + RoundUp(space->Size(), kPageSize);
  }

  Space* space = Space::Create(initial_size, maximum_size, requested_base);
  if (space == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }

  if (boot_space == NULL) {
    boot_space = space;
  }
  byte* base = std::min(boot_space->GetBase(), space->GetBase());
  byte* limit = std::max(boot_space->GetLimit(), space->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  alloc_space_ = space;
  spaces_.push_back(space);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make objects in boot_space live (after live_bitmap_ is set).
  if (boot_image_file_name != NULL) {
    boot_space_ = boot_space;
    RecordImageAllocations(boot_space);
  }
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");
}

void Heap::Destroy() {
  ScopedHeapLock lock;
  STLDeleteElements(&spaces_);
  if (mark_bitmap_ != NULL) {
    delete mark_bitmap_;
    mark_bitmap_ = NULL;
  }
  if (live_bitmap_ != NULL) {
    delete live_bitmap_;
  }
  live_bitmap_ = NULL;
}

Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  ScopedHeapLock lock;
  DCHECK(klass == NULL
         || klass->GetDescriptor() == NULL
         || (klass->IsClassClass() && num_bytes >= sizeof(Class))
         || (klass->IsVariableSize() || klass->GetObjectSize() == num_bytes));
  DCHECK(num_bytes >= sizeof(Object));
  Object* obj = AllocateLocked(num_bytes);
  if (obj != NULL) {
    obj->SetClass(klass);
  }
  return obj;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (!IsAligned(obj, kObjectAlignment)) {
    return false;
  }
  // TODO
  return true;
}

bool Heap::verify_object_disabled_;

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL && !verify_object_disabled_) {
    if (!IsAligned(obj, kObjectAlignment)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bitmap
      // isn't changing if the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore verifications during the early dawn of the universe.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned(c, kObjectAlignment)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass().
      // NB we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

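// Bookkeeping for a successful allocation: bumps the byte and object counters
// and sets the object's bit in the live bitmap.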
void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;
  live_bitmap_->Set(obj);
}

void Heap::RecordFreeLocked(Space* space, const Object* obj) {
  lock_->AssertHeld();
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }
}

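// Marks every object in an image space as live by walking the space from the
// end of its ImageHeader to its limit.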
void Heap::RecordImageAllocations(Space* space) {
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK(IsAligned(current, kObjectAlignment));
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
}

Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

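// Allocation slow path for a specific space: try without growing, wait out a
// concurrent GC and retry, run a foreground GC, grow the heap, and finally
// retry once more after a GC that is meant to clear SoftReferences before
// reporting failure.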
Object* Heap::AllocateLocked(Space* space, size_t size) {
  lock_->AssertHeld();

  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    // size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << " for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish.
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state.
  // TODO: dump stack traces for all threads.

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetTotalMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetFreeMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

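// Stop-the-world mark-sweep collection: suspends all threads, marks the roots,
// recursively marks everything reachable from them, processes references,
// sweeps unmarked objects, grows the heap toward the target utilization, and
// resumes the threads.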
void Heap::CollectGarbageInternal() {
  lock_->AssertHeld();

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack.

    // TODO: if concurrent
    //   unlock heap
    //   thread_list->ResumeAll();

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    //   lock heap
    //   thread_list->SuspendAll();
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();
  thread_list->ResumeAll();
}

void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
}

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  lock_->AssertHeld();
  UNIMPLEMENTED(ERROR);
}

void Heap::Lock() {
  // TODO: grab the lock, but put ourselves into Thread::kVmWait if it looks like
  // we're going to have to wait on the mutex.
  lock_->Lock();
}

void Heap::Unlock() {
  lock_->Unlock();
}

}  // namespace art