// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <limits>
#include <vector>

#include "card_table.h"
#include "debugger.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "object_utils.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"
#include "UniquePtr.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::growth_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

CardTable* Heap::card_table_ = NULL;

bool Heap::card_marking_disabled_ = false;

Class* Heap::java_lang_ref_FinalizerReference_ = NULL;
Class* Heap::java_lang_ref_ReferenceQueue_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

float Heap::target_utilization_ = 0.5;

Mutex* Heap::lock_ = NULL;

bool Heap::verify_objects_ = false;

void Heap::Init(size_t initial_size, size_t maximum_size, size_t growth_size,
                const std::vector<std::string>& image_file_names) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap::Init entering";
  }

  // Compute the bounds of all spaces for allocating the live and mark bitmaps.
  // There will be at least one space (the alloc space), so initialize base to
  // the maximum address and max/limit to the minimum address.
  byte* base = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::max());
  byte* max = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::min());
  byte* limit = reinterpret_cast<byte*>(std::numeric_limits<uintptr_t>::min());

  byte* requested_base = NULL;
  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::CreateFromImage(image_file_names[i]);
    if (space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_names[i];
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    byte* oat_limit_addr = space->GetImageHeader().GetOatLimitAddr();
    if (oat_limit_addr > requested_base) {
      requested_base = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_limit_addr),
                                                       kPageSize));
    }
    base = std::min(base, space->GetBase());
    max = std::max(max, space->GetMax());
    limit = std::max(limit, space->GetLimit());
  }

  alloc_space_ = Space::Create("alloc space", initial_size, maximum_size, growth_size, requested_base);
  if (alloc_space_ == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }
  base = std::min(base, alloc_space_->GetBase());
  max = std::max(max, alloc_space_->GetMax());
  limit = std::max(limit, alloc_space_->GetLimit());
  DCHECK_LT(base, max);
  DCHECK_LT(base, limit);
  size_t num_bytes = max - base;
  size_t limit_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create("dalvik-bitmap-1", base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create("dalvik-bitmap-2", base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  // Allocate the card table.
  UniquePtr<CardTable> card_table(CardTable::Create(base, num_bytes, limit_bytes));
  if (card_table.get() == NULL) {
    LOG(FATAL) << "Failed to create card table";
  }

  spaces_.push_back(alloc_space_);
  maximum_size_ = maximum_size;
  growth_size_ = growth_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();
  card_table_ = card_table.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // Make image objects live (after live_bitmap_ is set).
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  Heap::EnableObjectValidation();

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap::Init exiting";
  }
}

void Heap::Destroy() {
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the thread list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&spaces_);
  delete mark_bitmap_;
  delete live_bitmap_;
  delete card_table_;
  delete lock_;
}

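// Allocates a new object of the given class and size, recording the
// allocation under the heap lock. If every fallback in AllocateLocked fails,
// throws OutOfMemoryError (outside the lock) and returns NULL.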
Object* Heap::AllocObject(Class* klass, size_t byte_count) {
  {
    ScopedHeapLock lock;
    DCHECK(klass == NULL || (klass->IsClassClass() && byte_count >= sizeof(Class)) ||
           (klass->IsVariableSize() || klass->GetObjectSize() == byte_count) ||
           strlen(ClassHelper(klass).GetDescriptor()) == 0);
    DCHECK_GE(byte_count, sizeof(Object));
    Object* obj = AllocateLocked(byte_count);
    if (obj != NULL) {
      obj->SetClass(klass);
      if (Dbg::IsAllocTrackingEnabled()) {
        Dbg::RecordAllocation(klass, byte_count);
      }
      return obj;
    }
  }

  Thread::Current()->ThrowOutOfMemoryError(klass, byte_count);
  return NULL;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL || !IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  // TODO
  return true;
}

bool Heap::IsLiveObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  return IsHeapAddress(obj) && live_bitmap_->Test(obj);
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (!verify_objects_) {
    return;
  }
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL) {
    if (!IsAligned<kObjectAlignment>(obj)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bit map
      // isn't changing if the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned<kObjectAlignment>(c)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
      // Note: we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

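// Bookkeeping for a successful allocation: updates the running byte/object
// totals (and, when enabled, the global and per-thread runtime stats), then
// marks the object in the live bitmap.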
void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_GT(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  live_bitmap_->Set(obj);
}

void Heap::RecordFreeLocked(size_t freed_objects, size_t freed_bytes) {
  lock_->AssertHeld();

  if (freed_objects < num_objects_allocated_) {
    num_objects_allocated_ -= freed_objects;
  } else {
    num_objects_allocated_ = 0;
  }
  if (freed_bytes < num_bytes_allocated_) {
    num_bytes_allocated_ -= freed_bytes;
  } else {
    num_bytes_allocated_ = 0;
  }

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->freed_objects;
    ++thread_stats->freed_objects;
    global_stats->freed_bytes += freed_bytes;
    thread_stats->freed_bytes += freed_bytes;
  }
}

void Heap::RecordImageAllocations(Space* space) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap::RecordImageAllocations entering";
  }
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
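  // Walk the image in address order: objects are laid out contiguously after
  // the ImageHeader, each rounded up to kObjectAlignment.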
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK_ALIGNED(current, kObjectAlignment);
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap::RecordImageAllocations exiting";
  }
}

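// Entry point for allocation from the alloc space; on success the allocation
// is recorded (byte/object counts, stats, live bitmap) before returning.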
Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

Object* Heap::AllocateLocked(Space* space, size_t size) {
  lock_->AssertHeld();

  // Since allocation can cause a GC which will need to SuspendAll,
  // make sure all allocators are in the kRunnable state.
  DCHECK_EQ(Thread::Current()->GetState(), Thread::kRunnable);

  // Fail impossible allocations. TODO: collect soft references.
  if (size > growth_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    // size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->GetMaxAllowedFootprint();
    // OLD-TODO: may want to grow a little bit more so that the amount of
    //           free space is equal to the old free space + the
    //           utilization slop for the new allocation.
    VLOG(gc) << "Grow heap (frag case) to " << (new_footprint/KB) << "KiB "
             << "for a " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  VLOG(gc) << "Forcing collection of SoftReferences for " << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << "-byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  return growth_size_;
}

int64_t Heap::GetTotalMemory() {
  return alloc_space_->Size();
}

int64_t Heap::GetFreeMemory() {
  return alloc_space_->Size() - num_bytes_allocated_;
}

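// Counts heap instances of a class via a live-bitmap walk; used by
// CountInstances below. When count_assignable is true, instances of
// subclasses are counted too; otherwise only exact class matches count.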
class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    } else {
      if (instance_class == class_) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

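// For example, a hypothetical caller (the class pointer name is assumed for
// illustration) could count a class and all of its subclasses with:
//   int64_t n = Heap::CountInstances(string_class, true);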
int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ScopedHeapLock lock;
  InstanceCounter counter(c, count_assignable);
  live_bitmap_->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

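// Stop-the-world mark-sweep: suspends all threads, marks roots and everything
// reachable from them, sweeps unmarked objects, grows the footprint toward the
// target utilization, then resumes threads and enqueues any cleared references.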
void Heap::CollectGarbageInternal() {
  lock_->AssertHeld();

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();

  size_t initial_size = num_bytes_allocated_;
  TimingLogger timings("CollectGarbageInternal");
  uint64_t t0 = NanoTime();
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep;
    timings.AddSplit("ctor");

    mark_sweep.Init();
    timings.AddSplit("Init");

    mark_sweep.MarkRoots();
    timings.AddSplit("MarkRoots");

    mark_sweep.ScanDirtyImageRoots();
    timings.AddSplit("DirtyImageRoots");

    // Roots are marked on the bitmap and the mark_stack is empty.
    DCHECK(mark_sweep.IsMarkStackEmpty());

    // TODO: if concurrent
    //   unlock heap
    //   thread_list->ResumeAll();

    // Recursively mark all bits set in the non-image mark bitmap.
    mark_sweep.RecursiveMark();
    timings.AddSplit("RecursiveMark");

    // TODO: if concurrent
    //   lock heap
    //   thread_list->SuspendAll();
    //   re-mark root set
    //   scan dirty objects

    mark_sweep.ProcessReferences(false);
    timings.AddSplit("ProcessReferences");

    // TODO: if concurrent
    //   swap bitmaps

    mark_sweep.Sweep();
    timings.AddSplit("Sweep");

    cleared_references = mark_sweep.GetClearedReferences();
  }

  GrowForUtilization();
  timings.AddSplit("GrowForUtilization");
  uint64_t t1 = NanoTime();
  thread_list->ResumeAll();

  EnqueueClearedReferences(&cleared_references);

  // TODO: somehow make the specific GC implementation (here MarkSweep) responsible for logging.
  size_t bytes_freed = initial_size - num_bytes_allocated_;
  bool is_small = (bytes_freed > 0 && bytes_freed < 1024);
  size_t kib_freed = (bytes_freed > 0 ? std::max(bytes_freed/KB, 1U) : 0);

  size_t total = GetTotalMemory();
  size_t percentFree = 100 - static_cast<size_t>(100.0f * float(num_bytes_allocated_) / total);

  uint32_t duration = (t1 - t0)/1000/1000;
  bool gc_was_particularly_slow = (duration > 100);  // TODO: crank this down for concurrent.
  if (VLOG_IS_ON(gc) || gc_was_particularly_slow) {
    LOG(INFO) << "GC freed " << (is_small ? "<" : "") << kib_freed << "KiB, "
              << percentFree << "% free "
              << (num_bytes_allocated_/KB) << "KiB/" << (total/KB) << "KiB, "
              << "paused " << duration << "ms";
  }
  Dbg::GcDidFinish();
  if (VLOG_IS_ON(heap)) {
    timings.Dump();
  }
}

void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
}

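// A minimal sketch of a conforming callback for WalkHeap below (hypothetical;
// only the signature is dictated here, and the chunk/used interpretation is
// assumed to come from Space::Walk):
//   static void DumpChunk(const void* chunk_start, size_t chunk_size,
//                         const void* used_start, size_t used_size, void* arg) {
//     LOG(INFO) << "chunk " << chunk_start << " (" << chunk_size << " bytes), "
//               << used_size << " bytes used";
//   }
//   ...
//   Heap::WalkHeap(DumpChunk, NULL);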
void Heap::WalkHeap(void(*callback)(const void*, size_t, const void*, size_t, void*), void* arg) {
  typedef std::vector<Space*>::iterator It;  // C++0x auto.
  for (It it = spaces_.begin(); it != spaces_.end(); ++it) {
    (*it)->Walk(callback, arg);
  }
}

/* Terminology:
 * 1. Footprint: Capacity we allocate from system.
 * 2. Active space: a.k.a. alloc_space_.
 * 3. Soft footprint: external allocation + spaces footprint + active space footprint.
 * 4. Overhead: soft footprint excluding active.
 *
 * Layout: (The spaces below might not be contiguous, but are lumped together to depict size.)
 * |----------------------spaces footprint--------- --------------|----active space footprint----|
 *                                                                |--active space allocated--|
 * |--------------------soft footprint (include active)--------------------------------------|
 * |----------------soft footprint excluding active---------------|
 *                                                                |------------soft limit-------...|
 * |------------------------------------ideal footprint-----------------------------------------...|
 *
 */

// Sets the maximum number of bytes that the heap is allowed to
// allocate from the system. Clamps to the appropriate maximum
// value.
// Old spaces will count against the ideal size.
//
void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  if (max_allowed_footprint > Heap::growth_size_) {
    VLOG(gc) << "Clamp target GC heap from " << (max_allowed_footprint/KB) << "KiB"
             << " to " << (Heap::growth_size_/KB) << "KiB";
    max_allowed_footprint = Heap::growth_size_;
  }

  alloc_space_->SetMaxAllowedFootprint(max_allowed_footprint);
}

// kHeapIdealFree is the ideal maximum free size when we grow the heap for
// utilization.
static const size_t kHeapIdealFree = 2 * MB;
// kHeapMinFree guarantees that you always have at least 512 KB free when
// you grow for utilization, regardless of the target utilization ratio.
static const size_t kHeapMinFree = kHeapIdealFree / 4;

// Given the current contents of the active space, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
//
void Heap::GrowForUtilization() {
  lock_->AssertHeld();

  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more
  // when necessary.
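  // Worked example, assuming the default target_utilization_ of 0.5: with
  // 3 MiB live after a GC, target_size = 3 MiB / 0.5 = 6 MiB, which exceeds
  // 3 MiB + kHeapIdealFree = 5 MiB, so the footprint is clamped to 5 MiB.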
  size_t target_size(num_bytes_allocated_ / Heap::GetTargetHeapUtilization());

  if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
    target_size = num_bytes_allocated_ + kHeapIdealFree;
  } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
    target_size = num_bytes_allocated_ + kHeapMinFree;
  }

  SetIdealFootprint(target_size);
}

void Heap::ClearGrowthLimit() {
  ScopedHeapLock lock;
  WaitForConcurrentGcToComplete();
  CHECK_GE(maximum_size_, growth_size_);
  growth_size_ = maximum_size_;
  alloc_space_->ClearGrowthLimit();
  card_table_->ClearGrowthLimit();
}

pid_t Heap::GetLockOwner() {
  return lock_->GetOwner();
}

void Heap::Lock() {
  // Grab the lock, but put ourselves into Thread::kVmWait if it looks
  // like we're going to have to wait on the mutex. This prevents
  // deadlock if another thread is calling CollectGarbageInternal,
  // since they will have the heap lock and be waiting for mutators to
  // suspend.
  if (!lock_->TryLock()) {
    ScopedThreadStateChange tsc(Thread::Current(), Thread::kVmWait);
    lock_->Lock();
  }
}

void Heap::Unlock() {
  lock_->Unlock();
}

void Heap::SetWellKnownClasses(Class* java_lang_ref_FinalizerReference,
                               Class* java_lang_ref_ReferenceQueue) {
  java_lang_ref_FinalizerReference_ = java_lang_ref_FinalizerReference;
  java_lang_ref_ReferenceQueue_ = java_lang_ref_ReferenceQueue;
  CHECK(java_lang_ref_FinalizerReference_ != NULL);
  CHECK(java_lang_ref_ReferenceQueue_ != NULL);
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

Object* Heap::GetReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object has not yet been enqueued.
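// A reference is enqueuable if it was constructed with a ReferenceQueue
// (queue != NULL) but has not yet been added to one (queueNext == NULL).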
bool Heap::IsEnqueuable(const Object* ref) {
  DCHECK(ref != NULL);
  const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
  const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}

void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}

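// The pending list is a singly-linked circular list threaded through the
// pendingNext field: *list points at the tail, and (*list)->pendingNext is
// the head. For example, after enqueuing A then B the links are
// A -> B -> A with *list == A; enqueuing inserts at the head, and
// DequeuePendingReference removes from the head.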
void Heap::EnqueuePendingReference(Object* ref, Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}

Object* Heap::DequeuePendingReference(Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
  Object* ref;
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}

void Heap::AddFinalizerReference(Thread* self, Object* object) {
  ScopedThreadStateChange tsc(self, Thread::kRunnable);
  static Method* FinalizerReference_add =
      java_lang_ref_FinalizerReference_->FindDirectMethod("add", "(Ljava/lang/Object;)V");
  DCHECK(FinalizerReference_add != NULL);
  Object* args[] = { object };
  FinalizerReference_add->Invoke(self, NULL, reinterpret_cast<byte*>(&args), NULL);
}

void Heap::EnqueueClearedReferences(Object** cleared) {
  DCHECK(cleared != NULL);
  if (*cleared != NULL) {
    static Method* ReferenceQueue_add =
        java_lang_ref_ReferenceQueue_->FindDirectMethod("add", "(Ljava/lang/ref/Reference;)V");
    DCHECK(ReferenceQueue_add != NULL);

    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, Thread::kRunnable);
    Object* args[] = { *cleared };
    ReferenceQueue_add->Invoke(self, NULL, reinterpret_cast<byte*>(&args), NULL);
    *cleared = NULL;
  }
}

}  // namespace art