/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <limits>
#include <vector>

#include "card_table.h"
#include "debugger.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "object_utils.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"
#include "UniquePtr.h"

namespace art {

std::vector<Space*> Heap::spaces_;

AllocSpace* Heap::alloc_space_ = NULL;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

CardTable* Heap::card_table_ = NULL;

bool Heap::card_marking_disabled_ = false;

Class* Heap::java_lang_ref_FinalizerReference_ = NULL;
Class* Heap::java_lang_ref_ReferenceQueue_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

float Heap::target_utilization_ = 0.5;

Mutex* Heap::lock_ = NULL;

bool Heap::verify_objects_ = false;

static void UpdateFirstAndLastSpace(Space** first_space, Space** last_space, Space* space) {
  if (*first_space == NULL) {
    *first_space = space;
    *last_space = space;
  } else {
    if ((*first_space)->Begin() > space->Begin()) {
      *first_space = space;
    } else if (space->Begin() > (*last_space)->Begin()) {
      *last_space = space;
    }
  }
}

void Heap::Init(size_t initial_size, size_t growth_limit, size_t capacity,
                const std::string& image_file_name) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap::Init entering";
  }

  // Compute the bounds of all spaces so the live and mark bitmaps can cover them;
  // there will be at least one space (the alloc space).
  Space* first_space = NULL;
  Space* last_space = NULL;

  // Requested begin for the alloc space, to follow the mapped image and oat files.
  byte* requested_begin = NULL;
  if (!image_file_name.empty()) {
    ImageSpace* space = Space::CreateImageSpace(image_file_name);
    if (space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_name;
    }
    AddSpace(space);
    UpdateFirstAndLastSpace(&first_space, &last_space, space);
    // Oat files referenced by image files immediately follow them in memory; make sure
    // the alloc space isn't going to end up in between.
    byte* oat_end_addr = space->GetImageHeader().GetOatEnd();
    CHECK(oat_end_addr > space->End());
    if (oat_end_addr > requested_begin) {
      requested_begin = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_end_addr),
                                                        kPageSize));
    }
  }

  alloc_space_ = Space::CreateAllocSpace("alloc space", initial_size, growth_limit, capacity,
                                         requested_begin);
  if (alloc_space_ == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }
  AddSpace(alloc_space_);
  UpdateFirstAndLastSpace(&first_space, &last_space, alloc_space_);
  byte* heap_begin = first_space->Begin();
  size_t heap_capacity = (last_space->Begin() - first_space->Begin()) + last_space->NonGrowthLimitCapacity();

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create("dalvik-bitmap-1", heap_begin, heap_capacity));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Mark image objects in the live bitmap.
  for (size_t i = 0; i < spaces_.size(); i++) {
    Space* space = spaces_[i];
    if (space->IsImageSpace()) {
      space->AsImageSpace()->RecordImageAllocations(live_bitmap.get());
    }
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create("dalvik-bitmap-2", heap_begin, heap_capacity));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  // Allocate the card table.
  UniquePtr<CardTable> card_table(CardTable::Create(heap_begin, heap_capacity));
  if (card_table.get() == NULL) {
    LOG(FATAL) << "Failed to create card table";
  }

  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();
  card_table_ = card_table.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");

  Heap::EnableObjectValidation();

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap::Init exiting";
  }
}

void Heap::Destroy() {
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the thread list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&spaces_);
  delete mark_bitmap_;
  delete live_bitmap_;
  delete card_table_;
  delete lock_;
}

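// A sketch of typical use from runtime code (illustrative; not a call site in
// this file): for a fixed-size, resolved class 'c' one would allocate with
//   Object* o = Heap::AllocObject(c, c->GetObjectSize());
// Variable-size objects (arrays, strings) pass their computed byte count
// instead, as the DCHECK below allows.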
Object* Heap::AllocObject(Class* klass, size_t byte_count) {
  {
    ScopedHeapLock lock;
    DCHECK(klass == NULL || (klass->IsClassClass() && byte_count >= sizeof(Class)) ||
           (klass->IsVariableSize() || klass->GetObjectSize() == byte_count) ||
           strlen(ClassHelper(klass).GetDescriptor()) == 0);
    DCHECK_GE(byte_count, sizeof(Object));
    Object* obj = AllocateLocked(byte_count);
    if (obj != NULL) {
      obj->SetClass(klass);
      if (Dbg::IsAllocTrackingEnabled()) {
        Dbg::RecordAllocation(klass, byte_count);
      }
      return obj;
    }
  }

  Thread::Current()->ThrowOutOfMemoryError(klass, byte_count);
  return NULL;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL || !IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  for (size_t i = 0; i < spaces_.size(); i++) {
    if (spaces_[i]->Contains(obj)) {
      return true;
    }
  }
  return false;
}

bool Heap::IsLiveObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  return IsHeapAddress(obj) && live_bitmap_->Test(obj);
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (!verify_objects_) {
    return;
  }
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL) {
    if (!IsAligned<kObjectAlignment>(obj)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications.
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned<kObjectAlignment>(c)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
      // Note: we don't use the accessors here as they have internal sanity checks
      // that we don't want to run.
      raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

void Heap::RecordAllocationLocked(AllocSpace* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_GT(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  live_bitmap_->Set(obj);
}

void Heap::RecordFreeLocked(size_t freed_objects, size_t freed_bytes) {
  lock_->AssertHeld();

  if (freed_objects < num_objects_allocated_) {
    num_objects_allocated_ -= freed_objects;
  } else {
    num_objects_allocated_ = 0;
  }
  if (freed_bytes < num_bytes_allocated_) {
    num_bytes_allocated_ -= freed_bytes;
  } else {
    num_bytes_allocated_ = 0;
  }

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->freed_objects;
    ++thread_stats->freed_objects;
    global_stats->freed_bytes += freed_bytes;
    thread_stats->freed_bytes += freed_bytes;
  }
}

Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  AllocSpace* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

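// Allocation escalation strategy, summarizing the steps below:
//   1. Try AllocWithoutGrowth within the current footprint limit.
//   2. If a concurrent GC is running, wait for it to complete and retry.
//   3. Run a foreground GC (keeping soft references) and retry.
//   4. Grow toward the footprint limit with AllocWithGrowth.
//   5. GC once more, clearing soft references, and try growing again.
// Only if all of these fail do we return NULL so the caller can throw an OOME.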
Object* Heap::AllocateLocked(AllocSpace* space, size_t alloc_size) {
  lock_->AssertHeld();

  // Since allocation can cause a GC which will need to SuspendAll,
  // make sure all allocators are in the kRunnable state.
  CHECK_EQ(Thread::Current()->GetState(), Thread::kRunnable);

  // Fail impossible allocations: a request larger than the space's capacity can never succeed.
  if (alloc_size > space->Capacity()) {
    // Collect soft references before failing; they must be cleared before an OOME is thrown.
    CollectGarbageInternal(true);
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap lock, wait for the GC to
    // complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(alloc_size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  CollectGarbageInternal(false);
  ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(alloc_size);
  if (ptr != NULL) {
    //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->GetFootprintLimit();
    // OLD-TODO: may want to grow a little bit more so that the amount of
    //           free space is equal to the old free space + the
    //           utilization slop for the new allocation.
    VLOG(gc) << "Grow heap (frag case) to " << PrettySize(new_footprint)
             << " for a " << PrettySize(alloc_size) << " allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size) << " allocation";
  CollectGarbageInternal(true);
  ptr = space->AllocWithGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << PrettySize(alloc_size) << " allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  return alloc_space_->Capacity();
}

int64_t Heap::GetTotalMemory() {
  return alloc_space_->Capacity();
}

int64_t Heap::GetFreeMemory() {
  return alloc_space_->Capacity() - num_bytes_allocated_;
}

class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class == class_) {
        ++count_;
      }
    } else {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

454 ScopedHeapLock lock;
455 InstanceCounter counter(c, count_assignable);
456 live_bitmap_->Walk(InstanceCounter::Callback, &counter);
457 return counter.GetCount();
458}
459
Ian Rogers30fab402012-01-23 15:43:46 -0800460void Heap::CollectGarbage(bool clear_soft_references) {
Elliott Hughes92b3b562011-09-08 16:32:26 -0700461 ScopedHeapLock lock;
Ian Rogers30fab402012-01-23 15:43:46 -0800462 CollectGarbageInternal(clear_soft_references);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700463}
464
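// A stop-the-world mark-sweep collection: suspend all threads, mark roots and
// dirty image roots, recursively mark everything reachable, process reference
// objects, sweep unmarked objects, then update the footprint target and resume
// the world.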
void Heap::CollectGarbageInternal(bool clear_soft_references) {
  lock_->AssertHeld();

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();

  size_t initial_size = num_bytes_allocated_;
  TimingLogger timings("CollectGarbageInternal");
  uint64_t t0 = NanoTime();
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep;
    timings.AddSplit("ctor");

    mark_sweep.Init();
    timings.AddSplit("Init");

    mark_sweep.MarkRoots();
    timings.AddSplit("MarkRoots");

    mark_sweep.ScanDirtyImageRoots();
    timings.AddSplit("DirtyImageRoots");

    // Roots are marked on the bitmap and the mark_stack is empty.
    DCHECK(mark_sweep.IsMarkStackEmpty());

    // TODO: if concurrent
    // unlock heap
    // thread_list->ResumeAll();

    // Recursively mark all bits set in the non-image mark bitmap.
    mark_sweep.RecursiveMark();
    timings.AddSplit("RecursiveMark");

    // TODO: if concurrent
    // lock heap
    // thread_list->SuspendAll();
    // re-mark root set
    // scan dirty objects

    mark_sweep.ProcessReferences(clear_soft_references);
    timings.AddSplit("ProcessReferences");

    // TODO: if concurrent
    // swap bitmaps

    mark_sweep.Sweep();
    timings.AddSplit("Sweep");

    cleared_references = mark_sweep.GetClearedReferences();
  }

  GrowForUtilization();
  timings.AddSplit("GrowForUtilization");
  uint64_t t1 = NanoTime();
  thread_list->ResumeAll();

  EnqueueClearedReferences(&cleared_references);
  RequestHeapTrim();

  uint64_t duration_ns = t1 - t0;
  bool gc_was_particularly_slow = duration_ns > MsToNs(50);  // TODO: crank this down for concurrent.
  if (VLOG_IS_ON(gc) || gc_was_particularly_slow) {
    // TODO: somehow make the specific GC implementation (here MarkSweep) responsible for logging.
    size_t bytes_freed = initial_size - num_bytes_allocated_;
    if (bytes_freed > KB) {  // Drop sub-KB precision from the output if more than 1KB was freed.
      bytes_freed = RoundDown(bytes_freed, KB);
    }
    size_t bytes_allocated = RoundUp(num_bytes_allocated_, KB);
    // Lose the low nanoseconds from the duration. TODO: make this part of PrettyDuration.
    duration_ns = (duration_ns / 1000) * 1000;
    size_t total = GetTotalMemory();
    size_t percentFree = 100 - static_cast<size_t>(100.0f * static_cast<float>(num_bytes_allocated_) / total);
    LOG(INFO) << "GC freed " << PrettySize(bytes_freed) << ", " << percentFree << "% free, "
              << PrettySize(bytes_allocated) << "/" << PrettySize(total) << ", "
              << "paused " << PrettyDuration(duration_ns);
  }
  Dbg::GcDidFinish();
  if (VLOG_IS_ON(heap)) {
    timings.Dump();
  }
}

void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
}

void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  size_t alloc_space_capacity = alloc_space_->Capacity();
  if (max_allowed_footprint > alloc_space_capacity) {
    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint)
             << " to " << PrettySize(alloc_space_capacity);
    max_allowed_footprint = alloc_space_capacity;
  }
  alloc_space_->SetFootprintLimit(max_allowed_footprint);
}

// kHeapIdealFree is the ideal maximum free size, when we grow the heap for utilization.
static const size_t kHeapIdealFree = 2 * MB;
// kHeapMinFree guarantees that you always have at least 512 KB free when you grow for
// utilization, regardless of the target utilization ratio.
static const size_t kHeapMinFree = kHeapIdealFree / 4;

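// Worked example of the clamping below, assuming the default target
// utilization of 0.5: with 8 MB allocated the raw target is 16 MB, which is
// capped at 8 MB + kHeapIdealFree = 10 MB; with 300 KB allocated the raw
// target of 600 KB is raised to 300 KB + kHeapMinFree = 812 KB.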
void Heap::GrowForUtilization() {
  lock_->AssertHeld();

  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more
  // when necessary.
  size_t target_size(num_bytes_allocated_ / Heap::GetTargetHeapUtilization());

  if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
    target_size = num_bytes_allocated_ + kHeapIdealFree;
  } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
    target_size = num_bytes_allocated_ + kHeapMinFree;
  }

  SetIdealFootprint(target_size);
}

void Heap::ClearGrowthLimit() {
  ScopedHeapLock lock;
  WaitForConcurrentGcToComplete();
  alloc_space_->ClearGrowthLimit();
}

pid_t Heap::GetLockOwner() {
  return lock_->GetOwner();
}

void Heap::Lock() {
  // Grab the lock, but put ourselves into Thread::kVmWait if it looks
  // like we're going to have to wait on the mutex. This prevents
  // deadlock if another thread is calling CollectGarbageInternal,
  // since they will have the heap lock and be waiting for mutators to
  // suspend.
  if (!lock_->TryLock()) {
    ScopedThreadStateChange tsc(Thread::Current(), Thread::kVmWait);
    lock_->Lock();
  }
}

void Heap::Unlock() {
  lock_->Unlock();
}

void Heap::SetWellKnownClasses(Class* java_lang_ref_FinalizerReference,
                               Class* java_lang_ref_ReferenceQueue) {
  java_lang_ref_FinalizerReference_ = java_lang_ref_FinalizerReference;
  java_lang_ref_ReferenceQueue_ = java_lang_ref_ReferenceQueue;
  CHECK(java_lang_ref_FinalizerReference_ != NULL);
  CHECK(java_lang_ref_ReferenceQueue_ != NULL);
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

Object* Heap::GetReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object has not yet been enqueued.
bool Heap::IsEnqueuable(const Object* ref) {
  DCHECK(ref != NULL);
  const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
  const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}

void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}

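// The pending-reference list is an intrusive circular singly-linked list,
// threaded through the pendingNext field: '*list' points at the tail, and
// tail->pendingNext is the head. Enqueueing links the new reference in just
// after the tail, making it the new head; so after enqueueing A then B,
// DequeuePendingReference returns B first and then A.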
void Heap::EnqueuePendingReference(Object* ref, Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}

Object* Heap::DequeuePendingReference(Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
  Object* ref;
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}

void Heap::AddFinalizerReference(Thread* self, Object* object) {
  ScopedThreadStateChange tsc(self, Thread::kRunnable);
  static Method* FinalizerReference_add =
      java_lang_ref_FinalizerReference_->FindDirectMethod("add", "(Ljava/lang/Object;)V");
  DCHECK(FinalizerReference_add != NULL);
  Object* args[] = { object };
  FinalizerReference_add->Invoke(self, NULL, reinterpret_cast<byte*>(&args), NULL);
}

void Heap::EnqueueClearedReferences(Object** cleared) {
  DCHECK(cleared != NULL);
  if (*cleared != NULL) {
    static Method* ReferenceQueue_add =
        java_lang_ref_ReferenceQueue_->FindDirectMethod("add", "(Ljava/lang/ref/Reference;)V");
    DCHECK(ReferenceQueue_add != NULL);

    Thread* self = Thread::Current();
    ScopedThreadStateChange tsc(self, Thread::kRunnable);
    Object* args[] = { *cleared };
    ReferenceQueue_add->Invoke(self, NULL, reinterpret_cast<byte*>(&args), NULL);
    *cleared = NULL;
  }
}

void Heap::RequestHeapTrim() {
  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
  // because that only marks object heads, so a large array looks like lots of empty space. We
  // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
  // to utilization (which is probably inversely proportional to how much benefit we can expect).
  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
  // not how much use we're making of those pages.
  float utilization = static_cast<float>(num_bytes_allocated_) / alloc_space_->Size();
  if (utilization > 0.75f) {
    // Don't bother trimming the heap if it's more than 75% utilized.
    // (This percentage was picked arbitrarily.)
    return;
  }
  if (!Runtime::Current()->IsStarted()) {
    // Heap trimming isn't supported without a Java runtime (such as at dex2oat time).
    return;
  }
  JNIEnv* env = Thread::Current()->GetJniEnv();
  static jclass Daemons_class = CacheClass(env, "java/lang/Daemons");
  static jmethodID Daemons_requestHeapTrim = env->GetStaticMethodID(Daemons_class, "requestHeapTrim", "()V");
  env->CallStaticVoidMethod(Daemons_class, Daemons_requestHeapTrim);
  CHECK(!env->ExceptionCheck());
}

}  // namespace art