/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <sys/types.h>
#include <sys/wait.h>

#include <limits>
#include <vector>

#include "card_table.h"
#include "debugger.h"
#include "heap_bitmap.h"
#include "image.h"
#include "mark_sweep.h"
#include "mod_union_table.h"
#include "object.h"
#include "object_utils.h"
#include "os.h"
#include "scoped_heap_lock.h"
#include "scoped_jni_thread_state.h"
#include "scoped_thread_list_lock_releaser.h"
#include "ScopedLocalRef.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

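// Track the lowest and highest space begin addresses seen so far; the constructor uses the
// resulting bounds to size the card table over the whole heap.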
static void UpdateFirstAndLastSpace(Space** first_space, Space** last_space, Space* space) {
  if (*first_space == NULL) {
    *first_space = space;
    *last_space = space;
  } else {
    if ((*first_space)->Begin() > space->Begin()) {
      *first_space = space;
    } else if (space->Begin() > (*last_space)->Begin()) {
      *last_space = space;
    }
  }
}

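// Generate the boot image by forking and exec-ing dex2oat over the boot class path, then wait
// for it to exit. Returns true only if dex2oat ran and exited successfully.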
static bool GenerateImage(const std::string& image_file_name) {
  const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
  std::vector<std::string> boot_class_path;
  Split(boot_class_path_string, ':', boot_class_path);
  if (boot_class_path.empty()) {
    LOG(FATAL) << "Failed to generate image because no boot class path specified";
  }

  std::vector<char*> arg_vector;

  std::string dex2oat_string(GetAndroidRoot());
  dex2oat_string += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
  const char* dex2oat = dex2oat_string.c_str();
  arg_vector.push_back(strdup(dex2oat));

  std::string image_option_string("--image=");
  image_option_string += image_file_name;
  const char* image_option = image_option_string.c_str();
  arg_vector.push_back(strdup(image_option));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xms64m"));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xmx64m"));

  for (size_t i = 0; i < boot_class_path.size(); i++) {
    std::string dex_file_option_string("--dex-file=");
    dex_file_option_string += boot_class_path[i];
    const char* dex_file_option = dex_file_option_string.c_str();
    arg_vector.push_back(strdup(dex_file_option));
  }

  std::string oat_file_option_string("--oat-file=");
  oat_file_option_string += image_file_name;
  oat_file_option_string.erase(oat_file_option_string.size() - 3);
  oat_file_option_string += "oat";
  const char* oat_file_option = oat_file_option_string.c_str();
  arg_vector.push_back(strdup(oat_file_option));

  arg_vector.push_back(strdup("--base=0x60000000"));

  std::string command_line(Join(arg_vector, ' '));
  LOG(INFO) << command_line;

  arg_vector.push_back(NULL);
  char** argv = &arg_vector[0];

  // fork and exec dex2oat
  pid_t pid = fork();
  if (pid == 0) {
    // no allocation allowed between fork and exec

    // change process groups, so we don't get reaped by ProcessManager
    setpgid(0, 0);

    execv(dex2oat, argv);

    PLOG(FATAL) << "execv(" << dex2oat << ") failed";
    return false;
  } else {
    STLDeleteElements(&arg_vector);

    // wait for dex2oat to finish
    int status;
    pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
    if (got_pid != pid) {
      PLOG(ERROR) << "waitpid failed: wanted " << pid << ", got " << got_pid;
      return false;
    }
    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
      LOG(ERROR) << dex2oat << " failed: " << command_line;
      return false;
    }
  }
  return true;
}

Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
           const std::string& original_image_file_name)
    : lock_(NULL),
      alloc_space_(NULL),
      card_table_(NULL),
      card_marking_disabled_(false),
      is_gc_running_(false),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      concurrent_start_size_(128 * KB),
      concurrent_min_free_(256 * KB),
      try_running_gc_(false),
      requesting_gc_(false),
      num_bytes_allocated_(0),
      num_objects_allocated_(0),
      last_trim_time_(0),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      have_zygote_space_(false),
      target_utilization_(0.5),
      verify_objects_(false) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }

  // Compute the bounds of all spaces for allocating live and mark bitmaps
  // there will be at least one space (the alloc space)
  Space* first_space = NULL;
  Space* last_space = NULL;

  live_bitmap_.reset(new HeapBitmap(this));
  mark_bitmap_.reset(new HeapBitmap(this));

  // Requested begin for the alloc space, to follow the mapped image and oat files
  byte* requested_begin = NULL;
  std::string image_file_name(original_image_file_name);
  if (!image_file_name.empty()) {
    Space* image_space = NULL;

    if (OS::FileExists(image_file_name.c_str())) {
      // If the /system file exists, it should be up-to-date, don't try to generate
      image_space = Space::CreateImageSpace(image_file_name);
    } else {
      // If the /system file didn't exist, we need to use one from the art-cache.
      // If the cache file exists, try to open, but if it fails, regenerate.
      // If it does not exist, generate.
      image_file_name = GetArtCacheFilenameOrDie(image_file_name);
      if (OS::FileExists(image_file_name.c_str())) {
        image_space = Space::CreateImageSpace(image_file_name);
      }
      if (image_space == NULL) {
        if (!GenerateImage(image_file_name)) {
          LOG(FATAL) << "Failed to generate image: " << image_file_name;
        }
        image_space = Space::CreateImageSpace(image_file_name);
      }
    }
    if (image_space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_name;
    }

    AddSpace(image_space);
    UpdateFirstAndLastSpace(&first_space, &last_space, image_space);
    // Oat files referenced by image files immediately follow them in memory, ensure alloc space
    // isn't going to get in the middle
    byte* oat_end_addr = GetImageSpace()->GetImageHeader().GetOatEnd();
    CHECK(oat_end_addr > GetImageSpace()->End());
    if (oat_end_addr > requested_begin) {
      requested_begin = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_end_addr),
                                                        kPageSize));
    }
  }

  UniquePtr<AllocSpace> alloc_space(Space::CreateAllocSpace(
      "alloc space", initial_size, growth_limit, capacity, requested_begin));
  alloc_space_ = alloc_space.release();
  CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
  AddSpace(alloc_space_);

  UpdateFirstAndLastSpace(&first_space, &last_space, alloc_space_);
  byte* heap_begin = first_space->Begin();
  size_t heap_capacity = (last_space->Begin() - first_space->Begin()) + last_space->NonGrowthLimitCapacity();

  // Mark image objects in the live bitmap
  for (size_t i = 0; i < spaces_.size(); ++i) {
    Space* space = spaces_[i];
    if (space->IsImageSpace()) {
      space->AsImageSpace()->RecordImageAllocations(space->GetLiveBitmap());
    }
  }

  // Allocate the card table.
  card_table_.reset(CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  mod_union_table_.reset(new ModUnionTableToZygoteAllocspace<ModUnionTableReferenceCache>(this));
  CHECK(mod_union_table_.get() != NULL) << "Failed to create mod-union table";

  zygote_mod_union_table_.reset(new ModUnionTableCardCache(this));
  CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  mark_stack_.reset(MarkStack::Create());

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_.reset(new Mutex("Heap lock", kHeapLock));
  condition_.reset(new ConditionVariable("Heap condition variable"));

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

// Sort spaces based on begin address
class SpaceSorter {
 public:
  bool operator () (const Space* a, const Space* b) const {
    return a->Begin() < b->Begin();
  }
};

void Heap::AddSpace(Space* space) {
  DCHECK(space != NULL);
  DCHECK(space->GetLiveBitmap() != NULL);
  live_bitmap_->AddSpaceBitmap(space->GetLiveBitmap());
  DCHECK(space->GetMarkBitmap() != NULL);
  mark_bitmap_->AddSpaceBitmap(space->GetMarkBitmap());
  spaces_.push_back(space);
  // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger)
  std::sort(spaces_.begin(), spaces_.end(), SpaceSorter());
}

Heap::~Heap() {
  VLOG(heap) << "~Heap()";
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the thread list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&spaces_);
}

Space* Heap::FindSpaceFromObject(const Object* obj) const {
  // TODO: C++0x auto
  for (Spaces::const_iterator cur = spaces_.begin(); cur != spaces_.end(); ++cur) {
    if ((*cur)->Contains(obj)) {
      return *cur;
    }
  }
  LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  return NULL;
}

ImageSpace* Heap::GetImageSpace() {
  // TODO: C++0x auto
  for (Spaces::const_iterator cur = spaces_.begin(); cur != spaces_.end(); ++cur) {
    if ((*cur)->IsImageSpace()) {
      return (*cur)->AsImageSpace();
    }
  }
  return NULL;
}

AllocSpace* Heap::GetAllocSpace() {
  return alloc_space_;
}

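// Callback for AllocSpace::Walk that records the largest free chunk seen; AllocObject uses it
// below to report the largest possible contiguous allocation in OutOfMemoryError messages.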
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);

  size_t chunk_size = static_cast<size_t>(reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start));
  size_t chunk_free_bytes = 0;
  if (used_bytes < chunk_size) {
    chunk_free_bytes = chunk_size - used_bytes;
  }

  if (chunk_free_bytes > max_contiguous_allocation) {
    max_contiguous_allocation = chunk_free_bytes;
  }
}

Object* Heap::AllocObject(Class* c, size_t byte_count) {
  // Used in the detail message if we throw an OOME.
  int64_t total_bytes_free;
  size_t max_contiguous_allocation;

  {
    ScopedHeapLock heap_lock;
    DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(Class)) ||
           (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
           strlen(ClassHelper(c).GetDescriptor()) == 0);
    DCHECK_GE(byte_count, sizeof(Object));
    Object* obj = AllocateLocked(byte_count);
    if (obj != NULL) {
      obj->SetClass(c);
      if (Dbg::IsAllocTrackingEnabled()) {
        Dbg::RecordAllocation(c, byte_count);
      }

      if (!is_gc_running_ && num_bytes_allocated_ >= concurrent_start_bytes_) {
        // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
        SirtRef<Object> ref(obj);
        RequestConcurrentGC();
      }
      VerifyObject(obj);

      // Additional verification to ensure that we did not allocate into a zygote space.
      DCHECK(!have_zygote_space_ || !FindSpaceFromObject(obj)->IsZygoteSpace());

      return obj;
    }
    total_bytes_free = GetFreeMemory();
    max_contiguous_allocation = 0;
    // TODO: C++0x auto
    for (Spaces::const_iterator cur = spaces_.begin(); cur != spaces_.end(); ++cur) {
      if ((*cur)->IsAllocSpace()) {
        (*cur)->AsAllocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
      }
    }
  }

  std::string msg(StringPrintf("Failed to allocate a %zd-byte %s (%lld total bytes free; largest possible contiguous allocation %zd bytes)",
                               byte_count,
                               PrettyDescriptor(c).c_str(),
                               total_bytes_free, max_contiguous_allocation));
  Thread::Current()->ThrowOutOfMemoryError(msg.c_str());
  return NULL;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL) {
    return true;
  }
  if (!IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  for (size_t i = 0; i < spaces_.size(); ++i) {
    if (spaces_[i]->Contains(obj)) {
      return true;
    }
  }
  return false;
}

bool Heap::IsLiveObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  return IsHeapAddress(obj) && GetLiveBitmap()->Test(obj);
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (obj == NULL || this == NULL || !verify_objects_ || Runtime::Current()->IsShuttingDown() ||
      Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  {
    ScopedHeapLock heap_lock;
    Heap::VerifyObjectLocked(obj);
  }
}
#endif

void Heap::DumpSpaces() {
  // TODO: C++0x auto
  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    LOG(INFO) << **it;
  }
}

void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (!IsAligned<kObjectAlignment>(obj)) {
    LOG(FATAL) << "Object isn't aligned: " << obj;
  } else if (!GetLiveBitmap()->Test(obj)) {
    Space* space = FindSpaceFromObject(obj);
    if (space == NULL) {
      DumpSpaces();
      LOG(FATAL) << "Object " << obj << " is not contained in any space";
    }
    LOG(FATAL) << "Object is dead: " << obj << " in space " << *space;
  }
#if !VERIFY_OBJECT_FAST
  // Ignore early dawn of the universe verifications
  if (num_objects_allocated_ > 10) {
    const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
        Object::ClassOffset().Int32Value();
    const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
    if (c == NULL) {
      LOG(FATAL) << "Null class in object: " << obj;
    } else if (!IsAligned<kObjectAlignment>(c)) {
      LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
    } else if (!GetLiveBitmap()->Test(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
    // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
    // Note: we don't use the accessors here as they have internal sanity checks
    // that we don't want to run
    raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
    const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
    raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
    const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
    CHECK_EQ(c_c, c_c_c);
  }
#endif
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock heap_lock;
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

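// Bookkeeping for a successful allocation: bump the heap counters (and the runtime/thread stats
// if enabled), then mark the object in the live bitmap.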
void Heap::RecordAllocationLocked(AllocSpace* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_GT(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  live_bitmap_->Set(obj);
}

void Heap::RecordFreeLocked(size_t freed_objects, size_t freed_bytes) {
  lock_->AssertHeld();

  if (freed_objects < num_objects_allocated_) {
    num_objects_allocated_ -= freed_objects;
  } else {
    num_objects_allocated_ = 0;
  }
  if (freed_bytes < num_bytes_allocated_) {
    num_bytes_allocated_ -= freed_bytes;
  } else {
    num_bytes_allocated_ = 0;
  }

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->freed_objects;
    ++thread_stats->freed_objects;
    global_stats->freed_bytes += freed_bytes;
    thread_stats->freed_bytes += freed_bytes;
  }
}

Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();

  // Try the default alloc space first.
  Object* obj = AllocateLocked(alloc_space_, size);
  if (obj != NULL) {
    RecordAllocationLocked(alloc_space_, obj);
    return obj;
  }

  return NULL;
}

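// Allocation slow path: try a plain allocation, wait out any running GC and retry, then escalate
// through a partial GC, a full GC, growing the footprint, and finally a GC that clears
// SoftReferences before giving up and returning NULL.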
Object* Heap::AllocateLocked(AllocSpace* space, size_t alloc_size) {
  lock_->AssertHeld();

  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(Thread::Current()->GetState(), kRunnable);
  Thread::Current()->AssertThreadSuspensionIsAllowable();

  // Fail impossible allocations
  if (alloc_size > space->Capacity()) {
    // On failure collect soft references
    WaitForConcurrentGcToComplete();
    CollectGarbageInternal(false, false, true);
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap lock, wait for the GC to
    // complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(alloc_size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }

  if (have_zygote_space_) {
    // We don't need a WaitForConcurrentGcToComplete here since we checked is_gc_running_ earlier
    // and we hold the heap lock. Try a partial GC first.
    CollectGarbageInternal(true, false, false);
    ptr = space->AllocWithoutGrowth(alloc_size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Partial GC didn't free enough memory, try a full GC.
  CollectGarbageInternal(false, false, false);
  ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(alloc_size);
  if (ptr != NULL) {
    size_t new_footprint = space->GetFootprintLimit();
    // OLD-TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    VLOG(gc) << "Grow heap (frag case) to " << PrettySize(new_footprint)
             << " for a " << PrettySize(alloc_size) << " allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size) << " allocation";
  // We don't need a WaitForConcurrentGcToComplete here either.
  CollectGarbageInternal(false, false, true);
  ptr = space->AllocWithGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  size_t total = 0;
  // TODO: C++0x auto
  for (Spaces::const_iterator cur = spaces_.begin(); cur != spaces_.end(); ++cur) {
    if ((*cur)->IsAllocSpace()) {
      total += (*cur)->AsAllocSpace()->Capacity();
    }
  }
  return total;
}

int64_t Heap::GetTotalMemory() {
  return GetMaxMemory();
}

int64_t Heap::GetFreeMemory() {
  return GetMaxMemory() - num_bytes_allocated_;
}

class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class == class_) {
        ++count_;
      }
    } else {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ScopedHeapLock heap_lock;
  InstanceCounter counter(c, count_assignable);
  GetLiveBitmap()->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage(bool clear_soft_references) {
  ScopedHeapLock heap_lock;
  // If we just waited for a GC to complete then we do not need to do another
  // GC unless we clear soft references.
  if (!WaitForConcurrentGcToComplete() || clear_soft_references) {
    CollectGarbageInternal(have_zygote_space_, true, clear_soft_references);
  }
}

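// Called before the Zygote forks: the objects allocated so far are frozen into a zygote space
// that is only collected by full GCs, and a fresh alloc space is carved out of the remaining
// heap memory for post-fork allocations.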
void Heap::PreZygoteFork() {
  ScopedHeapLock heap_lock;

  // Try to see if we have any Zygote spaces.
  if (have_zygote_space_) {
    return;
  }

  VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(GetBytesAllocated());

  // Replace the first alloc space we find with a zygote space.
  // TODO: C++0x auto
  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsAllocSpace()) {
      AllocSpace* zygote_space = (*it)->AsAllocSpace();

      // Turn the current alloc space into a Zygote space and obtain the new alloc space composed
      // of the remaining available heap memory.
      alloc_space_ = zygote_space->CreateZygoteSpace();

      // Change the GC retention policy of the zygote space to only collect when full.
      zygote_space->SetGcRetentionPolicy(GCRP_FULL_COLLECT);
      AddSpace(alloc_space_);
      have_zygote_space_ = true;
      break;
    }
  }
}

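// Drives a mark-sweep collection. A concurrent GC stops the world only twice: once to mark the
// roots, and once to re-mark roots and re-scan objects dirtied during concurrent marking; a
// non-concurrent GC keeps all threads suspended until after the sweep.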
void Heap::CollectGarbageInternal(bool partial_gc, bool concurrent, bool clear_soft_references) {
  lock_->AssertHeld();

  CHECK(!is_gc_running_) << "Attempted recursive GC";
  is_gc_running_ = true;

  TimingLogger timings("CollectGarbageInternal");
  uint64_t t0 = NanoTime(), root_end = 0, dirty_begin = 0, dirty_end = 0;

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  timings.AddSplit("SuspendAll");

  size_t initial_size = num_bytes_allocated_;
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep(mark_stack_.get());
    timings.AddSplit("ctor");

    mark_sweep.Init();
    timings.AddSplit("Init");

    // Make sure that the tables have the correct pointer for the mark sweep.
    mod_union_table_->Init(&mark_sweep);
    zygote_mod_union_table_->Init(&mark_sweep);

    // Clear image space cards and keep track of cards we cleared in the mod-union table.
    for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
      Space* space = *it;
      if (space->IsImageSpace()) {
        mod_union_table_->ClearCards(*it);
      } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
        zygote_mod_union_table_->ClearCards(space);
      } else if (concurrent) {
        card_table_->ClearSpaceCards(space);
      }
    }
    timings.AddSplit("ClearCards");

#if VERIFY_MOD_UNION
    mod_union_table_->Verify();
    zygote_mod_union_table_->Verify();
#endif

    if (partial_gc) {
      // Copy the mark bits over from the live bits, do this as early as possible or else we can
      // accidentally un-mark roots.
      // Needed for scanning dirty objects.
      mark_sweep.CopyMarkBits();
      timings.AddSplit("CopyMarkBits");
    }

    mark_sweep.MarkRoots();
    timings.AddSplit("MarkRoots");

    // Roots are marked on the bitmap and the mark_stack is empty.
    DCHECK(mark_sweep.IsMarkStackEmpty());

    if (concurrent) {
      // We need to resume before unlocking or else a thread waiting for the
      // heap lock would re-suspend since we have not yet called ResumeAll.
      thread_list->ResumeAll();
      Unlock();
      root_end = NanoTime();
      timings.AddSplit("RootEnd");
    }

    // Update zygote mod union table.
    if (partial_gc) {
      zygote_mod_union_table_->Update();
      timings.AddSplit("UpdateZygoteModUnionTable");

      zygote_mod_union_table_->MarkReferences();
      timings.AddSplit("ZygoteMarkReferences");
    }

    // Processes the cards we cleared earlier and adds their objects into the mod-union table.
    mod_union_table_->Update();
    timings.AddSplit("UpdateModUnionTable");

    // Scans all objects in the mod-union table.
    mod_union_table_->MarkReferences();
    timings.AddSplit("MarkImageToAllocSpaceReferences");

    // Recursively mark all the non-image bits set in the mark bitmap.
    mark_sweep.RecursiveMark(partial_gc);
    timings.AddSplit(partial_gc ? "PartialMark" : "RecursiveMark");

    if (concurrent) {
      dirty_begin = NanoTime();
      Lock();
      thread_list->SuspendAll();
      timings.AddSplit("ReSuspend");

      // Re-mark root set.
      mark_sweep.ReMarkRoots();
      timings.AddSplit("ReMarkRoots");

      // Scan objects that were dirtied by the mutators while the concurrent mark was running.
      mark_sweep.RecursiveMarkDirtyObjects();
      timings.AddSplit("RecursiveMarkDirtyObjects");
    }

    mark_sweep.ProcessReferences(clear_soft_references);
    timings.AddSplit("ProcessReferences");

    // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
    // these bitmaps. Doing this enables us to sweep with the heap unlocked since new allocations
    // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark bit
    // instead, resulting in no new allocated objects being incorrectly freed by sweep.
    for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
      Space* space = *it;
      // We never allocate into zygote spaces.
      if (space->GetGcRetentionPolicy() == GCRP_ALWAYS_COLLECT) {
        live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
        mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
        space->AsAllocSpace()->SwapBitmaps();
      }
    }

    // Verify that we only reach marked objects from the image space
    mark_sweep.VerifyImageRoots();
    timings.AddSplit("VerifyImageRoots");

    if (concurrent) {
      thread_list->ResumeAll();
      dirty_end = NanoTime();
      Unlock();
    }

    mark_sweep.Sweep(partial_gc);
    timings.AddSplit("Sweep");

    cleared_references = mark_sweep.GetClearedReferences();
  }

  if (concurrent) {
    // Relock since we unlocked earlier.
    // TODO: We probably don't need to have the heap locked for the remainder of the function, except for GrowForUtilization.
    Lock();
  }

  GrowForUtilization();
  timings.AddSplit("GrowForUtilization");

  if (!concurrent) {
    thread_list->ResumeAll();
    dirty_end = NanoTime();
  }

  EnqueueClearedReferences(&cleared_references);
  RequestHeapTrim();
  timings.AddSplit("Finish");

  if (VLOG_IS_ON(gc)) {
    uint64_t t1 = NanoTime();

    // TODO: somehow make the specific GC implementation (here MarkSweep) responsible for logging.
    // Reason: For CMS sometimes initial_size < num_bytes_allocated_ results in overflow (3GB freed message).
    size_t bytes_freed = initial_size - num_bytes_allocated_;
    uint64_t duration_ns = t1 - t0;
    duration_ns -= duration_ns % 1000;

    // If the GC was slow, then print timings in the log.
    if (concurrent) {
      uint64_t pause_roots = (root_end - t0) / 1000 * 1000;
      uint64_t pause_dirty = (dirty_end - dirty_begin) / 1000 * 1000;
      if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5)) {
        LOG(INFO) << (partial_gc ? "Partial " : "")
                  << "GC freed " << PrettySize(bytes_freed) << ", " << GetPercentFree() << "% free, "
                  << PrettySize(num_bytes_allocated_) << "/" << PrettySize(GetTotalMemory()) << ", "
                  << "paused " << PrettyDuration(pause_roots) << "+" << PrettyDuration(pause_dirty)
                  << ", total " << PrettyDuration(duration_ns);
      }
    } else {
      if (duration_ns > MsToNs(50)) {
        uint64_t markSweepTime = (dirty_end - t0) / 1000 * 1000;
        LOG(INFO) << (partial_gc ? "Partial " : "")
                  << "GC freed " << PrettySize(bytes_freed) << ", " << GetPercentFree() << "% free, "
                  << PrettySize(num_bytes_allocated_) << "/" << PrettySize(GetTotalMemory()) << ", "
                  << "paused " << PrettyDuration(markSweepTime)
                  << ", total " << PrettyDuration(duration_ns);
      }
    }
  }
  Dbg::GcDidFinish();
  if (VLOG_IS_ON(heap)) {
    timings.Dump();
  }

  is_gc_running_ = false;

  // Wake anyone who may have been waiting for the GC to complete.
  condition_->Broadcast();
}

bool Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();

  // Wait for any in-progress GC to finish.
  if (is_gc_running_) {
    uint64_t wait_start = NanoTime();

    do {
      ScopedThreadStateChange tsc(Thread::Current(), kVmWait);
      ScopedThreadListLockReleaser list_lock_releaser;
      condition_->Wait(*lock_);
    } while (is_gc_running_);
    uint64_t wait_time = NanoTime() - wait_start;
    if (wait_time > MsToNs(5)) {
      LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
    }
    DCHECK(!is_gc_running_);
    return true;
  }
  return false;
}

void Heap::DumpForSigQuit(std::ostream& os) {
  os << "Heap: " << GetPercentFree() << "% free, "
     << PrettySize(num_bytes_allocated_) << "/" << PrettySize(GetTotalMemory())
     << "; " << num_objects_allocated_ << " objects\n";
}

size_t Heap::GetPercentFree() {
  size_t total = GetTotalMemory();
  return 100 - static_cast<size_t>(100.0f * static_cast<float>(num_bytes_allocated_) / total);
}

void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
  AllocSpace* alloc_space = alloc_space_;
  // TODO: Behavior for multiple alloc spaces?
  size_t alloc_space_capacity = alloc_space->Capacity();
  if (max_allowed_footprint > alloc_space_capacity) {
    VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint)
             << " to " << PrettySize(alloc_space_capacity);
    max_allowed_footprint = alloc_space_capacity;
  }
  alloc_space->SetFootprintLimit(max_allowed_footprint);
}

// kHeapIdealFree is the ideal maximum free size, when we grow the heap for utilization.
static const size_t kHeapIdealFree = 2 * MB;
// kHeapMinFree guarantees that you always have at least 512 KB free, when you grow for utilization,
// regardless of target utilization ratio.
static const size_t kHeapMinFree = kHeapIdealFree / 4;

void Heap::GrowForUtilization() {
  lock_->AssertHeld();

  // We know what our utilization is at this moment.
  // This doesn't actually resize any memory. It just lets the heap grow more
  // when necessary.
  size_t target_size(num_bytes_allocated_ / Heap::GetTargetHeapUtilization());

  if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
    target_size = num_bytes_allocated_ + kHeapIdealFree;
  } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
    target_size = num_bytes_allocated_ + kHeapMinFree;
  }

  // Calculate when to perform the next ConcurrentGC.
  if (GetTotalMemory() - num_bytes_allocated_ < concurrent_min_free_) {
    // Not enough free memory to perform concurrent GC.
    concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
  } else {
    concurrent_start_bytes_ = alloc_space_->GetFootprintLimit() - concurrent_start_size_;
  }

  SetIdealFootprint(target_size);
}

void Heap::ClearGrowthLimit() {
  ScopedHeapLock heap_lock;
  WaitForConcurrentGcToComplete();
  alloc_space_->ClearGrowthLimit();
}

pid_t Heap::GetLockOwner() {
  return lock_->GetOwner();
}

void Heap::Lock() {
  // Grab the lock, but put ourselves into kVmWait if it looks
  // like we're going to have to wait on the mutex. This prevents
  // deadlock if another thread is calling CollectGarbageInternal,
  // since they will have the heap lock and be waiting for mutators to
  // suspend.
  if (!lock_->TryLock()) {
    ScopedThreadStateChange tsc(Thread::Current(), kVmWait);
    lock_->Lock();
  }
}

void Heap::Unlock() {
  lock_->Unlock();
}

void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
                               MemberOffset reference_queue_offset,
                               MemberOffset reference_queueNext_offset,
                               MemberOffset reference_pendingNext_offset,
                               MemberOffset finalizer_reference_zombie_offset) {
  reference_referent_offset_ = reference_referent_offset;
  reference_queue_offset_ = reference_queue_offset;
  reference_queueNext_offset_ = reference_queueNext_offset;
  reference_pendingNext_offset_ = reference_pendingNext_offset;
  finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
  CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
  CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
  CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
}

Object* Heap::GetReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
}

void Heap::ClearReferenceReferent(Object* reference) {
  DCHECK(reference != NULL);
  DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
  reference->SetFieldObject(reference_referent_offset_, NULL, true);
}

// Returns true if the reference object has not yet been enqueued.
bool Heap::IsEnqueuable(const Object* ref) {
  DCHECK(ref != NULL);
  const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
  const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
  return (queue != NULL) && (queue_next == NULL);
}

void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
  DCHECK(ref != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
  CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
  EnqueuePendingReference(ref, cleared_reference_list);
}

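// Pending references form a circular singly-linked list through the pendingNext field: *list
// points at the tail, the tail's pendingNext is the head, and dequeuing removes from the head.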
void Heap::EnqueuePendingReference(Object* ref, Object** list) {
  DCHECK(ref != NULL);
  DCHECK(list != NULL);

  if (*list == NULL) {
    ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
    *list = ref;
  } else {
    Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    ref->SetFieldObject(reference_pendingNext_offset_, head, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
  }
}

Object* Heap::DequeuePendingReference(Object** list) {
  DCHECK(list != NULL);
  DCHECK(*list != NULL);
  Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
  Object* ref;
  if (*list == head) {
    ref = *list;
    *list = NULL;
  } else {
    Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
    (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
    ref = head;
  }
  ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
  return ref;
}

void Heap::AddFinalizerReference(Thread* self, Object* object) {
  ScopedJniThreadState ts(self);
  JValue args[1];
  args[0].SetL(object);
  ts.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, NULL, args, NULL);
}

void Heap::EnqueueClearedReferences(Object** cleared) {
  DCHECK(cleared != NULL);
  if (*cleared != NULL) {
    ScopedJniThreadState ts(Thread::Current());
    JValue args[1];
    args[0].SetL(*cleared);
    ts.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(ts.Self(), NULL, args, NULL);
    *cleared = NULL;
  }
}

void Heap::RequestConcurrentGC() {
  // Make sure that we can do a concurrent GC.
  if (requesting_gc_ ||
      !Runtime::Current()->IsFinishedStarting() ||
      Runtime::Current()->IsShuttingDown() ||
      !Runtime::Current()->IsConcurrentGcEnabled()) {
    return;
  }

  requesting_gc_ = true;
  JNIEnv* env = Thread::Current()->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, WellKnownClasses::java_lang_Daemons_requestGC);
  CHECK(!env->ExceptionCheck());
  requesting_gc_ = false;
}

void Heap::ConcurrentGC() {
  if (Runtime::Current()->IsShuttingDown()) {
    return;
  }
  ScopedHeapLock heap_lock;
  // We shouldn't need a WaitForConcurrentGcToComplete here since only
  // concurrent GC resumes threads before the GC is completed and this function
  // is only called within the GC daemon thread.
  CHECK(!is_gc_running_);
  // Current thread needs to be runnable or else we can't suspend all threads.
  ScopedThreadStateChange tsc(Thread::Current(), kRunnable);
  if (!WaitForConcurrentGcToComplete()) {
    CollectGarbageInternal(have_zygote_space_, true, false);
  }
}

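// Trim the given alloc space (see AllocSpace::Trim) once any in-progress GC has completed.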
void Heap::Trim(AllocSpace* alloc_space) {
  lock_->AssertHeld();
  WaitForConcurrentGcToComplete();
  alloc_space->Trim();
}

void Heap::RequestHeapTrim() {
  // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
  // because that only marks object heads, so a large array looks like lots of empty space. We
  // don't just call dlmalloc all the time, because the cost of an _attempted_ trim is proportional
  // to utilization (which is probably inversely proportional to how much benefit we can expect).
  // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
  // not how much use we're making of those pages.
  float utilization = static_cast<float>(num_bytes_allocated_) / alloc_space_->Size();
  uint64_t ms_time = NsToMs(NanoTime());
  if (utilization > 0.75f || ms_time - last_trim_time_ < 2 * 1000) {
    // Don't bother trimming the heap if it's more than 75% utilized, or if a
    // heap trim occurred in the last two seconds.
    return;
  }
  if (!Runtime::Current()->IsFinishedStarting() || Runtime::Current()->IsShuttingDown()) {
    // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time)
    // Also: we do not wish to start a heap trim if the runtime is shutting down.
    return;
  }
  last_trim_time_ = ms_time;
  JNIEnv* env = Thread::Current()->GetJniEnv();
  DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
  DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
  env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, WellKnownClasses::java_lang_Daemons_requestHeapTrim);
  CHECK(!env->ExceptionCheck());
}

}  // namespace art