/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "heap.h"

#include <sys/types.h>
#include <sys/wait.h>

#include <limits>
#include <vector>

#include "atomic.h"
#include "card_table.h"
#include "debugger.h"
#include "heap_bitmap.h"
#include "image.h"
#include "mark_sweep.h"
#include "mod_union_table.h"
#include "object.h"
#include "object_utils.h"
#include "os.h"
#include "ScopedLocalRef.h"
#include "scoped_thread_state_change.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"
#include "timing_logger.h"
#include "UniquePtr.h"
#include "well_known_classes.h"

namespace art {

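// Helper used while constructing the Heap: tracks the lowest and highest spaces seen so far, so
// the overall heap bounds can be computed for card table creation below.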
static void UpdateFirstAndLastSpace(Space** first_space, Space** last_space, Space* space) {
  if (*first_space == NULL) {
    *first_space = space;
    *last_space = space;
  } else {
    if ((*first_space)->Begin() > space->Begin()) {
      *first_space = space;
    } else if (space->Begin() > (*last_space)->Begin()) {
      *last_space = space;
    }
  }
}

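// Forks and execs dex2oat to (re)generate the boot image at image_file_name from the boot class
// path; returns false if dex2oat could not be exec'd or exited with an error.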
static bool GenerateImage(const std::string& image_file_name) {
  const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
  std::vector<std::string> boot_class_path;
  Split(boot_class_path_string, ':', boot_class_path);
  if (boot_class_path.empty()) {
    LOG(FATAL) << "Failed to generate image because no boot class path specified";
  }

  std::vector<char*> arg_vector;

  std::string dex2oat_string(GetAndroidRoot());
  dex2oat_string += (kIsDebugBuild ? "/bin/dex2oatd" : "/bin/dex2oat");
  const char* dex2oat = dex2oat_string.c_str();
  arg_vector.push_back(strdup(dex2oat));

  std::string image_option_string("--image=");
  image_option_string += image_file_name;
  const char* image_option = image_option_string.c_str();
  arg_vector.push_back(strdup(image_option));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xms64m"));

  arg_vector.push_back(strdup("--runtime-arg"));
  arg_vector.push_back(strdup("-Xmx64m"));

  for (size_t i = 0; i < boot_class_path.size(); i++) {
    std::string dex_file_option_string("--dex-file=");
    dex_file_option_string += boot_class_path[i];
    const char* dex_file_option = dex_file_option_string.c_str();
    arg_vector.push_back(strdup(dex_file_option));
  }

  std::string oat_file_option_string("--oat-file=");
  oat_file_option_string += image_file_name;
  oat_file_option_string.erase(oat_file_option_string.size() - 3);
  oat_file_option_string += "oat";
  const char* oat_file_option = oat_file_option_string.c_str();
  arg_vector.push_back(strdup(oat_file_option));

  arg_vector.push_back(strdup("--base=0x60000000"));

  std::string command_line(Join(arg_vector, ' '));
  LOG(INFO) << command_line;

  arg_vector.push_back(NULL);
  char** argv = &arg_vector[0];

  // fork and exec dex2oat
  pid_t pid = fork();
  if (pid == 0) {
    // no allocation allowed between fork and exec

    // change process groups, so we don't get reaped by ProcessManager
    setpgid(0, 0);

    execv(dex2oat, argv);

    PLOG(FATAL) << "execv(" << dex2oat << ") failed";
    return false;
  } else {
    STLDeleteElements(&arg_vector);

    // wait for dex2oat to finish
    int status;
    pid_t got_pid = TEMP_FAILURE_RETRY(waitpid(pid, &status, 0));
    if (got_pid != pid) {
      PLOG(ERROR) << "waitpid failed: wanted " << pid << ", got " << got_pid;
      return false;
    }
    if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) {
      LOG(ERROR) << dex2oat << " failed: " << command_line;
      return false;
    }
  }
  return true;
}

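// Heap construction: map (or generate) the boot image, create the alloc space after the image's
// oat file, then set up the card table, mod-union tables, and the GC mark/allocation/live stacks.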
Heap::Heap(size_t initial_size, size_t growth_limit, size_t capacity,
           const std::string& original_image_file_name, bool concurrent_gc)
    : alloc_space_(NULL),
      card_table_(NULL),
      concurrent_gc_(concurrent_gc),
      have_zygote_space_(false),
      card_marking_disabled_(false),
      is_gc_running_(false),
      last_gc_type_(kGcTypeNone),
      concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
      concurrent_start_size_(128 * KB),
      concurrent_min_free_(256 * KB),
      sticky_gc_count_(0),
      num_bytes_allocated_(0),
      num_objects_allocated_(0),
      pre_gc_verify_heap_(false),
      post_gc_verify_heap_(false),
      verify_mod_union_table_(false),
      last_trim_time_(0),
      try_running_gc_(false),
      requesting_gc_(false),
      reference_referent_offset_(0),
      reference_queue_offset_(0),
      reference_queueNext_offset_(0),
      reference_pendingNext_offset_(0),
      finalizer_reference_zombie_offset_(0),
      target_utilization_(0.5),
      verify_objects_(false) {
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() entering";
  }

  // Compute the bounds of all spaces for allocating live and mark bitmaps
  // there will be at least one space (the alloc space)
  Space* first_space = NULL;
  Space* last_space = NULL;

  live_bitmap_.reset(new HeapBitmap(this));
  mark_bitmap_.reset(new HeapBitmap(this));

  // Requested begin for the alloc space, to follow the mapped image and oat files
  byte* requested_begin = NULL;
  std::string image_file_name(original_image_file_name);
  if (!image_file_name.empty()) {
    Space* image_space = NULL;

    if (OS::FileExists(image_file_name.c_str())) {
      // If the /system file exists, it should be up-to-date, don't try to generate
      image_space = Space::CreateImageSpace(image_file_name);
    } else {
      // If the /system file didn't exist, we need to use one from the art-cache.
      // If the cache file exists, try to open, but if it fails, regenerate.
      // If it does not exist, generate.
      image_file_name = GetArtCacheFilenameOrDie(image_file_name);
      if (OS::FileExists(image_file_name.c_str())) {
        image_space = Space::CreateImageSpace(image_file_name);
      }
      if (image_space == NULL) {
        if (!GenerateImage(image_file_name)) {
          LOG(FATAL) << "Failed to generate image: " << image_file_name;
        }
        image_space = Space::CreateImageSpace(image_file_name);
      }
    }
    if (image_space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_name;
    }

    AddSpace(image_space);
    UpdateFirstAndLastSpace(&first_space, &last_space, image_space);
    // Oat files referenced by image files immediately follow them in memory, ensure alloc space
    // isn't going to get in the middle
    byte* oat_end_addr = GetImageSpace()->GetImageHeader().GetOatEnd();
    CHECK(oat_end_addr > GetImageSpace()->End());
    if (oat_end_addr > requested_begin) {
      requested_begin = reinterpret_cast<byte*>(RoundUp(reinterpret_cast<uintptr_t>(oat_end_addr),
                                                        kPageSize));
    }
  }

  UniquePtr<AllocSpace> alloc_space(Space::CreateAllocSpace(
      "alloc space", initial_size, growth_limit, capacity, requested_begin));
  alloc_space_ = alloc_space.release();
  CHECK(alloc_space_ != NULL) << "Failed to create alloc space";
  AddSpace(alloc_space_);

  UpdateFirstAndLastSpace(&first_space, &last_space, alloc_space_);
  byte* heap_begin = first_space->Begin();
  size_t heap_capacity = (last_space->Begin() - first_space->Begin()) + last_space->NonGrowthLimitCapacity();

  // Mark image objects in the live bitmap
  for (size_t i = 0; i < spaces_.size(); ++i) {
    Space* space = spaces_[i];
    if (space->IsImageSpace()) {
      space->AsImageSpace()->RecordImageAllocations(space->GetLiveBitmap());
    }
  }

  // Allocate the card table.
  card_table_.reset(CardTable::Create(heap_begin, heap_capacity));
  CHECK(card_table_.get() != NULL) << "Failed to create card table";

  mod_union_table_.reset(new ModUnionTableToZygoteAllocspace<ModUnionTableReferenceCache>(this));
  CHECK(mod_union_table_.get() != NULL) << "Failed to create mod-union table";

  zygote_mod_union_table_.reset(new ModUnionTableCardCache(this));
  CHECK(zygote_mod_union_table_.get() != NULL) << "Failed to create Zygote mod-union table";

  // TODO: Count objects in the image space here.
  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // Max stack size in bytes.
  static const size_t max_stack_size = capacity / SpaceBitmap::kAlignment * kWordSize;

  // TODO: Rename MarkStack to a more generic name?
  mark_stack_.reset(MarkStack::Create("dalvik-mark-stack", max_stack_size));
  allocation_stack_.reset(MarkStack::Create("dalvik-allocation-stack", max_stack_size));
  live_stack_.reset(MarkStack::Create("dalvik-live-stack", max_stack_size));

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  gc_complete_lock_ = new Mutex("GC complete lock");
  gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable"));

  // Set up the cumulative timing loggers.
  for (size_t i = 0; i < static_cast<size_t>(kGcTypeMax); ++i) {
    std::ostringstream name;
    name << static_cast<GcType>(i);
    cumulative_timings_.Put(static_cast<GcType>(i),
                            new CumulativeLogger(name.str().c_str(), true));
  }

  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Heap() exiting";
  }
}

// Sort spaces based on begin address
class SpaceSorter {
 public:
  bool operator ()(const Space* a, const Space* b) const {
    return a->Begin() < b->Begin();
  }
};

void Heap::AddSpace(Space* space) {
  WriterMutexLock mu(*Locks::heap_bitmap_lock_);
  DCHECK(space != NULL);
  DCHECK(space->GetLiveBitmap() != NULL);
  live_bitmap_->AddSpaceBitmap(space->GetLiveBitmap());
  DCHECK(space->GetMarkBitmap() != NULL);
  mark_bitmap_->AddSpaceBitmap(space->GetMarkBitmap());
  spaces_.push_back(space);
  if (space->IsAllocSpace()) {
    alloc_space_ = space->AsAllocSpace();
  }

  // Ensure that spaces remain sorted in increasing order of start address (required for CMS finger)
  std::sort(spaces_.begin(), spaces_.end(), SpaceSorter());

  // Ensure that ImageSpaces < ZygoteSpaces < AllocSpaces so that we can do address based checks to
  // avoid redundant marking.
  bool seen_zygote = false, seen_alloc = false;
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    if (space->IsImageSpace()) {
      DCHECK(!seen_zygote);
      DCHECK(!seen_alloc);
    } else if (space->IsZygoteSpace()) {
      DCHECK(!seen_alloc);
      seen_zygote = true;
    } else if (space->IsAllocSpace()) {
      seen_alloc = true;
    }
  }
}

Heap::~Heap() {
  // If we don't reset then the mark stack complains in its destructor.
  allocation_stack_->Reset();
  live_stack_->Reset();

  VLOG(heap) << "~Heap()";
  // We can't take the heap lock here because there might be a daemon thread suspended with the
  // heap lock held. We know though that no non-daemon threads are executing, and we know that
  // all daemon threads are suspended, and we also know that the threads list has been deleted, so
  // those threads can't resume. We're the only running thread, and we can do whatever we like...
  STLDeleteElements(&spaces_);
  delete gc_complete_lock_;

  STLDeleteValues(&cumulative_timings_);
}

Space* Heap::FindSpaceFromObject(const Object* obj) const {
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->Contains(obj)) {
      return *it;
    }
  }
  LOG(FATAL) << "object " << reinterpret_cast<const void*>(obj) << " not inside any spaces!";
  return NULL;
}

ImageSpace* Heap::GetImageSpace() {
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsImageSpace()) {
      return (*it)->AsImageSpace();
    }
  }
  return NULL;
}

AllocSpace* Heap::GetAllocSpace() {
  return alloc_space_;
}

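// Callback passed to AllocSpace::Walk; records the largest free chunk seen so the out-of-memory
// message in AllocObject can report the largest possible contiguous allocation.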
static void MSpaceChunkCallback(void* start, void* end, size_t used_bytes, void* arg) {
  size_t& max_contiguous_allocation = *reinterpret_cast<size_t*>(arg);

  size_t chunk_size = static_cast<size_t>(reinterpret_cast<uint8_t*>(end) - reinterpret_cast<uint8_t*>(start));
  size_t chunk_free_bytes = 0;
  if (used_bytes < chunk_size) {
    chunk_free_bytes = chunk_size - used_bytes;
  }

  if (chunk_free_bytes > max_contiguous_allocation) {
    max_contiguous_allocation = chunk_free_bytes;
  }
}

Object* Heap::AllocObject(Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(Object));
  Object* obj = Allocate(alloc_space_, byte_count);
  if (LIKELY(obj != NULL)) {
    obj->SetClass(c);

    // Record allocation after since we want to use the atomic add for the atomic fence to guard
    // the SetClass since we do not want the class to appear NULL in another thread.
    RecordAllocation(alloc_space_, obj);

    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(c, byte_count);
    }
    const bool request_concurrent_gc = num_bytes_allocated_ >= concurrent_start_bytes_;
    if (request_concurrent_gc) {
      // The SirtRef is necessary since the calls in RequestConcurrentGC are a safepoint.
      SirtRef<Object> ref(obj);
      RequestConcurrentGC();
    }
    VerifyObject(obj);

    // Additional verification to ensure that we did not allocate into a zygote space.
    DCHECK(!have_zygote_space_ || !FindSpaceFromObject(obj)->IsZygoteSpace());

    return obj;
  }
  int64_t total_bytes_free = GetFreeMemory();
  size_t max_contiguous_allocation = 0;
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsAllocSpace()) {
      (*it)->AsAllocSpace()->Walk(MSpaceChunkCallback, &max_contiguous_allocation);
    }
  }

  std::string msg(StringPrintf("Failed to allocate a %zd-byte %s (%lld total bytes free; largest possible contiguous allocation %zd bytes)",
                               byte_count, PrettyDescriptor(c).c_str(), total_bytes_free, max_contiguous_allocation));
  Thread::Current()->ThrowOutOfMemoryError(msg.c_str());
  return NULL;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (obj == NULL) {
    return true;
  }
  if (!IsAligned<kObjectAlignment>(obj)) {
    return false;
  }
  for (size_t i = 0; i < spaces_.size(); ++i) {
    if (spaces_[i]->Contains(obj)) {
      return true;
    }
  }
  return false;
}

bool Heap::IsLiveObjectLocked(const Object* obj) {
  Locks::heap_bitmap_lock_->AssertReaderHeld();
  return IsHeapAddress(obj) && GetLiveBitmap()->Test(obj);
}

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  if (obj == NULL || this == NULL || !verify_objects_ || Runtime::Current()->IsShuttingDown() ||
      Thread::Current() == NULL ||
      Runtime::Current()->GetThreadList()->GetLockOwner() == Thread::Current()->GetTid()) {
    return;
  }
  VerifyObjectBody(obj);
}
#endif

void Heap::DumpSpaces() {
  // TODO: C++0x auto
  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    LOG(INFO) << *space;
    LOG(INFO) << *space->GetLiveBitmap();
    LOG(INFO) << *space->GetMarkBitmap();
  }
}

// We want to avoid bit rotting.
void Heap::VerifyObjectBody(const Object* obj) {
  if (!IsAligned<kObjectAlignment>(obj)) {
    LOG(FATAL) << "Object isn't aligned: " << obj;
  }

  // TODO: Smarter live check here which takes into account allocation stacks.
  //GlobalSynchronization::heap_bitmap_lock_->GetExclusiveOwnerTid()
  if (!GetLiveBitmap()->Test(obj)) {
    DumpSpaces();
    LOG(FATAL) << "Object is dead: " << obj;
  }

  // Ignore early dawn of the universe verifications
  if (!VERIFY_OBJECT_FAST && num_objects_allocated_ > 10) {
    const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
        Object::ClassOffset().Int32Value();
    const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
    if (c == NULL) {
      LOG(FATAL) << "Null class in object: " << obj;
    } else if (!IsAligned<kObjectAlignment>(c)) {
      LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
    } else if (!GetLiveBitmap()->Test(c)) {
      LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
    }
    // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
    // Note: we don't use the accessors here as they have internal sanity checks
    // that we don't want to run
    raw_addr = reinterpret_cast<const byte*>(c) + Object::ClassOffset().Int32Value();
    const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
    raw_addr = reinterpret_cast<const byte*>(c_c) + Object::ClassOffset().Int32Value();
    const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
    CHECK_EQ(c_c, c_c_c);
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  reinterpret_cast<Heap*>(arg)->VerifyObjectBody(obj);
}

void Heap::VerifyHeap() {
  ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
  GetLiveBitmap()->Walk(Heap::VerificationCallback, this);
}

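// Atomically bumps the allocation counters (mutators allocate concurrently) and pushes the object
// onto the allocation stack, which later GCs use to find objects allocated since the last GC.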
void Heap::RecordAllocation(AllocSpace* space, const Object* obj) {
  DCHECK(obj != NULL);

  size_t size = space->AllocationSize(obj);
  DCHECK_GT(size, 0u);
  COMPILE_ASSERT(sizeof(size_t) == sizeof(int32_t),
                 int32_t_must_be_same_size_as_size_t_for_used_atomic_operations);
  android_atomic_add(
      size, reinterpret_cast<volatile int32_t*>(reinterpret_cast<size_t>(&num_bytes_allocated_)));
  android_atomic_add(
      1, reinterpret_cast<volatile int32_t*>(reinterpret_cast<size_t>(&num_objects_allocated_)));

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    ++global_stats->allocated_objects;
    ++thread_stats->allocated_objects;
    global_stats->allocated_bytes += size;
    thread_stats->allocated_bytes += size;
  }

  allocation_stack_->AtomicPush(obj);
}

void Heap::RecordFree(size_t freed_objects, size_t freed_bytes) {
  COMPILE_ASSERT(sizeof(size_t) == sizeof(int32_t),
                 int32_t_must_be_same_size_as_size_t_for_used_atomic_operations);
  DCHECK_LE(freed_objects, num_objects_allocated_);
  android_atomic_add(-static_cast<int32_t>(freed_objects),
                     reinterpret_cast<volatile int32_t*>(
                         reinterpret_cast<size_t>(&num_objects_allocated_)));

  DCHECK_LE(freed_bytes, num_bytes_allocated_);
  android_atomic_add(-static_cast<int32_t>(freed_bytes),
                     reinterpret_cast<volatile int32_t*>(
                         reinterpret_cast<size_t>(&num_bytes_allocated_)));

  if (Runtime::Current()->HasStatsEnabled()) {
    RuntimeStats* global_stats = Runtime::Current()->GetStats();
    RuntimeStats* thread_stats = Thread::Current()->GetStats();
    global_stats->freed_objects += freed_objects;
    thread_stats->freed_objects += freed_objects;
    global_stats->freed_bytes += freed_bytes;
    thread_stats->freed_bytes += freed_bytes;
  }
}

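// Allocation slow path: try a plain allocation, wait for any in-progress GC and retry, escalate
// through sticky/partial/full collections, and finally allow the heap to grow.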
Object* Heap::Allocate(AllocSpace* space, size_t alloc_size) {
  Thread* self = Thread::Current();
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
#ifndef NDEBUG
  {
    MutexLock mu(*Locks::thread_suspend_count_lock_);
    CHECK_EQ(self->GetState(), kRunnable);
  }
  self->AssertThreadSuspensionIsAllowable();
#endif

  Object* ptr = space->AllocWithoutGrowth(alloc_size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it completes, and then retry the
  // allocation.
  GcType last_gc = WaitForConcurrentGcToComplete();
  if (last_gc != kGcTypeNone) {
    // A GC was in progress and we blocked, retry allocation now that memory has been freed.
    Object* ptr = space->AllocWithoutGrowth(alloc_size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Loop through our different Gc types and try to Gc until we get enough free memory.
  for (size_t i = static_cast<size_t>(last_gc) + 1; i < static_cast<size_t>(kGcTypeMax); ++i) {
    bool run_gc = false;
    GcType gc_type = static_cast<GcType>(i);
    switch (gc_type) {
      case kGcTypeSticky: {
        const size_t alloc_space_size = alloc_space_->Size();
        run_gc = alloc_space_size > kMinAllocSpaceSizeForStickyGC &&
            alloc_space_->Capacity() - alloc_space_size >= kMinRemainingSpaceForStickyGC;
        break;
      }
      case kGcTypePartial:
        run_gc = have_zygote_space_;
        break;
      case kGcTypeFull:
        run_gc = true;
        break;
      default:
        break;
    }

    if (run_gc) {
      if (Runtime::Current()->HasStatsEnabled()) {
        ++Runtime::Current()->GetStats()->gc_for_alloc_count;
        ++Thread::Current()->GetStats()->gc_for_alloc_count;
      }
      self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);

      // If we actually ran a different type of Gc than requested, we can skip the index forwards.
      GcType gc_type_ran = CollectGarbageInternal(gc_type, false);
      DCHECK(static_cast<size_t>(gc_type_ran) >= i);
      i = static_cast<size_t>(gc_type_ran);
      self->TransitionFromSuspendedToRunnable();

      // Did we free sufficient memory for the allocation to succeed?
      ptr = space->AllocWithoutGrowth(alloc_size);
      if (ptr != NULL) {
        return ptr;
      }
    }
  }

  // Allocations have failed after GCs; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(alloc_size);
  if (ptr != NULL) {
    size_t new_footprint = space->GetFootprintLimit();
    // OLD-TODO: may want to grow a little bit more so that the amount of
    //           free space is equal to the old free space + the
    //           utilization slop for the new allocation.
    VLOG(gc) << "Grow heap (frag case) to " << PrettySize(new_footprint)
             << " for a " << PrettySize(alloc_size) << " allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is really full, really fragmented,
  // or the requested size is really big. Do another GC, collecting SoftReferences this time. The
  // VM spec requires that all SoftReferences have been collected and cleared before throwing OOME.

  // OLD-TODO: wait for the finalizers from the previous GC to finish
  VLOG(gc) << "Forcing collection of SoftReferences for " << PrettySize(alloc_size)
           << " allocation";

  if (Runtime::Current()->HasStatsEnabled()) {
    ++Runtime::Current()->GetStats()->gc_for_alloc_count;
    ++Thread::Current()->GetStats()->gc_for_alloc_count;
  }
  // We don't need a WaitForConcurrentGcToComplete here either.
  self->TransitionFromRunnableToSuspended(kWaitingPerformingGc);
  CollectGarbageInternal(kGcTypeFull, true);
  self->TransitionFromSuspendedToRunnable();
  return space->AllocWithGrowth(alloc_size);
}

int64_t Heap::GetMaxMemory() {
  size_t total = 0;
  // TODO: C++0x auto
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    Space* space = *it;
    if (space->IsAllocSpace()) {
      total += space->AsAllocSpace()->Capacity();
    }
  }
  return total;
}

int64_t Heap::GetTotalMemory() {
  return GetMaxMemory();
}

int64_t Heap::GetFreeMemory() {
  return GetMaxMemory() - num_bytes_allocated_;
}

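// Live-bitmap visitor used by CountInstances to count heap objects of a given class, either by
// exact class match or by assignability (see VisitInstance).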
class InstanceCounter {
 public:
  InstanceCounter(Class* c, bool count_assignable)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : class_(c), count_assignable_(count_assignable), count_(0) {
  }

  size_t GetCount() {
    return count_;
  }

  static void Callback(Object* o, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    reinterpret_cast<InstanceCounter*>(arg)->VisitInstance(o);
  }

 private:
  void VisitInstance(Object* o) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Class* instance_class = o->GetClass();
    if (count_assignable_) {
      if (instance_class == class_) {
        ++count_;
      }
    } else {
      if (instance_class != NULL && class_->IsAssignableFrom(instance_class)) {
        ++count_;
      }
    }
  }

  Class* class_;
  bool count_assignable_;
  size_t count_;
};

int64_t Heap::CountInstances(Class* c, bool count_assignable) {
  ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
  InstanceCounter counter(c, count_assignable);
  GetLiveBitmap()->Walk(InstanceCounter::Callback, &counter);
  return counter.GetCount();
}

void Heap::CollectGarbage(bool clear_soft_references) {
  // Even if we waited for a GC we still need to do another GC since weaks allocated during the
  // last GC will not have necessarily been cleared.
  WaitForConcurrentGcToComplete();
  ScopedThreadStateChange tsc(Thread::Current(), kWaitingPerformingGc);
  CollectGarbageInternal(have_zygote_space_ ? kGcTypePartial : kGcTypeFull, clear_soft_references);
}

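// Called in the zygote before forking: turns the current alloc space into a zygote space
// (collected only by full GCs) and installs a fresh alloc space for post-fork allocations.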
void Heap::PreZygoteFork() {
  static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock);
  MutexLock mu(zygote_creation_lock_);

  // Try to see if we have any Zygote spaces.
  if (have_zygote_space_) {
    return;
  }

  VLOG(heap) << "Starting PreZygoteFork with alloc space size " << PrettySize(alloc_space_->Size());

  {
    // Flush the alloc stack.
    WriterMutexLock mu(*Locks::heap_bitmap_lock_);
    FlushAllocStack();
  }

  // Replace the first alloc space we find with a zygote space.
  // TODO: C++0x auto
  for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsAllocSpace()) {
      AllocSpace* zygote_space = (*it)->AsAllocSpace();

      // Turn the current alloc space into a Zygote space and obtain the new alloc space
      // composed of the remaining available heap memory.
      alloc_space_ = zygote_space->CreateZygoteSpace();

      // Change the GC retention policy of the zygote space to only collect when full.
      zygote_space->SetGcRetentionPolicy(GCRP_FULL_COLLECT);
      AddSpace(alloc_space_);
      have_zygote_space_ = true;
      break;
    }
  }

  // Reset the cumulative loggers since we now have a few additional timing phases.
  // TODO: C++0x
  for (CumulativeTimings::iterator it = cumulative_timings_.begin();
       it != cumulative_timings_.end(); ++it) {
    it->second->Reset();
  }

  // Reset this since we now count the ZygoteSpace in the total heap size.
  num_bytes_allocated_ = 0;
}

void Heap::FlushAllocStack() {
  MarkStackAsLive(allocation_stack_.get());
  allocation_stack_->Reset();
}

size_t Heap::GetUsedMemorySize() const {
  size_t total = num_bytes_allocated_;
  for (Spaces::const_iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
    if ((*it)->IsZygoteSpace()) {
      total += (*it)->AsAllocSpace()->Size();
    }
  }
  return total;
}

void Heap::MarkStackAsLive(MarkStack* alloc_stack) {
  // We can just assume everything is inside the alloc_space_'s bitmap since we should only have
  // fresh allocations.
  SpaceBitmap* live_bitmap = alloc_space_->GetLiveBitmap();

  // Empty the allocation stack.
  const size_t count = alloc_stack->Size();
  for (size_t i = 0; i < count; ++i) {
    const Object* obj = alloc_stack->Get(i);
    DCHECK(obj != NULL);
    live_bitmap->Set(obj);
  }
}

void Heap::UnMarkStack(MarkStack* alloc_stack) {
  SpaceBitmap* mark_bitmap = alloc_space_->GetMarkBitmap();

  // Clear all of the things in the AllocStack.
  size_t count = alloc_stack->Size();
  for (size_t i = 0; i < count; ++i) {
    const Object* obj = alloc_stack->Get(i);
    DCHECK(obj != NULL);
    if (mark_bitmap->Test(obj)) {
      mark_bitmap->Clear(obj);
    }
  }
}

void Heap::UnMarkStackAsLive(MarkStack* alloc_stack) {
  SpaceBitmap* live_bitmap = alloc_space_->GetLiveBitmap();

  // Clear all of the things in the AllocStack.
  size_t count = alloc_stack->Size();
  for (size_t i = 0; i < count; ++i) {
    const Object* obj = alloc_stack->Get(i);
    DCHECK(obj != NULL);
    if (live_bitmap->Test(obj)) {
      live_bitmap->Clear(obj);
    }
  }
}

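// Central GC entry point: serializes collections behind gc_complete_lock_, promotes a sticky GC
// to a partial GC after kPartialGCFrequency consecutive sticky collections, runs the concurrent
// or stop-the-world mark-sweep plan, and returns the GC type that actually ran.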
GcType Heap::CollectGarbageInternal(GcType gc_type, bool clear_soft_references) {
  Locks::mutator_lock_->AssertNotHeld();
#ifndef NDEBUG
  {
    MutexLock mu(*Locks::thread_suspend_count_lock_);
    CHECK_EQ(Thread::Current()->GetState(), kWaitingPerformingGc);
  }
#endif

  // Ensure there is only one GC at a time.
  bool start_collect = false;
  while (!start_collect) {
    {
      MutexLock mu(*gc_complete_lock_);
      if (!is_gc_running_) {
        is_gc_running_ = true;
        start_collect = true;
      }
    }
    if (!start_collect) {
      WaitForConcurrentGcToComplete();
      // TODO: if another thread beat this one to do the GC, perhaps we should just return here?
      //       Not doing at the moment to ensure soft references are cleared.
    }
  }
  gc_complete_lock_->AssertNotHeld();

  // We need to do partial GCs every now and then to avoid the heap growing too much and
  // fragmenting.
  if (gc_type == kGcTypeSticky && ++sticky_gc_count_ > kPartialGCFrequency) {
    gc_type = kGcTypePartial;
  }
  if (gc_type != kGcTypeSticky) {
    sticky_gc_count_ = 0;
  }

  if (concurrent_gc_) {
    CollectGarbageConcurrentMarkSweepPlan(gc_type, clear_soft_references);
  } else {
    CollectGarbageMarkSweepPlan(gc_type, clear_soft_references);
  }

  {
    MutexLock mu(*gc_complete_lock_);
    is_gc_running_ = false;
    last_gc_type_ = gc_type;
    // Wake anyone who may have been waiting for the GC to complete.
    gc_complete_cond_->Broadcast();
  }
  // Inform DDMS that a GC completed.
  Dbg::GcDidFinish();
  return gc_type;
}

void Heap::CollectGarbageMarkSweepPlan(GcType gc_type, bool clear_soft_references) {
  TimingLogger timings("CollectGarbageInternal", true);

  std::stringstream gc_type_str;
  gc_type_str << gc_type << " ";

  // Suspend all threads and get exclusive access to the heap.
  uint64_t start_time = NanoTime();
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  timings.AddSplit("SuspendAll");
  Locks::mutator_lock_->AssertExclusiveHeld();

  size_t bytes_freed = 0;
  Object* cleared_references = NULL;
  {
    MarkSweep mark_sweep(mark_stack_.get());

    mark_sweep.Init();
    timings.AddSplit("Init");

    // Pre verify the heap
    if (pre_gc_verify_heap_) {
      WriterMutexLock mu(*Locks::heap_bitmap_lock_);
      VerifyHeapReferences(std::string("Pre ") + gc_type_str.str() + "Gc");
      timings.AddSplit("VerifyHeapReferencesPreGC");
    }

    // Make sure that the tables have the correct pointer for the mark sweep.
    mod_union_table_->Init(&mark_sweep);
    zygote_mod_union_table_->Init(&mark_sweep);

    // Swap allocation stack and live stack, enabling us to have new allocations during this GC.
    MarkStack* temp = allocation_stack_.release();
    allocation_stack_.reset(live_stack_.release());
    live_stack_.reset(temp);

    // We will need to know which cards were dirty for doing concurrent processing of dirty cards.
    // TODO: Investigate using a mark stack instead of a vector.
    std::vector<byte*> dirty_cards;
    if (gc_type == kGcTypeSticky) {
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        card_table_->GetDirtyCards(*it, dirty_cards);
      }
    }

    // Clear image space cards and keep track of cards we cleared in the mod-union table.
    for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
      Space* space = *it;
      if (space->IsImageSpace()) {
        mod_union_table_->ClearCards(*it);
        timings.AddSplit("ClearModUnionCards");
      } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
        zygote_mod_union_table_->ClearCards(space);
        timings.AddSplit("ClearZygoteCards");
      } else {
        card_table_->ClearSpaceCards(space);
        timings.AddSplit("ClearCards");
      }
    }

    WriterMutexLock mu(*Locks::heap_bitmap_lock_);
    if (gc_type == kGcTypePartial) {
      // Copy the mark bits over from the live bits, do this as early as possible or else we can
      // accidentally un-mark roots.
      // Needed for scanning dirty objects.
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        if ((*it)->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
          mark_sweep.CopyMarkBits(*it);
        }
      }
      timings.AddSplit("CopyMarkBits");

      // We can assume that everything < alloc_space_ start is marked at this point.
      mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
    } else if (gc_type == kGcTypeSticky) {
      for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
        if ((*it)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
          mark_sweep.CopyMarkBits(*it);
        }
      }
      timings.AddSplit("CopyMarkBits");

      mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
    }

    MarkStackAsLive(live_stack_.get());

    if (gc_type != kGcTypeSticky) {
      live_stack_->Reset();
    }

    mark_sweep.MarkRoots();
    timings.AddSplit("MarkRoots");

    // Roots are marked on the bitmap and the mark_stack is empty.
    DCHECK(mark_sweep.IsMarkStackEmpty());

    UpdateAndMarkModUnion(timings, gc_type);

    if (verify_mod_union_table_) {
      zygote_mod_union_table_->Update();
      zygote_mod_union_table_->Verify();
      mod_union_table_->Update();
      mod_union_table_->Verify();
    }

    // Recursively mark all the non-image bits set in the mark bitmap.
    if (gc_type != kGcTypeSticky) {
      live_stack_->Reset();
      mark_sweep.RecursiveMark(gc_type == kGcTypePartial, timings);
    } else {
      mark_sweep.RecursiveMarkCards(card_table_.get(), dirty_cards, timings);
    }
    mark_sweep.DisableFinger();

    // Need to process references before the swap since it uses IsMarked.
Ian Rogers30fab402012-01-23 15:43:46 -0800998 mark_sweep.ProcessReferences(clear_soft_references);
Elliott Hughes307f75d2011-10-12 18:04:40 -0700999 timings.AddSplit("ProcessReferences");
Carl Shapiro58551df2011-07-24 03:09:51 -07001000
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001001 // This doesn't work with mutators unpaused for some reason, TODO: Fix.
1002 mark_sweep.SweepSystemWeaks(false);
1003 timings.AddSplit("SweepSystemWeaks");
1004
1005 // Need to swap for VERIFY_OBJECT_ENABLED since we put things in the live bitmap after they
1006 // have been allocated.
1007 const bool swap = true;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001008 if (swap) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001009 SwapBitmaps();
Mathieu Chartier654d3a22012-07-11 17:54:18 -07001010 }
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001011
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001012#ifndef NDEBUG
Mathieu Chartier262e5ff2012-06-01 17:35:38 -07001013 // Verify that we only reach marked objects from the image space
1014 mark_sweep.VerifyImageRoots();
1015 timings.AddSplit("VerifyImageRoots");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001016#endif
Carl Shapiro58551df2011-07-24 03:09:51 -07001017
Mathieu Chartier0325e622012-09-05 14:22:51 -07001018 if (gc_type != kGcTypeSticky) {
1019 mark_sweep.Sweep(gc_type == kGcTypePartial, swap);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001020 } else {
1021 mark_sweep.SweepArray(timings, live_stack_.get(), swap);
1022 }
Elliott Hughes307f75d2011-10-12 18:04:40 -07001023 timings.AddSplit("Sweep");
Elliott Hughesadb460d2011-10-05 17:02:34 -07001024
1025 cleared_references = mark_sweep.GetClearedReferences();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001026 bytes_freed = mark_sweep.GetFreedBytes();
Carl Shapiro58551df2011-07-24 03:09:51 -07001027 }
1028
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001029 // Post gc verify the heap
1030 if (post_gc_verify_heap_) {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001031 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001032 VerifyHeapReferences(std::string("Post ") + gc_type_str.str() + "Gc");
1033 timings.AddSplit("VerifyHeapReferencesPostGC");
1034 }
1035
Carl Shapiro58551df2011-07-24 03:09:51 -07001036 GrowForUtilization();
Elliott Hughes307f75d2011-10-12 18:04:40 -07001037 timings.AddSplit("GrowForUtilization");
Mathieu Chartierb43b7d42012-06-19 13:15:09 -07001038
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001039 thread_list->ResumeAll();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001040 timings.AddSplit("ResumeAll");
Elliott Hughesadb460d2011-10-05 17:02:34 -07001041
1042 EnqueueClearedReferences(&cleared_references);
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001043 RequestHeapTrim();
Mathieu Chartier662618f2012-06-06 12:01:47 -07001044 timings.AddSplit("Finish");
Elliott Hughes83df2ac2011-10-11 16:37:54 -07001045
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001046 // If the GC was slow, then print timings in the log.
1047 uint64_t duration = (NanoTime() - start_time) / 1000 * 1000;
1048 if (duration > MsToNs(50)) {
Mathieu Chartier637e3482012-08-17 10:41:32 -07001049 const size_t percent_free = GetPercentFree();
Mathieu Chartier1cd9c5c2012-08-23 10:52:44 -07001050 const size_t current_heap_size = GetUsedMemorySize();
Mathieu Chartier637e3482012-08-17 10:41:32 -07001051 const size_t total_memory = GetTotalMemory();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001052 LOG(INFO) << gc_type_str.str() << " "
Mathieu Chartier637e3482012-08-17 10:41:32 -07001053 << "GC freed " << PrettySize(bytes_freed) << ", " << percent_free << "% free, "
Mathieu Chartier1cd9c5c2012-08-23 10:52:44 -07001054 << PrettySize(current_heap_size) << "/" << PrettySize(total_memory) << ", "
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001055 << "paused " << PrettyDuration(duration);
Mathieu Chartier0325e622012-09-05 14:22:51 -07001056 if (VLOG_IS_ON(heap)) {
1057 timings.Dump();
1058 }
Brian Carlstrom6b4ef022011-10-23 14:59:04 -07001059 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001060
Mathieu Chartier0325e622012-09-05 14:22:51 -07001061 CumulativeLogger* logger = cumulative_timings_.Get(gc_type);
1062 logger->Start();
1063 logger->AddLogger(timings);
1064 logger->End(); // Next iteration.
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001065}
Mathieu Chartiera6399032012-06-11 18:49:50 -07001066
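// Roughly, based on the code below: the mod-union tables remember which image and zygote space
// cards were cleared before marking, so that references from those spaces into the alloc space
// can be re-scanned here without leaving the cards dirty. Sticky GCs skip this entirely because
// they re-scan dirty cards directly.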
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001067void Heap::UpdateAndMarkModUnion(TimingLogger& timings, GcType gc_type) {
Mathieu Chartier0325e622012-09-05 14:22:51 -07001068 if (gc_type == kGcTypeSticky) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001069  // Don't need to do anything for the mod union table in this case since we are only scanning dirty
1070 // cards.
1071 return;
1072 }
1073
1074 // Update zygote mod union table.
Mathieu Chartier0325e622012-09-05 14:22:51 -07001075 if (gc_type == kGcTypePartial) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001076 zygote_mod_union_table_->Update();
1077 timings.AddSplit("UpdateZygoteModUnionTable");
1078
1079 zygote_mod_union_table_->MarkReferences();
1080 timings.AddSplit("ZygoteMarkReferences");
1081 }
1082
1083 // Processes the cards we cleared earlier and adds their objects into the mod-union table.
1084 mod_union_table_->Update();
1085 timings.AddSplit("UpdateModUnionTable");
1086
1087 // Scans all objects in the mod-union table.
1088 mod_union_table_->MarkReferences();
1089 timings.AddSplit("MarkImageToAllocSpaceReferences");
1090}
1091
1092void Heap::RootMatchesObjectVisitor(const Object* root, void* arg) {
1093 Object* obj = reinterpret_cast<Object*>(arg);
1094 if (root == obj) {
1095 LOG(INFO) << "Object " << obj << " is a root";
1096 }
1097}
1098
1099class ScanVisitor {
1100 public:
1101 void operator ()(const Object* obj) const {
1102 LOG(INFO) << "Would have rescanned object " << obj;
1103 }
1104};
1105
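// Verifies that each reference held by an object points at a live object, where "live" means
// present in a live bitmap or on the allocation stack. On failure it dumps card table,
// live/allocation stack, and mark stack diagnostics and sets *failed_.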
1106class VerifyReferenceVisitor {
1107 public:
1108 VerifyReferenceVisitor(Heap* heap, bool* failed)
Ian Rogersb726dcb2012-09-05 08:57:23 -07001109 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
1110 Locks::heap_bitmap_lock_)
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001111 : heap_(heap),
1112 failed_(failed) {
1113 }
1114
1115 // TODO: Fix lock analysis to not use NO_THREAD_SAFETY_ANALYSIS, requires support for smarter
1116 // analysis.
1117 void operator ()(const Object* obj, const Object* ref, const MemberOffset& /* offset */,
1118 bool /* is_static */) const NO_THREAD_SAFETY_ANALYSIS {
1119 // Verify that the reference is live.
1120 if (ref != NULL && !IsLive(ref)) {
1121 CardTable* card_table = heap_->GetCardTable();
1122 MarkStack* alloc_stack = heap_->allocation_stack_.get();
1123 MarkStack* live_stack = heap_->live_stack_.get();
1124
1125 // Print the cards around our object
1126 byte* card_addr = card_table->CardFromAddr(obj);
1127 LOG(INFO) << "Object " << obj << " references dead object " << ref << " on IsDirty = "
1128 << (*card_addr == GC_CARD_DIRTY);
1129 void* cover_begin = card_table->AddrFromCard(card_addr);
1130 void* cover_end = reinterpret_cast<void*>(reinterpret_cast<size_t>(cover_begin) +
1131 GC_CARD_SIZE);
1132 LOG(INFO) << "Card " << reinterpret_cast<void*>(card_addr) << " covers " << cover_begin
1133 << "-" << cover_end;
1134 SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetSpaceBitmap(obj);
1135
1136 // Print out how the object is live.
1137 if (bitmap->Test(obj)) {
1138 LOG(INFO) << "Object " << obj << " found in live bitmap";
1139 } else if (std::binary_search(alloc_stack->Begin(), alloc_stack->End(), obj)) {
1140 LOG(INFO) << "Object " << obj << " found in allocation stack";
1141 }
1142
1143 if (std::binary_search(live_stack->Begin(), live_stack->End(), ref)) {
1144 LOG(INFO) << "Reference " << ref << " found in live stack!";
1145 }
1146
1147 // Attempt to see if the card table missed the reference.
1148 ScanVisitor scan_visitor;
1149 byte* byte_cover_begin = reinterpret_cast<byte*>(card_table->AddrFromCard(card_addr));
1150 card_table->Scan(bitmap, byte_cover_begin, byte_cover_begin + GC_CARD_SIZE, scan_visitor,
1151 IdentityFunctor());
1152
1153      // Try to see whether a mark sweep collector scans the reference.
1154 MarkStack* mark_stack = heap_->mark_stack_.get();
1155 MarkSweep ms(mark_stack);
1156 ms.Init();
1157 mark_stack->Reset();
1158 ms.SetFinger(reinterpret_cast<Object*>(~size_t(0)));
1159 // All the references should end up in the mark stack.
1160 ms.ScanRoot(obj);
1161      if (std::find(mark_stack->Begin(), mark_stack->End(), ref) != mark_stack->End()) {
1162 LOG(INFO) << "Ref found in the mark_stack when rescanning the object!";
1163 } else {
1164 LOG(INFO) << "Dumping mark stack contents";
1165 for (Object** it = mark_stack->Begin(); it != mark_stack->End(); ++it) {
1166 LOG(INFO) << *it;
1167 }
1168 }
1169 mark_stack->Reset();
1170
1171 // Search to see if any of the roots reference our object.
1172 void* arg = const_cast<void*>(reinterpret_cast<const void*>(obj));
1173 Runtime::Current()->VisitRoots(&Heap::RootMatchesObjectVisitor, arg);
1174 *failed_ = true;
1175 }
1176 }
1177
1178 bool IsLive(const Object* obj) const NO_THREAD_SAFETY_ANALYSIS {
1179 SpaceBitmap* bitmap = heap_->GetLiveBitmap()->GetSpaceBitmap(obj);
1180 if (bitmap != NULL) {
1181 if (bitmap->Test(obj)) {
1182 return true;
1183 }
1184 } else {
1185 heap_->DumpSpaces();
1186 LOG(FATAL) << "Object " << obj << " not found in any spaces";
1187 }
1188 MarkStack* alloc_stack = heap_->allocation_stack_.get();
1189    // At this point we need to search the allocation stack since things in the live stack may get swept.
1190 if (std::binary_search(alloc_stack->Begin(), alloc_stack->End(), const_cast<Object*>(obj))) {
1191 return true;
1192 }
1193    // Not in either the live bitmap or the allocation stack, so the object must be dead.
1194 return false;
1195 }
1196
1197 private:
1198 Heap* heap_;
1199 bool* failed_;
1200};
1201
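// Applies VerifyReferenceVisitor to every reference field of each object it visits; Failed()
// reports whether any dead reference was found.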
1202class VerifyObjectVisitor {
1203 public:
1204 VerifyObjectVisitor(Heap* heap)
1205 : heap_(heap),
1206 failed_(false) {
1207
1208 }
1209
1210 void operator ()(const Object* obj) const
Ian Rogersb726dcb2012-09-05 08:57:23 -07001211 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001212 VerifyReferenceVisitor visitor(heap_, const_cast<bool*>(&failed_));
1213 MarkSweep::VisitObjectReferences(obj, visitor);
1214 }
1215
1216 bool Failed() const {
1217 return failed_;
1218 }
1219
1220 private:
1221 Heap* heap_;
1222 bool failed_;
1223};
1224
1225// Must do this with mutators suspended since we are directly accessing the allocation stacks.
1226void Heap::VerifyHeapReferences(const std::string& phase) {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001227 Locks::mutator_lock_->AssertExclusiveHeld();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001228  // Sort the allocation and live stacks so that we can binary search them efficiently.
1229 std::sort(allocation_stack_->Begin(), allocation_stack_->End());
1230 std::sort(live_stack_->Begin(), live_stack_->End());
1231 // Perform the verification.
1232 VerifyObjectVisitor visitor(this);
1233 GetLiveBitmap()->Visit(visitor);
1234 // We don't want to verify the objects in the allocation stack since they themselves may be
1235 // pointing to dead objects if they are not reachable.
1236 if (visitor.Failed()) {
1237 DumpSpaces();
1238 LOG(FATAL) << phase << " heap verification failed";
1239 }
1240}
1241
1242void Heap::SwapBitmaps() {
1243 // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
1244 // these bitmaps. Doing this enables us to sweep with the heap unlocked since new allocations
1245 // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark bit
1246 // instead, resulting in no new allocated objects being incorrectly freed by sweep.
Ian Rogersb726dcb2012-09-05 08:57:23 -07001247 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001248 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
1249 Space* space = *it;
1250 // We never allocate into zygote spaces.
1251 if (space->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
1252 live_bitmap_->ReplaceBitmap(space->GetLiveBitmap(), space->GetMarkBitmap());
1253 mark_bitmap_->ReplaceBitmap(space->GetMarkBitmap(), space->GetLiveBitmap());
1254 space->AsAllocSpace()->SwapBitmaps();
1255 }
1256 }
1257}
1258
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001259void Heap::CollectGarbageConcurrentMarkSweepPlan(GcType gc_type, bool clear_soft_references) {
1260 TimingLogger timings("ConcurrentCollectGarbageInternal", true);
1261 uint64_t root_begin = NanoTime(), root_end = 0, dirty_begin = 0, dirty_end = 0;
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001262 std::stringstream gc_type_str;
1263 gc_type_str << gc_type << " ";
Mathieu Chartiera6399032012-06-11 18:49:50 -07001264
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001265  // Suspend all threads and get exclusive access to the heap.
1266 ThreadList* thread_list = Runtime::Current()->GetThreadList();
1267 thread_list->SuspendAll();
1268 timings.AddSplit("SuspendAll");
Ian Rogersb726dcb2012-09-05 08:57:23 -07001269 Locks::mutator_lock_->AssertExclusiveHeld();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001270
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001271 size_t bytes_freed = 0;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001272 Object* cleared_references = NULL;
1273 {
1274 MarkSweep mark_sweep(mark_stack_.get());
1275 timings.AddSplit("ctor");
1276
1277 mark_sweep.Init();
1278 timings.AddSplit("Init");
1279
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001280    // Pre-GC heap verification.
1281 if (pre_gc_verify_heap_) {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001282 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001283 VerifyHeapReferences(std::string("Pre ") + gc_type_str.str() + "Gc");
1284 timings.AddSplit("VerifyHeapReferencesPreGC");
1285 }
1286
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001287    // Swap the stacks; this is safe since all the mutators are suspended at this point.
1288 MarkStack* temp = allocation_stack_.release();
1289 allocation_stack_.reset(live_stack_.release());
1290 live_stack_.reset(temp);
1291
1292 // We will need to know which cards were dirty for doing concurrent processing of dirty cards.
1293 // TODO: Investigate using a mark stack instead of a vector.
1294 std::vector<byte*> dirty_cards;
Mathieu Chartier0325e622012-09-05 14:22:51 -07001295 if (gc_type == kGcTypeSticky) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001296 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
1297 card_table_->GetDirtyCards(*it, dirty_cards);
1298 }
1299 }
1300
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001301 // Make sure that the tables have the correct pointer for the mark sweep.
1302 mod_union_table_->Init(&mark_sweep);
1303 zygote_mod_union_table_->Init(&mark_sweep);
1304
1305 // Clear image space cards and keep track of cards we cleared in the mod-union table.
1306 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
1307 Space* space = *it;
1308 if (space->IsImageSpace()) {
1309 mod_union_table_->ClearCards(*it);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001310 timings.AddSplit("ModUnionClearCards");
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001311 } else if (space->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
1312 zygote_mod_union_table_->ClearCards(space);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001313 timings.AddSplit("ZygoteModUnionClearCards");
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001314 } else {
1315 card_table_->ClearSpaceCards(space);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001316 timings.AddSplit("ClearCards");
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001317 }
1318 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001319
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001320 {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001321 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001322
Mathieu Chartier0325e622012-09-05 14:22:51 -07001323 if (gc_type == kGcTypePartial) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001324        // Copy the mark bits over from the live bits; do this as early as possible or else we can
1325 // accidentally un-mark roots.
1326 // Needed for scanning dirty objects.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001327 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001328 if ((*it)->GetGcRetentionPolicy() == GCRP_FULL_COLLECT) {
1329 mark_sweep.CopyMarkBits(*it);
1330 }
1331 }
1332 timings.AddSplit("CopyMarkBits");
1333 mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
Mathieu Chartier0325e622012-09-05 14:22:51 -07001334 } else if (gc_type == kGcTypeSticky) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001335 for (Spaces::iterator it = spaces_.begin(); it != spaces_.end(); ++it) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001336 if ((*it)->GetGcRetentionPolicy() != GCRP_NEVER_COLLECT) {
1337 mark_sweep.CopyMarkBits(*it);
1338 }
1339 }
1340 timings.AddSplit("CopyMarkBits");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001341 mark_sweep.SetCondemned(reinterpret_cast<Object*>(alloc_space_->Begin()));
1342 }
1343
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001344      // Mark everything as live so that sweeping system weaks works correctly for sticky mark bit
1345 // GCs.
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001346 MarkStackAsLive(live_stack_.get());
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001347 timings.AddSplit("MarkStackAsLive");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001348
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001349 // TODO: Investigate whether or not this is really necessary for sticky mark bits.
Mathieu Chartier0325e622012-09-05 14:22:51 -07001350 if (gc_type != kGcTypeSticky) {
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001351 live_stack_->Reset();
1352 mark_sweep.MarkRoots();
1353 timings.AddSplit("MarkRoots");
1354 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001355
1356 if (verify_mod_union_table_) {
1357 zygote_mod_union_table_->Update();
1358 zygote_mod_union_table_->Verify();
1359 mod_union_table_->Update();
1360 mod_union_table_->Verify();
1361 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001362 }
1363
1364 // Roots are marked on the bitmap and the mark_stack is empty.
1365 DCHECK(mark_sweep.IsMarkStackEmpty());
1366
1367    // Allow mutators to run again; acquire shared access to mutator_lock_ to continue.
1368 thread_list->ResumeAll();
1369 {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001370 ReaderMutexLock reader_lock(*Locks::mutator_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001371 root_end = NanoTime();
1372 timings.AddSplit("RootEnd");
1373
Ian Rogersb726dcb2012-09-05 08:57:23 -07001374 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001375 UpdateAndMarkModUnion(timings, gc_type);
Mathieu Chartier0325e622012-09-05 14:22:51 -07001376 if (gc_type != kGcTypeSticky) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001377 // Recursively mark all the non-image bits set in the mark bitmap.
Mathieu Chartier0325e622012-09-05 14:22:51 -07001378 mark_sweep.RecursiveMark(gc_type == kGcTypePartial, timings);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001379 } else {
1380 mark_sweep.RecursiveMarkCards(card_table_.get(), dirty_cards, timings);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001381 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001382 mark_sweep.DisableFinger();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001383 }
1384 // Release share on mutator_lock_ and then get exclusive access.
1385 dirty_begin = NanoTime();
1386 thread_list->SuspendAll();
1387 timings.AddSplit("ReSuspend");
Ian Rogersb726dcb2012-09-05 08:57:23 -07001388 Locks::mutator_lock_->AssertExclusiveHeld();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001389
1390 {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001391 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001392
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001393 // Re-mark root set.
1394 mark_sweep.ReMarkRoots();
1395 timings.AddSplit("ReMarkRoots");
1396
1397      // Scan dirty objects to re-mark anything the mutators modified while marking ran concurrently.
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001398 mark_sweep.RecursiveMarkDirtyObjects(false);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001399 timings.AddSplit("RecursiveMarkDirtyObjects");
1400 }
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001401
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001402 {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001403 ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001404 mark_sweep.ProcessReferences(clear_soft_references);
1405 timings.AddSplit("ProcessReferences");
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001406
1407      // This doesn't work with mutators unpaused for some reason; TODO: fix.
1408 mark_sweep.SweepSystemWeaks(false);
1409 timings.AddSplit("SweepSystemWeaks");
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001410 }
1411 // Swap the live and mark bitmaps for each alloc space. This is needed since sweep re-swaps
1412 // these bitmaps. Doing this enables us to sweep with the heap unlocked since new allocations
1413 // set the live bit, but since we have the bitmaps reversed at this point, this sets the mark
1414 // bit instead, resulting in no new allocated objects being incorrectly freed by sweep.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001415 const bool swap = true;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001416 if (swap) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001417 SwapBitmaps();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001418 }
1419
1420 if (kIsDebugBuild) {
1421 // Verify that we only reach marked objects from the image space.
Ian Rogersb726dcb2012-09-05 08:57:23 -07001422 ReaderMutexLock mu(*Locks::heap_bitmap_lock_);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001423 mark_sweep.VerifyImageRoots();
1424 timings.AddSplit("VerifyImageRoots");
1425 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001426
Mathieu Chartier0325e622012-09-05 14:22:51 -07001427 if (gc_type == kGcTypeSticky) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001428 // We only sweep over the live stack, and the live stack should not intersect with the
1429 // allocation stack, so it should be safe to UnMark anything in the allocation stack as live.
1430 // This only works for sticky Gcs though!
1431 UnMarkStackAsLive(allocation_stack_.get());
1432 }
1433 timings.AddSplit("UnMarkStacks");
1434
1435    // If we are going to do post-GC verification, let's keep the mutators paused since we don't
1436 // want them to touch dead objects before we find these in verification.
1437 if (post_gc_verify_heap_) {
Ian Rogersb726dcb2012-09-05 08:57:23 -07001438 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001439 VerifyHeapReferences(std::string("Post ") + gc_type_str.str() + "Gc");
1440 timings.AddSplit("VerifyHeapReferencesPostGC");
1441 }
1442
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001443 thread_list->ResumeAll();
1444 dirty_end = NanoTime();
Ian Rogersb726dcb2012-09-05 08:57:23 -07001445 Locks::mutator_lock_->AssertNotHeld();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001446
1447 {
1448 // TODO: this lock shouldn't be necessary (it's why we did the bitmap flip above).
Ian Rogersb726dcb2012-09-05 08:57:23 -07001449 WriterMutexLock mu(*Locks::heap_bitmap_lock_);
Mathieu Chartier0325e622012-09-05 14:22:51 -07001450 if (gc_type != kGcTypeSticky) {
1451 mark_sweep.Sweep(gc_type == kGcTypePartial, swap);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001452 } else {
1453 mark_sweep.SweepArray(timings, live_stack_.get(), swap);
1454 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001455 timings.AddSplit("Sweep");
1456 }
1457
1458 cleared_references = mark_sweep.GetClearedReferences();
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001459 bytes_freed = mark_sweep.GetFreedBytes();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001460 }
1461
1462 GrowForUtilization();
1463 timings.AddSplit("GrowForUtilization");
1464
1465 EnqueueClearedReferences(&cleared_references);
1466 RequestHeapTrim();
1467 timings.AddSplit("Finish");
1468
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001469 // If the GC was slow, then print timings in the log.
1470 uint64_t pause_roots = (root_end - root_begin) / 1000 * 1000;
1471 uint64_t pause_dirty = (dirty_end - dirty_begin) / 1000 * 1000;
Mathieu Chartier637e3482012-08-17 10:41:32 -07001472 uint64_t duration = (NanoTime() - root_begin) / 1000 * 1000;
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001473 if (pause_roots > MsToNs(5) || pause_dirty > MsToNs(5)) {
Mathieu Chartier637e3482012-08-17 10:41:32 -07001474 const size_t percent_free = GetPercentFree();
Mathieu Chartier1cd9c5c2012-08-23 10:52:44 -07001475 const size_t current_heap_size = GetUsedMemorySize();
Mathieu Chartier637e3482012-08-17 10:41:32 -07001476 const size_t total_memory = GetTotalMemory();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001477 LOG(INFO) << gc_type_str.str()
Mathieu Chartier637e3482012-08-17 10:41:32 -07001478 << "Concurrent GC freed " << PrettySize(bytes_freed) << ", " << percent_free
Mathieu Chartier1cd9c5c2012-08-23 10:52:44 -07001479 << "% free, " << PrettySize(current_heap_size) << "/"
Mathieu Chartier637e3482012-08-17 10:41:32 -07001480 << PrettySize(total_memory) << ", " << "paused " << PrettyDuration(pause_roots)
1481 << "+" << PrettyDuration(pause_dirty) << " total " << PrettyDuration(duration);
Mathieu Chartier0325e622012-09-05 14:22:51 -07001482
1483 if (VLOG_IS_ON(heap)) {
1484 timings.Dump();
1485 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001486 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001487
Mathieu Chartier0325e622012-09-05 14:22:51 -07001488 CumulativeLogger* logger = cumulative_timings_.Get(gc_type);
1489 logger->Start();
1490 logger->AddLogger(timings);
1491 logger->End(); // Next iteration.
Carl Shapiro69759ea2011-07-21 18:13:35 -07001492}
1493
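// Blocks until any in-progress concurrent GC finishes. Returns the type of the GC that was
// waited on, or kGcTypeNone if there was nothing to wait for.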
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001494GcType Heap::WaitForConcurrentGcToComplete() {
1495 GcType last_gc_type = kGcTypeNone;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001496 if (concurrent_gc_) {
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001497 bool do_wait;
1498 uint64_t wait_start = NanoTime();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001499 {
1500 // Check if GC is running holding gc_complete_lock_.
1501 MutexLock mu(*gc_complete_lock_);
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001502 do_wait = is_gc_running_;
Mathieu Chartiera6399032012-06-11 18:49:50 -07001503 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001504 if (do_wait) {
1505      // We must wait: change the thread state, then sleep on gc_complete_cond_.
1506 ScopedThreadStateChange tsc(Thread::Current(), kWaitingForGcToComplete);
1507 {
1508 MutexLock mu(*gc_complete_lock_);
1509 while (is_gc_running_) {
1510 gc_complete_cond_->Wait(*gc_complete_lock_);
1511 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001512 last_gc_type = last_gc_type_;
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001513 }
1514 uint64_t wait_time = NanoTime() - wait_start;
1515 if (wait_time > MsToNs(5)) {
1516 LOG(INFO) << "WaitForConcurrentGcToComplete blocked for " << PrettyDuration(wait_time);
1517 }
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001518 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001519 }
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001520 return last_gc_type;
Carl Shapiro69759ea2011-07-21 18:13:35 -07001521}
1522
Elliott Hughesc967f782012-04-16 10:23:15 -07001523void Heap::DumpForSigQuit(std::ostream& os) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001524 os << "Heap: " << GetPercentFree() << "% free, " << PrettySize(num_bytes_allocated_) << "/"
1525 << PrettySize(GetTotalMemory()) << "; " << num_objects_allocated_ << " objects\n";
Mathieu Chartier0325e622012-09-05 14:22:51 -07001526 // Dump cumulative timings.
1527 LOG(INFO) << "Dumping cumulative Gc timings";
1528 for (CumulativeTimings::iterator it = cumulative_timings_.begin();
1529 it != cumulative_timings_.end(); ++it) {
1530 it->second->Dump();
1531 }
Elliott Hughesc967f782012-04-16 10:23:15 -07001532}
1533
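// Computes free space as a percentage of GetTotalMemory(). Illustrative numbers: with 12MB
// allocated out of a 16MB total this returns 100 - 75 = 25.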
1534size_t Heap::GetPercentFree() {
1535 size_t total = GetTotalMemory();
1536 return 100 - static_cast<size_t>(100.0f * static_cast<float>(num_bytes_allocated_) / total);
1537}
1538
Elliott Hughes4dd9b4d2011-12-12 18:29:24 -08001539void Heap::SetIdealFootprint(size_t max_allowed_footprint) {
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001540 AllocSpace* alloc_space = alloc_space_;
1541 // TODO: Behavior for multiple alloc spaces?
1542 size_t alloc_space_capacity = alloc_space->Capacity();
1543 if (max_allowed_footprint > alloc_space_capacity) {
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001544 VLOG(gc) << "Clamp target GC heap from " << PrettySize(max_allowed_footprint) << " to "
1545 << PrettySize(alloc_space_capacity);
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001546 max_allowed_footprint = alloc_space_capacity;
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001547 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001548 alloc_space->SetFootprintLimit(max_allowed_footprint);
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001549}
1550
Ian Rogers3bb17a62012-01-27 23:56:44 -08001551// kHeapIdealFree is the ideal maximum free size, when we grow the heap for utilization.
Shih-wei Liao7f1caab2011-10-06 12:11:04 -07001552static const size_t kHeapIdealFree = 2 * MB;
Ian Rogers3bb17a62012-01-27 23:56:44 -08001553// kHeapMinFree guarantees that you always have at least 512 KB free, when you grow for utilization,
1554// regardless of target utilization ratio.
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001555static const size_t kHeapMinFree = kHeapIdealFree / 4;
1556
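// Worked example (illustrative numbers): with a target utilization of 0.5 and 3MB currently
// allocated, the raw target below is 6MB; since that exceeds allocated + kHeapIdealFree
// (3MB + 2MB), the target is clamped down to 5MB.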
Carl Shapiro69759ea2011-07-21 18:13:35 -07001557void Heap::GrowForUtilization() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001558 size_t target_size;
1559 bool use_footprint_limit = false;
1560 {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001561 // We know what our utilization is at this moment.
1562 // This doesn't actually resize any memory. It just lets the heap grow more when necessary.
1563 target_size = num_bytes_allocated_ / Heap::GetTargetHeapUtilization();
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001564
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001565 if (target_size > num_bytes_allocated_ + kHeapIdealFree) {
1566 target_size = num_bytes_allocated_ + kHeapIdealFree;
1567 } else if (target_size < num_bytes_allocated_ + kHeapMinFree) {
1568 target_size = num_bytes_allocated_ + kHeapMinFree;
1569 }
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001570
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001571 // Calculate when to perform the next ConcurrentGC.
Mathieu Chartier1cd9c5c2012-08-23 10:52:44 -07001572 if (GetTotalMemory() - GetUsedMemorySize() < concurrent_min_free_) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001573 // Not enough free memory to perform concurrent GC.
1574 concurrent_start_bytes_ = std::numeric_limits<size_t>::max();
1575 } else {
1576      // Compute concurrent_start_bytes_ below to avoid holding both the statistics and the alloc space locks.
1577 use_footprint_limit = true;
1578 }
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001579 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001580
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001581 if (use_footprint_limit) {
1582 size_t foot_print_limit = alloc_space_->GetFootprintLimit();
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001583 concurrent_start_bytes_ = foot_print_limit - concurrent_start_size_;
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001584 }
Shih-wei Liao8c2f6412011-10-03 22:58:14 -07001585 SetIdealFootprint(target_size);
Carl Shapiro69759ea2011-07-21 18:13:35 -07001586}
1587
jeffhaoc1160702011-10-27 15:48:45 -07001588void Heap::ClearGrowthLimit() {
jeffhaoc1160702011-10-27 15:48:45 -07001589 WaitForConcurrentGcToComplete();
jeffhaoc1160702011-10-27 15:48:45 -07001590 alloc_space_->ClearGrowthLimit();
1591}
1592
Elliott Hughesadb460d2011-10-05 17:02:34 -07001593void Heap::SetReferenceOffsets(MemberOffset reference_referent_offset,
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001594 MemberOffset reference_queue_offset,
1595 MemberOffset reference_queueNext_offset,
1596 MemberOffset reference_pendingNext_offset,
1597 MemberOffset finalizer_reference_zombie_offset) {
Elliott Hughesadb460d2011-10-05 17:02:34 -07001598 reference_referent_offset_ = reference_referent_offset;
1599 reference_queue_offset_ = reference_queue_offset;
1600 reference_queueNext_offset_ = reference_queueNext_offset;
1601 reference_pendingNext_offset_ = reference_pendingNext_offset;
1602 finalizer_reference_zombie_offset_ = finalizer_reference_zombie_offset;
1603 CHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1604 CHECK_NE(reference_queue_offset_.Uint32Value(), 0U);
1605 CHECK_NE(reference_queueNext_offset_.Uint32Value(), 0U);
1606 CHECK_NE(reference_pendingNext_offset_.Uint32Value(), 0U);
1607 CHECK_NE(finalizer_reference_zombie_offset_.Uint32Value(), 0U);
1608}
1609
1610Object* Heap::GetReferenceReferent(Object* reference) {
1611 DCHECK(reference != NULL);
1612 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1613 return reference->GetFieldObject<Object*>(reference_referent_offset_, true);
1614}
1615
1616void Heap::ClearReferenceReferent(Object* reference) {
1617 DCHECK(reference != NULL);
1618 DCHECK_NE(reference_referent_offset_.Uint32Value(), 0U);
1619 reference->SetFieldObject(reference_referent_offset_, NULL, true);
1620}
1621
1622// Returns true if the reference object has not yet been enqueued.
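// A reference is enqueuable when it was registered with a queue (queue field non-null) but its
// queueNext link has not yet been set, i.e. it has not yet been added to that queue.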
1623bool Heap::IsEnqueuable(const Object* ref) {
1624 DCHECK(ref != NULL);
1625 const Object* queue = ref->GetFieldObject<Object*>(reference_queue_offset_, false);
1626 const Object* queue_next = ref->GetFieldObject<Object*>(reference_queueNext_offset_, false);
1627 return (queue != NULL) && (queue_next == NULL);
1628}
1629
1630void Heap::EnqueueReference(Object* ref, Object** cleared_reference_list) {
1631 DCHECK(ref != NULL);
1632 CHECK(ref->GetFieldObject<Object*>(reference_queue_offset_, false) != NULL);
1633 CHECK(ref->GetFieldObject<Object*>(reference_queueNext_offset_, false) == NULL);
1634 EnqueuePendingReference(ref, cleared_reference_list);
1635}
1636
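// The pending list is a circular singly-linked list threaded through pendingNext. *list holds
// the first reference enqueued; new references are spliced in at (*list)->pendingNext, which is
// also where DequeuePendingReference removes them.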
1637void Heap::EnqueuePendingReference(Object* ref, Object** list) {
1638 DCHECK(ref != NULL);
1639 DCHECK(list != NULL);
1640
1641 if (*list == NULL) {
1642 ref->SetFieldObject(reference_pendingNext_offset_, ref, false);
1643 *list = ref;
1644 } else {
1645 Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
1646 ref->SetFieldObject(reference_pendingNext_offset_, head, false);
1647 (*list)->SetFieldObject(reference_pendingNext_offset_, ref, false);
1648 }
1649}
1650
1651Object* Heap::DequeuePendingReference(Object** list) {
1652 DCHECK(list != NULL);
1653 DCHECK(*list != NULL);
1654 Object* head = (*list)->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
1655 Object* ref;
1656 if (*list == head) {
1657 ref = *list;
1658 *list = NULL;
1659 } else {
1660 Object* next = head->GetFieldObject<Object*>(reference_pendingNext_offset_, false);
1661 (*list)->SetFieldObject(reference_pendingNext_offset_, next, false);
1662 ref = head;
1663 }
1664 ref->SetFieldObject(reference_pendingNext_offset_, NULL, false);
1665 return ref;
1666}
1667
Ian Rogers5d4bdc22011-11-02 22:15:43 -07001668void Heap::AddFinalizerReference(Thread* self, Object* object) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001669 ScopedObjectAccess soa(self);
Elliott Hughes77405792012-03-15 15:22:12 -07001670 JValue args[1];
Elliott Hughesf24d3ce2012-04-11 17:43:37 -07001671 args[0].SetL(object);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001672 soa.DecodeMethod(WellKnownClasses::java_lang_ref_FinalizerReference_add)->Invoke(self, NULL, args,
1673 NULL);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001674}
1675
1676size_t Heap::GetBytesAllocated() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001677 return num_bytes_allocated_;
1678}
1679
1680size_t Heap::GetObjectsAllocated() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001681 return num_objects_allocated_;
1682}
1683
1684size_t Heap::GetConcurrentStartSize() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001685 return concurrent_start_size_;
1686}
1687
1688size_t Heap::GetConcurrentMinFree() const {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001689 return concurrent_min_free_;
Elliott Hughesadb460d2011-10-05 17:02:34 -07001690}
1691
1692void Heap::EnqueueClearedReferences(Object** cleared) {
1693 DCHECK(cleared != NULL);
1694 if (*cleared != NULL) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001695 ScopedObjectAccess soa(Thread::Current());
Elliott Hughes77405792012-03-15 15:22:12 -07001696 JValue args[1];
Elliott Hughesf24d3ce2012-04-11 17:43:37 -07001697 args[0].SetL(*cleared);
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001698 soa.DecodeMethod(WellKnownClasses::java_lang_ref_ReferenceQueue_add)->Invoke(soa.Self(), NULL,
1699 args, NULL);
Elliott Hughesadb460d2011-10-05 17:02:34 -07001700 *cleared = NULL;
1701 }
1702}
1703
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001704void Heap::RequestConcurrentGC() {
Mathieu Chartier069387a2012-06-18 12:01:01 -07001705 // Make sure that we can do a concurrent GC.
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001706 if (requesting_gc_ || !Runtime::Current()->IsFinishedStarting() ||
1707 Runtime::Current()->IsShuttingDown() || !Runtime::Current()->IsConcurrentGcEnabled()) {
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001708 return;
1709 }
1710
1711 requesting_gc_ = true;
1712 JNIEnv* env = Thread::Current()->GetJniEnv();
Mathieu Chartiera6399032012-06-11 18:49:50 -07001713 DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
1714 DCHECK(WellKnownClasses::java_lang_Daemons_requestGC != NULL);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001715 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
1716 WellKnownClasses::java_lang_Daemons_requestGC);
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001717 CHECK(!env->ExceptionCheck());
1718 requesting_gc_ = false;
1719}
1720
1721void Heap::ConcurrentGC() {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001722 if (Runtime::Current()->IsShuttingDown() || !concurrent_gc_) {
Mathieu Chartier2542d662012-06-21 17:14:11 -07001723 return;
1724 }
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001725
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001726  // TODO: We shouldn't need a WaitForConcurrentGcToComplete here since only the
1727  // concurrent GC resumes threads before the GC is completed, and this function
1728  // is only called from within the GC daemon thread.
Mathieu Chartier866fb2a2012-09-10 10:47:49 -07001729 if (WaitForConcurrentGcToComplete() == kGcTypeNone) {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001730 // Start a concurrent GC as one wasn't in progress
1731 ScopedThreadStateChange tsc(Thread::Current(), kWaitingPerformingGc);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001732 if (alloc_space_->Size() > kMinAllocSpaceSizeForStickyGC) {
Mathieu Chartier0325e622012-09-05 14:22:51 -07001733 CollectGarbageInternal(kGcTypeSticky, false);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001734 } else {
Mathieu Chartier0325e622012-09-05 14:22:51 -07001735 CollectGarbageInternal(kGcTypePartial, false);
Mathieu Chartier357e9be2012-08-01 11:00:14 -07001736 }
Mathieu Chartiercc236d72012-07-20 10:29:05 -07001737 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001738}
1739
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001740void Heap::Trim() {
Mathieu Chartiera6399032012-06-11 18:49:50 -07001741 WaitForConcurrentGcToComplete();
Mathieu Chartierfd678be2012-08-30 14:50:54 -07001742 alloc_space_->Trim();
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001743}
1744
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001745void Heap::RequestHeapTrim() {
1746 // We don't have a good measure of how worthwhile a trim might be. We can't use the live bitmap
1747 // because that only marks object heads, so a large array looks like lots of empty space. We
1748  // don't just ask dlmalloc to trim all the time, because the cost of an _attempted_ trim is proportional
1749 // to utilization (which is probably inversely proportional to how much benefit we can expect).
1750 // We could try mincore(2) but that's only a measure of how many pages we haven't given away,
1751 // not how much use we're making of those pages.
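  // Illustrative numbers: with 8MB allocated in a 10MB alloc space, utilization is 0.8, which is
  // above the 0.75 threshold below, so the trim request is skipped.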
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001752 uint64_t ms_time = NsToMs(NanoTime());
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001753 {
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001754 float utilization = static_cast<float>(num_bytes_allocated_) / alloc_space_->Size();
1755 if ((utilization > 0.75f) || ((ms_time - last_trim_time_) < 2 * 1000)) {
1756 // Don't bother trimming the heap if it's more than 75% utilized, or if a
1757 // heap trim occurred in the last two seconds.
1758 return;
1759 }
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001760 }
Mathieu Chartiera6399032012-06-11 18:49:50 -07001761 if (!Runtime::Current()->IsFinishedStarting() || Runtime::Current()->IsShuttingDown()) {
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001762 // Heap trimming isn't supported without a Java runtime or Daemons (such as at dex2oat time)
Mathieu Chartiera6399032012-06-11 18:49:50 -07001763 // Also: we do not wish to start a heap trim if the runtime is shutting down.
Ian Rogerse1d490c2012-02-03 09:09:07 -08001764 return;
1765 }
Mathieu Chartier7664f5c2012-06-08 18:15:32 -07001766 last_trim_time_ = ms_time;
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001767 JNIEnv* env = Thread::Current()->GetJniEnv();
Mathieu Chartiera6399032012-06-11 18:49:50 -07001768 DCHECK(WellKnownClasses::java_lang_Daemons != NULL);
1769 DCHECK(WellKnownClasses::java_lang_Daemons_requestHeapTrim != NULL);
Ian Rogers00f7d0e2012-07-19 15:28:27 -07001770 env->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons,
1771 WellKnownClasses::java_lang_Daemons_requestHeapTrim);
Elliott Hughes8cf5bc02012-02-02 16:32:16 -08001772 CHECK(!env->ExceptionCheck());
1773}
1774
Carl Shapiro69759ea2011-07-21 18:13:35 -07001775} // namespace art