// Copyright 2011 Google Inc. All Rights Reserved.

#include "heap.h"

#include <vector>

#include "UniquePtr.h"
#include "image.h"
#include "mark_sweep.h"
#include "object.h"
#include "space.h"
#include "stl_util.h"
#include "thread_list.h"

namespace art {

std::vector<Space*> Heap::spaces_;

Space* Heap::boot_space_ = NULL;

Space* Heap::alloc_space_ = NULL;

size_t Heap::maximum_size_ = 0;

size_t Heap::num_bytes_allocated_ = 0;

size_t Heap::num_objects_allocated_ = 0;

bool Heap::is_gc_running_ = false;

HeapBitmap* Heap::mark_bitmap_ = NULL;

HeapBitmap* Heap::live_bitmap_ = NULL;

MemberOffset Heap::reference_referent_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queue_offset_ = MemberOffset(0);
MemberOffset Heap::reference_queueNext_offset_ = MemberOffset(0);
MemberOffset Heap::reference_pendingNext_offset_ = MemberOffset(0);
MemberOffset Heap::finalizer_reference_zombie_offset_ = MemberOffset(0);

Mutex* Heap::lock_ = NULL;

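// RAII guard that acquires the global heap lock on construction and releases
// it on destruction.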
class ScopedHeapLock {
 public:
  ScopedHeapLock() {
    Heap::Lock();
  }

  ~ScopedHeapLock() {
    Heap::Unlock();
  }
};

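// Creates the boot image space (if given), any additional image spaces, and
// the allocation space, allocates live and mark bitmaps covering the resulting
// address range, and marks the objects in the image spaces as live.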
void Heap::Init(size_t initial_size, size_t maximum_size,
                const char* boot_image_file_name,
                std::vector<const char*>& image_file_names) {
  Space* boot_space;
  byte* requested_base;
  if (boot_image_file_name == NULL) {
    boot_space = NULL;
    requested_base = NULL;
  } else {
    boot_space = Space::Create(boot_image_file_name);
    if (boot_space == NULL) {
      LOG(FATAL) << "Failed to create space from " << boot_image_file_name;
    }
    spaces_.push_back(boot_space);
    requested_base = boot_space->GetBase() + RoundUp(boot_space->Size(), kPageSize);
  }

  std::vector<Space*> image_spaces;
  for (size_t i = 0; i < image_file_names.size(); i++) {
    Space* space = Space::Create(image_file_names[i]);
    if (space == NULL) {
      LOG(FATAL) << "Failed to create space from " << image_file_names[i];
    }
    image_spaces.push_back(space);
    spaces_.push_back(space);
    requested_base = space->GetBase() + RoundUp(space->Size(), kPageSize);
  }

  Space* space = Space::Create(initial_size, maximum_size, requested_base);
  if (space == NULL) {
    LOG(FATAL) << "Failed to create alloc space";
  }

  if (boot_space == NULL) {
    boot_space = space;
  }
  byte* base = std::min(boot_space->GetBase(), space->GetBase());
  byte* limit = std::max(boot_space->GetLimit(), space->GetLimit());
  DCHECK_LT(base, limit);
  size_t num_bytes = limit - base;

  // Allocate the initial live bitmap.
  UniquePtr<HeapBitmap> live_bitmap(HeapBitmap::Create(base, num_bytes));
  if (live_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create live bitmap";
  }

  // Allocate the initial mark bitmap.
  UniquePtr<HeapBitmap> mark_bitmap(HeapBitmap::Create(base, num_bytes));
  if (mark_bitmap.get() == NULL) {
    LOG(FATAL) << "Failed to create mark bitmap";
  }

  alloc_space_ = space;
  spaces_.push_back(space);
  maximum_size_ = maximum_size;
  live_bitmap_ = live_bitmap.release();
  mark_bitmap_ = mark_bitmap.release();

  num_bytes_allocated_ = 0;
  num_objects_allocated_ = 0;

  // TODO: allocate the card table

  // Make objects in boot_space live (after live_bitmap_ is set)
  if (boot_image_file_name != NULL) {
    boot_space_ = boot_space;
    RecordImageAllocations(boot_space);
  }
  for (size_t i = 0; i < image_spaces.size(); i++) {
    RecordImageAllocations(image_spaces[i]);
  }

  // It's still too early to take a lock because there are no threads yet,
  // but we can create the heap lock now. We don't create it earlier to
  // make it clear that you can't use locks during heap initialization.
  lock_ = new Mutex("Heap lock");
}

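// Frees all spaces and the heap bitmaps while holding the heap lock.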
void Heap::Destroy() {
  ScopedHeapLock lock;
  STLDeleteElements(&spaces_);
  if (mark_bitmap_ != NULL) {
    delete mark_bitmap_;
    mark_bitmap_ = NULL;
  }
  if (live_bitmap_ != NULL) {
    delete live_bitmap_;
  }
  live_bitmap_ = NULL;
}

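// Allocates num_bytes from the allocation space and installs klass as the new
// object's class; returns NULL if the allocation cannot be satisfied.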
Object* Heap::AllocObject(Class* klass, size_t num_bytes) {
  ScopedHeapLock lock;
  DCHECK(klass == NULL
         || klass->GetDescriptor() == NULL
         || (klass->IsClassClass() && num_bytes >= sizeof(Class))
         || (klass->IsVariableSize() || klass->GetObjectSize() == num_bytes));
  DCHECK(num_bytes >= sizeof(Object));
  Object* obj = AllocateLocked(num_bytes);
  if (obj != NULL) {
    obj->SetClass(klass);
  }
  return obj;
}

bool Heap::IsHeapAddress(const Object* obj) {
  // Note: we deliberately don't take the lock here, and mustn't test anything that would
  // require taking the lock.
  if (!IsAligned(obj, kObjectAlignment)) {
    return false;
  }
  // TODO
  return true;
}

bool Heap::verify_object_disabled_;

#if VERIFY_OBJECT_ENABLED
void Heap::VerifyObject(const Object* obj) {
  ScopedHeapLock lock;
  Heap::VerifyObjectLocked(obj);
}
#endif

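// Sanity-checks an object: alignment, liveness in the live bitmap, and that
// its class pointer (and the class's own class pointer) look valid.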
void Heap::VerifyObjectLocked(const Object* obj) {
  lock_->AssertHeld();
  if (obj != NULL && !verify_object_disabled_) {
    if (!IsAligned(obj, kObjectAlignment)) {
      LOG(FATAL) << "Object isn't aligned: " << obj;
    } else if (!live_bitmap_->Test(obj)) {
      // TODO: we don't hold a lock here as it is assumed the live bit map
      // isn't changing if the mutator is running.
      LOG(FATAL) << "Object is dead: " << obj;
    }
    // Ignore early dawn of the universe verifications
    if (num_objects_allocated_ > 10) {
      const byte* raw_addr = reinterpret_cast<const byte*>(obj) +
          Object::ClassOffset().Int32Value();
      const Class* c = *reinterpret_cast<Class* const *>(raw_addr);
      if (c == NULL) {
        LOG(FATAL) << "Null class in object: " << obj;
      } else if (!IsAligned(c, kObjectAlignment)) {
        LOG(FATAL) << "Class isn't aligned: " << c << " in object: " << obj;
      } else if (!live_bitmap_->Test(c)) {
        LOG(FATAL) << "Class of object is dead: " << c << " in object: " << obj;
      }
      // Check obj.getClass().getClass() == obj.getClass().getClass().getClass()
      // NB we don't use the accessors here as they have internal sanity checks
      // that we don't want to run
      raw_addr = reinterpret_cast<const byte*>(c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c = *reinterpret_cast<Class* const *>(raw_addr);
      raw_addr = reinterpret_cast<const byte*>(c_c) +
          Object::ClassOffset().Int32Value();
      const Class* c_c_c = *reinterpret_cast<Class* const *>(raw_addr);
      CHECK_EQ(c_c, c_c_c);
    }
  }
}

void Heap::VerificationCallback(Object* obj, void* arg) {
  DCHECK(obj != NULL);
  Heap::VerifyObjectLocked(obj);
}

void Heap::VerifyHeap() {
  ScopedHeapLock lock;
  live_bitmap_->Walk(Heap::VerificationCallback, NULL);
}

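// Bookkeeping after a successful allocation: update the byte and object
// counters and mark the object in the live bitmap.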
void Heap::RecordAllocationLocked(Space* space, const Object* obj) {
#ifndef NDEBUG
  if (Runtime::Current()->IsStarted()) {
    lock_->AssertHeld();
  }
#endif
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  num_bytes_allocated_ += size;
  num_objects_allocated_ += 1;
  live_bitmap_->Set(obj);
}

void Heap::RecordFreeLocked(Space* space, const Object* obj) {
  lock_->AssertHeld();
  size_t size = space->AllocationSize(obj);
  DCHECK_NE(size, 0u);
  if (size < num_bytes_allocated_) {
    num_bytes_allocated_ -= size;
  } else {
    num_bytes_allocated_ = 0;
  }
  live_bitmap_->Clear(obj);
  if (num_objects_allocated_ > 0) {
    num_objects_allocated_ -= 1;
  }
}

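// Walks an image space from just past its ImageHeader to its limit, marking
// every object it contains as live. Only valid before the runtime has started.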
void Heap::RecordImageAllocations(Space* space) {
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(space != NULL);
  CHECK(live_bitmap_ != NULL);
  byte* current = space->GetBase() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  while (current < space->GetLimit()) {
    DCHECK(IsAligned(current, kObjectAlignment));
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap_->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
}

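// Allocates from the default allocation space and records the allocation on
// success.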
Object* Heap::AllocateLocked(size_t size) {
  lock_->AssertHeld();
  DCHECK(alloc_space_ != NULL);
  Space* space = alloc_space_;
  Object* obj = AllocateLocked(space, size);
  if (obj != NULL) {
    RecordAllocationLocked(space, obj);
  }
  return obj;
}

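// Allocation slow path: try a plain allocation, then wait for any running GC
// and retry, then run a GC and retry, then allow the heap to grow, and finally
// GC once more (collecting SoftReferences) before giving up and returning NULL.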
Object* Heap::AllocateLocked(Space* space, size_t size) {
  lock_->AssertHeld();

  // Fail impossible allocations. TODO: collect soft references.
  if (size > maximum_size_) {
    return NULL;
  }

  Object* ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // The allocation failed. If the GC is running, block until it
  // completes and retry.
  if (is_gc_running_) {
    // The GC is concurrently tracing the heap. Release the heap
    // lock, wait for the GC to complete, and retry the allocation.
    WaitForConcurrentGcToComplete();
    ptr = space->AllocWithoutGrowth(size);
    if (ptr != NULL) {
      return ptr;
    }
  }

  // Another failure. Our thread was starved or there may be too many
  // live objects. Try a foreground GC. This will have no effect if
  // the concurrent GC is already running.
  CollectGarbageInternal();
  ptr = space->AllocWithoutGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  // Even that didn't work; this is an exceptional state.
  // Try harder, growing the heap if necessary.
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    //size_t new_footprint = dvmHeapSourceGetIdealFootprint();
    size_t new_footprint = space->MaxAllowedFootprint();
    // TODO: may want to grow a little bit more so that the amount of
    // free space is equal to the old free space + the
    // utilization slop for the new allocation.
    LOG(INFO) << "Grow heap (frag case) to " << new_footprint / MB
              << "MB for " << size << "-byte allocation";
    return ptr;
  }

  // Most allocations should have succeeded by now, so the heap is
  // really full, really fragmented, or the requested size is really
  // big. Do another GC, collecting SoftReferences this time. The VM
  // spec requires that all SoftReferences have been collected and
  // cleared before throwing an OOME.

  // TODO: wait for the finalizers from the previous GC to finish
  LOG(INFO) << "Forcing collection of SoftReferences for "
            << size << "-byte allocation";
  CollectGarbageInternal();
  ptr = space->AllocWithGrowth(size);
  if (ptr != NULL) {
    return ptr;
  }

  LOG(ERROR) << "Out of memory on a " << size << " byte allocation";

  // TODO: tell the HeapSource to dump its state
  // TODO: dump stack traces for all threads

  return NULL;
}

int64_t Heap::GetMaxMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetTotalMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

int64_t Heap::GetFreeMemory() {
  UNIMPLEMENTED(WARNING);
  return 0;
}

void Heap::CollectGarbage() {
  ScopedHeapLock lock;
  CollectGarbageInternal();
}

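// Runs a stop-the-world mark-sweep collection: suspends all threads, marks
// roots and reachable objects, processes references, sweeps, then calls
// GrowForUtilization() and resumes the threads.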
void Heap::CollectGarbageInternal() {
  lock_->AssertHeld();

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  thread_list->SuspendAll();
  {
    MarkSweep mark_sweep;

    mark_sweep.Init();

    mark_sweep.MarkRoots();

    // Push marked roots onto the mark stack

    // TODO: if concurrent
    // unlock heap
    // thread_list->ResumeAll();

    mark_sweep.RecursiveMark();

    // TODO: if concurrent
    // lock heap
    // thread_list->SuspendAll();
    // re-mark root set
    // scan dirty objects

    mark_sweep.ProcessReferences(false);

    // TODO: swap bitmaps

    mark_sweep.Sweep();
  }

  GrowForUtilization();
  thread_list->ResumeAll();
}

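// There is no concurrent collector yet, so this currently has nothing to wait
// for; callers simply retry their allocation once it returns.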
void Heap::WaitForConcurrentGcToComplete() {
  lock_->AssertHeld();
}

// Given the current contents of the active heap, increase the allowed
// heap footprint to match the target utilization ratio. This should
// only be called immediately after a full garbage collection.
void Heap::GrowForUtilization() {
  lock_->AssertHeld();
  UNIMPLEMENTED(ERROR);
}

void Heap::Lock() {
  // TODO: grab the lock, but put ourselves into Thread::kVmWait if it looks like
  // we're going to have to wait on the mutex.
  lock_->Lock();
}

void Heap::Unlock() {
  lock_->Unlock();
}

}  // namespace art