blob: 60f566c8b03e40997f15b6608577608e33aae1fe [file] [log] [blame]
Elliott Hughes2faa5f12012-01-30 14:42:07 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070016
Ian Rogers1d54e732013-05-02 21:10:01 -070017#include "dlmalloc_space.h"
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070018
Hiroshi Yamauchi50b29282013-07-30 13:58:37 -070019#include "dlmalloc_space-inl.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070020#include "gc/accounting/card_table.h"
21#include "gc/heap.h"
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070022#include "mirror/class-inl.h"
Mathieu Chartier0f72e412013-09-06 16:40:01 -070023#include "mirror/object-inl.h"
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080024#include "runtime.h"
Ian Rogers2dd0e2c2013-01-24 12:42:14 -080025#include "thread.h"
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070026#include "thread_list.h"
Brian Carlstrom578bbdc2011-07-21 14:07:47 -070027#include "utils.h"
Ian Rogers6fac4472014-02-25 17:01:10 -080028#include "valgrind_malloc_space-inl.h"
Ian Rogers1d54e732013-05-02 21:10:01 -070029
Carl Shapiro69759ea2011-07-21 18:13:35 -070030namespace art {
Ian Rogers1d54e732013-05-02 21:10:01 -070031namespace gc {
32namespace space {
Ian Rogers30fab402012-01-23 15:43:46 -080033
// Whether FreeList() issues prefetches for upcoming chunk headers while summing sizes.
static constexpr bool kPrefetchDuringDlMallocFreeList = true;

// Explicit instantiation of the Valgrind-wrapped variant so its members are emitted here.
template class ValgrindMallocSpace<DlMallocSpace, void*>;
Mathieu Chartier2fde5332012-09-14 14:51:54 -070037
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -070038DlMallocSpace::DlMallocSpace(const std::string& name, MemMap* mem_map, void* mspace, byte* begin,
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070039 byte* end, byte* limit, size_t growth_limit)
40 : MallocSpace(name, mem_map, begin, end, limit, growth_limit),
Hiroshi Yamauchie48780b2013-12-17 17:19:53 -080041 mspace_(mspace), mspace_for_alloc_(mspace) {
Mathieu Chartierb062fdd2012-07-03 09:51:48 -070042 CHECK(mspace != NULL);
Mathieu Chartierb062fdd2012-07-03 09:51:48 -070043}
44
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080045DlMallocSpace* DlMallocSpace::CreateFromMemMap(MemMap* mem_map, const std::string& name,
Mathieu Chartier661974a2014-01-09 11:23:53 -080046 size_t starting_size, size_t initial_size,
47 size_t growth_limit, size_t capacity) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080048 DCHECK(mem_map != nullptr);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070049 void* mspace = CreateMspace(mem_map->Begin(), starting_size, initial_size);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080050 if (mspace == nullptr) {
Ian Rogers30fab402012-01-23 15:43:46 -080051 LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080052 return nullptr;
Ian Rogers30fab402012-01-23 15:43:46 -080053 }
54
lzang1385de732014-02-21 14:15:01 +080055 // Protect memory beyond the starting size. morecore will add r/w permissions when necessory
Ian Rogers3bb17a62012-01-27 23:56:44 -080056 byte* end = mem_map->Begin() + starting_size;
lzang1385de732014-02-21 14:15:01 +080057 if (capacity - starting_size > 0) {
58 CHECK_MEMORY_CALL(mprotect, (end, capacity - starting_size, PROT_NONE), name);
Ian Rogers30fab402012-01-23 15:43:46 -080059 }
60
61 // Everything is set so record in immutable structure and leave
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070062 byte* begin = mem_map->Begin();
Mathieu Chartierda44d772014-04-01 15:01:46 -070063 if (Runtime::Current()->RunningOnValgrind()) {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080064 return new ValgrindMallocSpace<DlMallocSpace, void*>(
Hiroshi Yamauchi7cb7bbc2013-11-18 17:27:37 -080065 name, mem_map, mspace, begin, end, begin + capacity, growth_limit, initial_size);
Ian Rogers1d54e732013-05-02 21:10:01 -070066 } else {
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080067 return new DlMallocSpace(name, mem_map, mspace, begin, end, begin + capacity, growth_limit);
Ian Rogers1d54e732013-05-02 21:10:01 -070068 }
Mathieu Chartiere6da9af2013-12-16 11:54:42 -080069}
70
71DlMallocSpace* DlMallocSpace::Create(const std::string& name, size_t initial_size, size_t growth_limit,
72 size_t capacity, byte* requested_begin) {
73 uint64_t start_time = 0;
74 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
75 start_time = NanoTime();
76 LOG(INFO) << "DlMallocSpace::Create entering " << name
77 << " initial_size=" << PrettySize(initial_size)
78 << " growth_limit=" << PrettySize(growth_limit)
79 << " capacity=" << PrettySize(capacity)
80 << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
81 }
82
83 // Memory we promise to dlmalloc before it asks for morecore.
84 // Note: making this value large means that large allocations are unlikely to succeed as dlmalloc
85 // will ask for this memory from sys_alloc which will fail as the footprint (this value plus the
86 // size of the large allocation) will be greater than the footprint limit.
87 size_t starting_size = kPageSize;
88 MemMap* mem_map = CreateMemMap(name, starting_size, &initial_size, &growth_limit, &capacity,
89 requested_begin);
90 if (mem_map == nullptr) {
91 LOG(ERROR) << "Failed to create mem map for alloc space (" << name << ") of size "
92 << PrettySize(capacity);
93 return nullptr;
94 }
95 DlMallocSpace* space = CreateFromMemMap(mem_map, name, starting_size, initial_size,
96 growth_limit, capacity);
Mathieu Chartier590fee92013-09-13 13:46:47 -070097 // We start out with only the initial size possibly containing objects.
Ian Rogers30fab402012-01-23 15:43:46 -080098 if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -070099 LOG(INFO) << "DlMallocSpace::Create exiting (" << PrettyDuration(NanoTime() - start_time)
Ian Rogers3bb17a62012-01-27 23:56:44 -0800100 << " ) " << *space;
Ian Rogers30fab402012-01-23 15:43:46 -0800101 }
102 return space;
Carl Shapiro69759ea2011-07-21 18:13:35 -0700103}
104
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700105void* DlMallocSpace::CreateMspace(void* begin, size_t morecore_start, size_t initial_size) {
Ian Rogers30fab402012-01-23 15:43:46 -0800106 // clear errno to allow PLOG on error
Carl Shapiro69759ea2011-07-21 18:13:35 -0700107 errno = 0;
Ian Rogers3bb17a62012-01-27 23:56:44 -0800108 // create mspace using our backing storage starting at begin and with a footprint of
109 // morecore_start. Don't use an internal dlmalloc lock (as we already hold heap lock). When
110 // morecore_start bytes of memory is exhaused morecore will be called.
111 void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800112 if (msp != nullptr) {
Ian Rogers30fab402012-01-23 15:43:46 -0800113 // Do not allow morecore requests to succeed beyond the initial size of the heap
Ian Rogers3bb17a62012-01-27 23:56:44 -0800114 mspace_set_footprint_limit(msp, initial_size);
Carl Shapiro69759ea2011-07-21 18:13:35 -0700115 } else {
Ian Rogers30fab402012-01-23 15:43:46 -0800116 PLOG(ERROR) << "create_mspace_with_base failed";
Carl Shapiro69759ea2011-07-21 18:13:35 -0700117 }
118 return msp;
119}
120
// Allocates num_bytes, temporarily lifting the footprint limit to the space's full Capacity()
// so the allocation can grow the heap; the limit is shrunk back to the actual footprint before
// releasing the lock. Returns nullptr on failure. On success the memory is zeroed.
mirror::Object* DlMallocSpace::AllocWithGrowth(Thread* self, size_t num_bytes,
                                               size_t* bytes_allocated, size_t* usable_size) {
  mirror::Object* result;
  {
    MutexLock mu(self, lock_);
    // Grow as much as possible within the space.
    size_t max_allowed = Capacity();
    mspace_set_footprint_limit(mspace_, max_allowed);
    // Try the allocation.
    result = AllocWithoutGrowthLocked(self, num_bytes, bytes_allocated, usable_size);
    // Shrink back down as small as possible.
    size_t footprint = mspace_footprint(mspace_);
    mspace_set_footprint_limit(mspace_, footprint);
  }
  if (result != nullptr) {
    // Zero freshly allocated memory, done while not holding the space's lock.
    memset(result, 0, num_bytes);
    // Check that the result is contained in the space.
    CHECK(!kDebugSpaces || Contains(result));
  }
  return result;
}
143
// Factory used by MallocSpace (e.g. when splitting a space): constructs a new DlMallocSpace
// over an existing allocator ('allocator' is an mspace handle) and memory range.
MallocSpace* DlMallocSpace::CreateInstance(const std::string& name, MemMap* mem_map,
                                           void* allocator, byte* begin, byte* end,
                                           byte* limit, size_t growth_limit) {
  return new DlMallocSpace(name, mem_map, allocator, begin, end, limit, growth_limit);
}
149
// Frees a single object under the space lock and returns the number of bytes released.
size_t DlMallocSpace::Free(Thread* self, mirror::Object* ptr) {
  MutexLock mu(self, lock_);
  if (kDebugSpaces) {
    CHECK(ptr != nullptr);
    CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
  }
  // Query the allocation size before freeing; the chunk metadata is invalid afterwards.
  const size_t bytes_freed = AllocationSizeNonvirtual(ptr, nullptr);
  if (kRecentFreeCount > 0) {
    // Record the free for debugging of use-after-free bugs.
    RegisterRecentFree(ptr);
  }
  mspace_free(mspace_, ptr);
  return bytes_freed;
}
163
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800164size_t DlMallocSpace::FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) {
Mathieu Chartierd22d5482012-11-06 17:14:12 -0800165 DCHECK(ptrs != NULL);
166
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -0700167 // Don't need the lock to calculate the size of the freed pointers.
168 size_t bytes_freed = 0;
169 for (size_t i = 0; i < num_ptrs; i++) {
Ian Rogers2dd0e2c2013-01-24 12:42:14 -0800170 mirror::Object* ptr = ptrs[i];
Mathieu Chartierd22d5482012-11-06 17:14:12 -0800171 const size_t look_ahead = 8;
172 if (kPrefetchDuringDlMallocFreeList && i + look_ahead < num_ptrs) {
173 // The head of chunk for the allocation is sizeof(size_t) behind the allocation.
174 __builtin_prefetch(reinterpret_cast<char*>(ptrs[i + look_ahead]) - sizeof(size_t));
175 }
Ian Rogers6fac4472014-02-25 17:01:10 -0800176 bytes_freed += AllocationSizeNonvirtual(ptr, nullptr);
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -0700177 }
178
Mathieu Chartier0f72e412013-09-06 16:40:01 -0700179 if (kRecentFreeCount > 0) {
180 MutexLock mu(self, lock_);
181 for (size_t i = 0; i < num_ptrs; i++) {
182 RegisterRecentFree(ptrs[i]);
183 }
184 }
185
Mathieu Chartier8e9a1492012-10-04 12:25:40 -0700186 if (kDebugSpaces) {
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700187 size_t num_broken_ptrs = 0;
188 for (size_t i = 0; i < num_ptrs; i++) {
189 if (!Contains(ptrs[i])) {
190 num_broken_ptrs++;
191 LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
192 } else {
193 size_t size = mspace_usable_size(ptrs[i]);
194 memset(ptrs[i], 0xEF, size);
195 }
Ian Rogers30fab402012-01-23 15:43:46 -0800196 }
Mathieu Chartier2fde5332012-09-14 14:51:54 -0700197 CHECK_EQ(num_broken_ptrs, 0u);
Ian Rogers30fab402012-01-23 15:43:46 -0800198 }
Mathieu Chartierd22d5482012-11-06 17:14:12 -0800199
200 {
201 MutexLock mu(self, lock_);
Mathieu Chartierd22d5482012-11-06 17:14:12 -0800202 mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
203 return bytes_freed;
204 }
Ian Rogers30fab402012-01-23 15:43:46 -0800205}
206
207// Callback from dlmalloc when it needs to increase the footprint
208extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
Elliott Hughesb3bd5f02012-03-08 21:05:27 -0800209 Heap* heap = Runtime::Current()->GetHeap();
Mathieu Chartiere6da9af2013-12-16 11:54:42 -0800210 DlMallocSpace* dlmalloc_space = heap->GetDlMallocSpace();
211 // Support for multiple DlMalloc provided by a slow path.
212 if (UNLIKELY(dlmalloc_space == nullptr || dlmalloc_space->GetMspace() != mspace)) {
213 dlmalloc_space = nullptr;
214 for (space::ContinuousSpace* space : heap->GetContinuousSpaces()) {
215 if (space->IsDlMallocSpace()) {
216 DlMallocSpace* cur_dlmalloc_space = space->AsDlMallocSpace();
217 if (cur_dlmalloc_space->GetMspace() == mspace) {
218 dlmalloc_space = cur_dlmalloc_space;
219 break;
220 }
221 }
222 }
223 CHECK(dlmalloc_space != nullptr) << "Couldn't find DlmMallocSpace with mspace=" << mspace;
224 }
225 return dlmalloc_space->MoreCore(increment);
Ian Rogers30fab402012-01-23 15:43:46 -0800226}
227
// Returns unused memory to the OS and reports the number of bytes reclaimed.
size_t DlMallocSpace::Trim() {
  MutexLock mu(Thread::Current(), lock_);
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit space looking for page-sized holes to advise the kernel we don't need.
  size_t reclaimed = 0;
  mspace_inspect_all(mspace_, DlmallocMadviseCallback, &reclaimed);
  return reclaimed;
}
Ian Rogers30fab402012-01-23 15:43:46 -0800237
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -0700238void DlMallocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
Ian Rogers30fab402012-01-23 15:43:46 -0800239 void* arg) {
Ian Rogers50b35e22012-10-04 10:09:15 -0700240 MutexLock mu(Thread::Current(), lock_);
Ian Rogers30fab402012-01-23 15:43:46 -0800241 mspace_inspect_all(mspace_, callback, arg);
Ian Rogers15bf2d32012-08-28 17:33:04 -0700242 callback(NULL, NULL, 0, arg); // Indicate end of a space.
Ian Rogers30fab402012-01-23 15:43:46 -0800243}
244
// Returns the current dlmalloc footprint (bytes obtained from the system) under the space lock.
size_t DlMallocSpace::GetFootprint() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint(mspace_);
}
249
// Returns the maximum footprint dlmalloc is currently allowed to grow to, under the space lock.
size_t DlMallocSpace::GetFootprintLimit() {
  MutexLock mu(Thread::Current(), lock_);
  return mspace_footprint_limit(mspace_);
}
254
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -0700255void DlMallocSpace::SetFootprintLimit(size_t new_size) {
Ian Rogers50b35e22012-10-04 10:09:15 -0700256 MutexLock mu(Thread::Current(), lock_);
Hiroshi Yamauchicf58d4a2013-09-26 14:21:22 -0700257 VLOG(heap) << "DlMallocSpace::SetFootprintLimit " << PrettySize(new_size);
Ian Rogers30fab402012-01-23 15:43:46 -0800258 // Compare against the actual footprint, rather than the Size(), because the heap may not have
259 // grown all the way to the allowed size yet.
Ian Rogers30fab402012-01-23 15:43:46 -0800260 size_t current_space_size = mspace_footprint(mspace_);
261 if (new_size < current_space_size) {
262 // Don't let the space grow any more.
263 new_size = current_space_size;
264 }
265 mspace_set_footprint_limit(mspace_, new_size);
266}
267
Hiroshi Yamauchibe031ff2013-10-08 16:42:37 -0700268uint64_t DlMallocSpace::GetBytesAllocated() {
Hiroshi Yamauchi4ce1f002013-11-18 14:49:09 -0800269 MutexLock mu(Thread::Current(), lock_);
270 size_t bytes_allocated = 0;
271 mspace_inspect_all(mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
272 return bytes_allocated;
Hiroshi Yamauchibe031ff2013-10-08 16:42:37 -0700273}
274
275uint64_t DlMallocSpace::GetObjectsAllocated() {
Hiroshi Yamauchi4ce1f002013-11-18 14:49:09 -0800276 MutexLock mu(Thread::Current(), lock_);
277 size_t objects_allocated = 0;
278 mspace_inspect_all(mspace_, DlmallocObjectsAllocatedCallback, &objects_allocated);
279 return objects_allocated;
Hiroshi Yamauchibe031ff2013-10-08 16:42:37 -0700280}
281
// Releases the space's backing pages to the kernel and clears both GC bitmaps.
// NOTE(review): the mspace bookkeeping itself is not reset here — see Reset()'s TODO.
void DlMallocSpace::Clear() {
  madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED);
  GetLiveBitmap()->Clear();
  GetMarkBitmap()->Clear();
}
287
// Intended to reinitialize the allocator state; currently unimplemented.
void DlMallocSpace::Reset() {
  // TODO: Delete and create new mspace here.
}
291
#ifndef NDEBUG
// Debug-only precondition for MoreCore: the space lock must already be held
// (morecore is reached from allocation paths that take lock_).
void DlMallocSpace::CheckMoreCoreForPrecondition() {
  lock_.AssertHeld(Thread::Current());
}
#endif
297
Ian Rogers1d54e732013-05-02 21:10:01 -0700298} // namespace space
299} // namespace gc
Carl Shapiro69759ea2011-07-21 18:13:35 -0700300} // namespace art