// Copyright 2011 Google Inc. All Rights Reserved.

#include "space.h"

#include <sys/mman.h>

#include "UniquePtr.h"
#include "dlmalloc.h"
#include "file.h"
#include "image.h"
#include "logging.h"
#include "os.h"
#include "utils.h"

namespace art {

#ifndef NDEBUG
#define DEBUG_SPACES 1
#endif

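// Check the result of a memory-management call: on a non-zero return the code is stored in
// errno and PLOG(FATAL) aborts with a description. The argument list is passed parenthesized,
// e.g. CHECK_MEMORY_CALL(mprotect, (addr, length, PROT_NONE), name), where addr, length and
// name are whatever the call site has in scope.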
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      errno = rc; \
      PLOG(FATAL) << #call << " failed for " << what; \
    } \
  } while (false)

AllocSpace* Space::CreateAllocSpace(const std::string& name, size_t initial_size,
                                    size_t growth_limit, size_t capacity,
                                    byte* requested_begin) {
  // Memory we promise to dlmalloc before it asks for morecore.
  // Note: making this value large means that large allocations are unlikely to succeed, as
  // dlmalloc will ask sys_alloc for this memory, which will fail because the footprint (this
  // value plus the size of the large allocation) would exceed the footprint limit.
  size_t starting_size = kPageSize;
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    VLOG(startup) << "Space::CreateAllocSpace entering " << name
                  << " initial_size=" << PrettySize(initial_size)
                  << " growth_limit=" << PrettySize(growth_limit)
                  << " capacity=" << PrettySize(capacity)
                  << " requested_begin=" << reinterpret_cast<void*>(requested_begin);
  }

  // Sanity check arguments.
  if (starting_size > initial_size) {
    initial_size = starting_size;
  }
  if (initial_size > growth_limit) {
    LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size ("
               << PrettySize(initial_size) << ") is larger than its growth limit ("
               << PrettySize(growth_limit) << ")";
    return NULL;
  }
  if (growth_limit > capacity) {
    LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit ("
               << PrettySize(growth_limit) << ") is larger than the capacity ("
               << PrettySize(capacity) << ")";
    return NULL;
  }

  // Page align the growth limit and capacity, which will be used to manage mmapped storage.
  growth_limit = RoundUp(growth_limit, kPageSize);
  capacity = RoundUp(capacity, kPageSize);

  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin,
                                                 capacity, PROT_READ | PROT_WRITE));
  if (mem_map.get() == NULL) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity);
    return NULL;
  }

  void* mspace = AllocSpace::CreateMallocSpace(mem_map->Begin(), starting_size, initial_size);
  if (mspace == NULL) {
    LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")";
    return NULL;
  }
82
Ian Rogers3bb17a62012-01-27 23:56:44 -080083 // Protect memory beyond the initial size.
84 byte* end = mem_map->Begin() + starting_size;
Ian Rogers30fab402012-01-23 15:43:46 -080085 if (capacity - initial_size > 0) {
86 CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name);
87 }

  // Everything is set, so record it in the immutable structure and leave.
  AllocSpace* space = new AllocSpace(name, mem_map.release(), mspace, end, growth_limit);
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Space::CreateAllocSpace exiting (" << PrettyDuration(NanoTime() - start_time)
              << ") " << *space;
  }
  return space;
}

void* AllocSpace::CreateMallocSpace(void* begin, size_t morecore_start, size_t initial_size) {
  // Clear errno to allow PLOG on error.
  errno = 0;
  // Create an mspace using our backing storage starting at begin and with a footprint of
  // morecore_start. Don't use an internal dlmalloc lock (we already hold the heap lock). When
  // morecore_start bytes of memory are exhausted morecore will be called.
  void* msp = create_mspace_with_base(begin, morecore_start, false /*locked*/);
  if (msp != NULL) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap.
    mspace_set_footprint_limit(msp, initial_size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}

Object* AllocSpace::AllocWithoutGrowth(size_t num_bytes) {
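  // mspace_calloc both allocates and zeroes the memory, so callers receive zero-initialized
  // storage, as object allocation requires.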
  Object* result = reinterpret_cast<Object*>(mspace_calloc(mspace_, 1, num_bytes));
#if DEBUG_SPACES
  if (result != NULL) {
    CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result)
                            << ") not in bounds of heap " << *this;
  }
#endif
  return result;
}

Object* AllocSpace::AllocWithGrowth(size_t num_bytes) {
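  // Raise the footprint limit to the full capacity for the duration of the allocation, then
  // clamp it back down to whatever footprint was actually reached; a successful grow is thus
  // permanent.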
  // Grow as much as possible within the mspace.
  size_t max_allowed = Capacity();
  mspace_set_footprint_limit(mspace_, max_allowed);
  // Try the allocation.
  void* ptr = AllocWithoutGrowth(num_bytes);
  // Shrink back down as small as possible.
  size_t footprint = mspace_footprint(mspace_);
  mspace_set_footprint_limit(mspace_, footprint);
  // Return the new allocation or NULL.
  Object* result = reinterpret_cast<Object*>(ptr);
  CHECK(result == NULL || Contains(result));
  return result;
}

void AllocSpace::Free(Object* ptr) {
#if DEBUG_SPACES
  CHECK(ptr != NULL);
  CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
#endif
  mspace_free(mspace_, ptr);
}

void AllocSpace::FreeList(size_t num_ptrs, Object** ptrs) {
#if DEBUG_SPACES
  CHECK(ptrs != NULL);
  size_t num_broken_ptrs = 0;
  for (size_t i = 0; i < num_ptrs; i++) {
    if (!Contains(ptrs[i])) {
      num_broken_ptrs++;
      LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this;
    }
  }
  CHECK_EQ(num_broken_ptrs, 0u);
#endif
  mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs);
}

// Callback from dlmalloc when it needs to increase the footprint.
extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) {
  AllocSpace* space = Heap::GetAllocSpace();
  if (LIKELY(space->GetMspace() == mspace)) {
    return space->MoreCore(increment);
  } else {
    // Exhaustively search the alloc spaces.
    const std::vector<Space*>& spaces = Heap::GetSpaces();
    for (size_t i = 0; i < spaces.size(); i++) {
      if (spaces[i]->IsAllocSpace()) {
        AllocSpace* space = spaces[i]->AsAllocSpace();
        if (mspace == space->GetMspace()) {
          return space->MoreCore(increment);
        }
      }
    }
    LOG(FATAL) << "Unexpected call to art_heap_morecore. mspace: " << mspace
               << " increment: " << increment;
    return NULL;
  }
}

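// MoreCore follows the sbrk-like contract dlmalloc expects of its morecore hook: it returns the
// old value of end_, and for a positive increment the newly usable memory is
// [original_end, original_end + increment).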
void* AllocSpace::MoreCore(intptr_t increment) {
  byte* original_end = end_;
  if (increment != 0) {
    VLOG(heap) << "AllocSpace::MoreCore " << PrettySize(increment);
    byte* new_end = original_end + increment;
    if (increment > 0) {
#if DEBUG_SPACES
      // Should never be asked to increase the allocation beyond the capacity of the space.
      // Enforced by mspace_set_footprint_limit.
      CHECK_LE(new_end, Begin() + Capacity());
#endif
      CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetSpaceName());
    } else {
#if DEBUG_SPACES
      // Should never be asked for a negative footprint (i.e. before begin).
      CHECK_GT(original_end + increment, Begin());
#endif
      // Advise the kernel that we don't need the pages, and protect them.
      // TODO: removing permissions from the pages may cause a TLB shoot-down, which can be
      // expensive (note the same isn't true for giving permissions to a page, as the protected
      // page shouldn't be in a TLB). We should investigate the performance impact of simply
      // ignoring the memory protection change here and in Space::CreateAllocSpace; it's likely
      // just a useful debug feature.
      size_t size = -increment;
      CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetSpaceName());
      CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetSpaceName());
    }
    // Update end_.
    end_ = new_end;
  }
  return original_end;
}

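// The size charged for an allocation is dlmalloc's usable payload size plus the fixed per-chunk
// bookkeeping overhead, so heap accounting tracks the real footprint rather than just the
// requested bytes.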
size_t AllocSpace::AllocationSize(const Object* obj) {
  return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) + kChunkOverhead;
}

// Callback from mspace_inspect_all giving the start and end of each chunk and the bytes used.
// A used_bytes of 0 indicates the range isn't in use, so we madvise the system that we don't
// need it.
static void DontNeed(void* start, void* end, size_t used_bytes, void* num_bytes) {
  if (used_bytes == 0) {
    start = reinterpret_cast<void*>(RoundUp(reinterpret_cast<uintptr_t>(start), kPageSize));
    end = reinterpret_cast<void*>(RoundDown(reinterpret_cast<uintptr_t>(end), kPageSize));
    if (end > start) {
      // We have a page aligned region to madvise on.
      size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
      CHECK_MEMORY_CALL(madvise, (start, length, MADV_DONTNEED), "trim");
    }
  }
}

void AllocSpace::Trim() {
  // Trim to release memory at the end of the space.
  mspace_trim(mspace_, 0);
  // Visit the space looking for page-sized holes to advise the kernel we don't need.
  size_t num_bytes_released = 0;
  mspace_inspect_all(mspace_, DontNeed, &num_bytes_released);
}

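// Walk exposes mspace_inspect_all directly: for every chunk the callback receives the chunk's
// [start, end) range and the number of bytes in use within it (0 for a free chunk), plus arg.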
void AllocSpace::Walk(void (*callback)(void* start, void* end, size_t num_bytes, void* callback_arg),
                      void* arg) {
  mspace_inspect_all(mspace_, callback, arg);
}

size_t AllocSpace::GetFootprintLimit() {
  return mspace_footprint_limit(mspace_);
}

void AllocSpace::SetFootprintLimit(size_t new_size) {
  VLOG(heap) << "AllocSpace::SetFootprintLimit " << PrettySize(new_size);
  // Compare against the actual footprint, rather than the Size(), because the heap may not have
  // grown all the way to the allowed size yet.
  size_t current_space_size = mspace_footprint(mspace_);
  if (new_size < current_space_size) {
    // Don't let the space grow any more.
    new_size = current_space_size;
  }
  mspace_set_footprint_limit(mspace_, new_size);
}

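// An image space wraps a memory-mapped boot image file. The file begins with an ImageHeader
// recording the address the image was compiled for; the file must be mapped at exactly that
// address (MAP_FIXED), after which the well-known roots stored in the header are published to
// the runtime.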
ImageSpace* Space::CreateImageSpace(const std::string& image_file_name) {
  CHECK(!image_file_name.empty());

  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    start_time = NanoTime();
    LOG(INFO) << "Space::CreateImageSpace entering image_file_name=" << image_file_name;
  }

  UniquePtr<File> file(OS::OpenFile(image_file_name.c_str(), false));
  if (file.get() == NULL) {
    LOG(ERROR) << "Failed to open " << image_file_name;
    return NULL;
  }
  ImageHeader image_header;
  bool success = file->ReadFully(&image_header, sizeof(image_header));
  if (!success || !image_header.IsValid()) {
    LOG(ERROR) << "Invalid image header " << image_file_name;
    return NULL;
  }
  UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(),
                                                 file->Length(),
                                                 // TODO: selectively PROT_EXEC stubs
                                                 PROT_READ | PROT_WRITE | PROT_EXEC,
                                                 MAP_PRIVATE | MAP_FIXED,
                                                 file->Fd(),
                                                 0));
  if (map.get() == NULL) {
    LOG(ERROR) << "Failed to map " << image_file_name;
    return NULL;
  }
  CHECK_EQ(image_header.GetImageBegin(), map->Begin());
  DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader)));

  Runtime* runtime = Runtime::Current();
  Object* jni_stub_array = image_header.GetImageRoot(ImageHeader::kJniStubArray);
  runtime->SetJniDlsymLookupStub(down_cast<ByteArray*>(jni_stub_array));

  Object* ame_stub_array = image_header.GetImageRoot(ImageHeader::kAbstractMethodErrorStubArray);
  runtime->SetAbstractMethodErrorStubArray(down_cast<ByteArray*>(ame_stub_array));

  Object* resolution_stub_array =
      image_header.GetImageRoot(ImageHeader::kInstanceResolutionStubArray);
  runtime->SetResolutionStubArray(
      down_cast<ByteArray*>(resolution_stub_array), Runtime::kInstanceMethod);
  resolution_stub_array = image_header.GetImageRoot(ImageHeader::kStaticResolutionStubArray);
  runtime->SetResolutionStubArray(
      down_cast<ByteArray*>(resolution_stub_array), Runtime::kStaticMethod);
  resolution_stub_array = image_header.GetImageRoot(ImageHeader::kUnknownMethodResolutionStubArray);
  runtime->SetResolutionStubArray(
      down_cast<ByteArray*>(resolution_stub_array), Runtime::kUnknownMethod);

  Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod);
  runtime->SetCalleeSaveMethod(down_cast<Method*>(callee_save_method), Runtime::kSaveAll);
  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod);
  runtime->SetCalleeSaveMethod(down_cast<Method*>(callee_save_method), Runtime::kRefsOnly);
  callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod);
  runtime->SetCalleeSaveMethod(down_cast<Method*>(callee_save_method), Runtime::kRefsAndArgs);

  ImageSpace* space = new ImageSpace(image_file_name, map.release());
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "Space::CreateImageSpace exiting (" << PrettyDuration(NanoTime() - start_time)
              << ") " << *space;
  }
  return space;
}

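// Objects in the image are laid out back-to-back, each rounded up to kObjectAlignment, so the
// live bitmap can be reconstructed by walking from the first object after the header and
// advancing by each object's rounded size.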
void ImageSpace::RecordImageAllocations(HeapBitmap* live_bitmap) const {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "ImageSpace::RecordImageAllocations entering";
    start_time = NanoTime();
  }
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(live_bitmap != NULL);
  byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  byte* end = End();
  while (current < end) {
    DCHECK_ALIGNED(current, kObjectAlignment);
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap->Set(obj);
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "ImageSpace::RecordImageAllocations exiting ("
              << PrettyDuration(NanoTime() - start_time) << ")";
  }
}

std::ostream& operator<<(std::ostream& os, const Space& space) {
  os << (space.IsImageSpace() ? "Image" : "Alloc") << "Space["
     << "begin=" << reinterpret_cast<void*>(space.Begin())
     << ",end=" << reinterpret_cast<void*>(space.End())
     << ",size=" << PrettySize(space.Size()) << ",capacity=" << PrettySize(space.Capacity())
     << ",name=\"" << space.GetSpaceName() << "\"]";
  return os;
}

}  // namespace art