Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 1 | // Copyright 2011 Google Inc. All Rights Reserved. |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 2 | |
Brian Carlstrom | 578bbdc | 2011-07-21 14:07:47 -0700 | [diff] [blame] | 3 | #include "space.h" |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 4 | |
| 5 | #include <sys/mman.h> |
| 6 | |
Elliott Hughes | 90a3369 | 2011-08-30 13:27:07 -0700 | [diff] [blame] | 7 | #include "UniquePtr.h" |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 8 | #include "dlmalloc.h" |
Brian Carlstrom | 4a289ed | 2011-08-16 17:17:49 -0700 | [diff] [blame] | 9 | #include "file.h" |
| 10 | #include "image.h" |
Brian Carlstrom | 578bbdc | 2011-07-21 14:07:47 -0700 | [diff] [blame] | 11 | #include "logging.h" |
Brian Carlstrom | 4a289ed | 2011-08-16 17:17:49 -0700 | [diff] [blame] | 12 | #include "os.h" |
Brian Carlstrom | 578bbdc | 2011-07-21 14:07:47 -0700 | [diff] [blame] | 13 | #include "utils.h" |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 14 | |
| 15 | namespace art { |
| 16 | |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 17 | #ifndef NDEBUG |
| 18 | #define DEBUG_SPACES 1 |
| 19 | #endif |
| 20 | |
// Invokes a memory-management call such as mprotect or madvise and, if it
// returns non-zero, logs a fatal message naming the failing call and the
// "what" context. errno is set from the return code so PLOG can decode it.
#define CHECK_MEMORY_CALL(call, args, what) \
  do { \
    int rc = call args; \
    if (UNLIKELY(rc != 0)) { \
      errno = rc; \
      PLOG(FATAL) << # call << " failed for " << what; \
    } \
  } while (false)
| 29 | |
| 30 | AllocSpace* Space::CreateAllocSpace(const std::string& name, size_t initial_size, |
| 31 | size_t growth_limit, size_t capacity, |
| 32 | byte* requested_begin) { |
| 33 | uint64_t start_time = 0; |
| 34 | if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { |
| 35 | start_time = NanoTime(); |
| 36 | VLOG(startup) << "Space::CreateAllocSpace entering " << name |
| 37 | << " initial_size=" << (initial_size / KB) << "KiB" |
| 38 | << " growth_limit=" << (growth_limit / KB) << "KiB" |
| 39 | << " capacity=" << (capacity / KB) << "KiB" |
| 40 | << " requested_begin=" << reinterpret_cast<void*>(requested_begin); |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 41 | } |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 42 | |
| 43 | // Sanity check arguments |
| 44 | if (initial_size > growth_limit) { |
| 45 | LOG(ERROR) << "Failed to create alloc space (" << name << ") where the initial size (" |
| 46 | << initial_size << ") is larger than its capacity (" << growth_limit << ")"; |
| 47 | return NULL; |
| 48 | } |
| 49 | if (growth_limit > capacity) { |
| 50 | LOG(ERROR) << "Failed to create alloc space (" << name << ") where the growth limit capacity" |
| 51 | " (" << growth_limit << ") is larger than the capacity (" << capacity << ")"; |
| 52 | return NULL; |
| 53 | } |
| 54 | |
| 55 | // Page align growth limit and capacity which will be used to manage mmapped storage |
| 56 | growth_limit = RoundUp(growth_limit, kPageSize); |
| 57 | capacity = RoundUp(capacity, kPageSize); |
| 58 | |
| 59 | UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, |
| 60 | capacity, PROT_READ | PROT_WRITE)); |
| 61 | if (mem_map.get() == NULL) { |
| 62 | LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size " |
| 63 | << capacity << " bytes"; |
| 64 | return NULL; |
| 65 | } |
| 66 | |
| 67 | void* mspace = AllocSpace::CreateMallocSpace(mem_map->Begin(), initial_size, capacity); |
| 68 | if (mspace == NULL) { |
| 69 | LOG(ERROR) << "Failed to initialize mspace for alloc space (" << name << ")"; |
| 70 | return NULL; |
| 71 | } |
| 72 | |
| 73 | // Protect memory beyond the initial size |
| 74 | byte* end = mem_map->Begin() + initial_size; |
| 75 | if (capacity - initial_size > 0) { |
| 76 | CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), name); |
| 77 | } |
| 78 | |
| 79 | // Everything is set so record in immutable structure and leave |
| 80 | AllocSpace* space = new AllocSpace(name, mem_map.release(), mspace, end, growth_limit); |
| 81 | if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { |
| 82 | uint64_t duration_ms = (NanoTime() - start_time)/1000/1000; |
| 83 | LOG(INFO) << "Space::CreateAllocSpace exiting (" << duration_ms << " ms) " << *space; |
| 84 | } |
| 85 | return space; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 86 | } |
| 87 | |
// Creates a dlmalloc mspace managing [begin, begin + size). The footprint
// limit is clamped to size; later growth is driven through art_heap_morecore
// (see AllocWithGrowth). Returns NULL on failure.
// NOTE(review): the capacity parameter is currently unused here — confirm
// whether it was intended to bound the mspace directly.
void* AllocSpace::CreateMallocSpace(void* begin, size_t size, size_t capacity) {
  // clear errno to allow PLOG on error
  errno = 0;
  // Create the mspace using our backing storage starting at begin, managing the
  // full specified size. Don't use an internal dlmalloc lock (as we already
  // hold the heap lock). When size is exhausted morecore will be called.
  void* msp = create_mspace_with_base(begin, size, false /*locked*/);
  if (msp != NULL) {
    // Do not allow morecore requests to succeed beyond the initial size of the heap
    mspace_set_footprint_limit(msp, size);
  } else {
    PLOG(ERROR) << "create_mspace_with_base failed";
  }
  return msp;
}
| 103 | |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 104 | Object* AllocSpace::AllocWithoutGrowth(size_t num_bytes) { |
| 105 | Object* result = reinterpret_cast<Object*>(mspace_calloc(mspace_, 1, num_bytes)); |
| 106 | #if DEBUG_SPACES |
| 107 | if (result != NULL) { |
| 108 | CHECK(Contains(result)) << "Allocation (" << reinterpret_cast<void*>(result) |
| 109 | << ") not in bounds of heap " << *this; |
jeffhao | c116070 | 2011-10-27 15:48:45 -0700 | [diff] [blame] | 110 | } |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 111 | #endif |
| 112 | return result; |
Brian Carlstrom | 4a289ed | 2011-08-16 17:17:49 -0700 | [diff] [blame] | 113 | } |
| 114 | |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 115 | Object* AllocSpace::AllocWithGrowth(size_t num_bytes) { |
| 116 | // Grow as much as possible within the mspace. |
| 117 | size_t max_allowed = Capacity(); |
| 118 | mspace_set_footprint_limit(mspace_, max_allowed); |
| 119 | // Try the allocation. |
| 120 | void* ptr = AllocWithoutGrowth(num_bytes); |
| 121 | // Shrink back down as small as possible. |
| 122 | size_t footprint = mspace_footprint(mspace_); |
| 123 | mspace_set_footprint_limit(mspace_, footprint); |
| 124 | // Return the new allocation or NULL. |
| 125 | Object* result = reinterpret_cast<Object*>(ptr); |
| 126 | CHECK(result == NULL || Contains(result)); |
| 127 | return result; |
Brian Carlstrom | 4a289ed | 2011-08-16 17:17:49 -0700 | [diff] [blame] | 128 | } |
| 129 | |
// Returns ptr's storage to the mspace. Debug builds require ptr to be
// non-NULL and within the bounds of this space.
void AllocSpace::Free(Object* ptr) {
#if DEBUG_SPACES
  CHECK(ptr != NULL);
  CHECK(Contains(ptr)) << "Free (" << ptr << ") not in bounds of heap " << *this;
#endif
  mspace_free(mspace_, ptr);
}
| 137 | |
| 138 | void AllocSpace::FreeList(size_t num_ptrs, Object** ptrs) { |
| 139 | #if DEBUG_SPACES |
| 140 | CHECK(ptrs != NULL); |
| 141 | size_t num_broken_ptrs = 0; |
| 142 | for (size_t i = 0; i < num_ptrs; i++) { |
| 143 | if(!Contains(ptrs[i])) { |
| 144 | num_broken_ptrs++; |
| 145 | LOG(ERROR) << "FreeList[" << i << "] (" << ptrs[i] << ") not in bounds of heap " << *this; |
| 146 | } |
| 147 | } |
| 148 | CHECK_EQ(num_broken_ptrs, 0u); |
| 149 | #endif |
| 150 | mspace_bulk_free(mspace_, reinterpret_cast<void**>(ptrs), num_ptrs); |
| 151 | } |
| 152 | |
| 153 | // Callback from dlmalloc when it needs to increase the footprint |
| 154 | extern "C" void* art_heap_morecore(void* mspace, intptr_t increment) { |
| 155 | AllocSpace* space = Heap::GetAllocSpace(); |
| 156 | if (LIKELY(space->GetMspace() == mspace)) { |
| 157 | return space->MoreCore(increment); |
| 158 | } else { |
| 159 | // Exhaustively search alloc spaces |
| 160 | const std::vector<Space*>& spaces = Heap::GetSpaces(); |
| 161 | for (size_t i = 0; i < spaces.size(); i++) { |
| 162 | if (spaces[i]->IsAllocSpace()) { |
| 163 | AllocSpace* space = spaces[i]->AsAllocSpace(); |
| 164 | if (mspace == space->GetMspace()) { |
| 165 | return space->MoreCore(increment); |
| 166 | } |
| 167 | } |
| 168 | } |
| 169 | LOG(FATAL) << "Unexpected call to art_heap_morecore. mspace: " << mspace |
| 170 | << " increment: " << increment; |
| 171 | return NULL; |
| 172 | } |
| 173 | } |
| 174 | |
| 175 | void* AllocSpace::MoreCore(intptr_t increment) { |
| 176 | byte* original_end = end_; |
| 177 | if (increment != 0) { |
| 178 | VLOG(heap) << "AllocSpace::MoreCore " << (increment/KB) << "KiB"; |
| 179 | byte* new_end = original_end + increment; |
| 180 | if (increment > 0) { |
| 181 | #if DEBUG_SPACES |
| 182 | // Should never be asked to increase the allocation beyond the capacity of the space. Enforced |
| 183 | // by mspace_set_footprint_limit. |
| 184 | CHECK_LE(new_end, Begin() + Capacity()); |
| 185 | #endif |
| 186 | CHECK_MEMORY_CALL(mprotect, (original_end, increment, PROT_READ | PROT_WRITE), GetSpaceName()); |
| 187 | } else { |
| 188 | #if DEBUG_SPACES |
| 189 | // Should never be asked for negative footprint (ie before begin) |
| 190 | CHECK_GT(original_end + increment, Begin()); |
| 191 | #endif |
| 192 | // Advise we don't need the pages and protect them |
| 193 | size_t size = -increment; |
| 194 | CHECK_MEMORY_CALL(madvise, (new_end, size, MADV_DONTNEED), GetSpaceName()); |
| 195 | CHECK_MEMORY_CALL(mprotect, (new_end, size, PROT_NONE), GetSpaceName()); |
| 196 | } |
| 197 | // Update end_ |
| 198 | end_ = new_end; |
| 199 | } |
| 200 | return original_end; |
| 201 | } |
| 202 | |
| 203 | size_t AllocSpace::AllocationSize(const Object* obj) { |
| 204 | return mspace_usable_size(const_cast<void*>(reinterpret_cast<const void*>(obj))) + kChunkOverhead; |
| 205 | } |
| 206 | |
| 207 | // Call back from mspace_inspect_all returning the start and end of chunks and the bytes used, |
| 208 | // if used_bytes is 0 then it indicates the range isn't in use and we madvise to the system that |
| 209 | // we don't need it |
| 210 | static void DontNeed(void* start, void* end, size_t used_bytes, void* num_bytes) { |
| 211 | if (used_bytes == 0) { |
| 212 | start = reinterpret_cast<void*>(RoundUp((uintptr_t)start, kPageSize)); |
| 213 | end = reinterpret_cast<void*>(RoundDown((uintptr_t)end, kPageSize)); |
| 214 | if (end > start) { |
| 215 | // We have a page aligned region to madvise on |
| 216 | size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start); |
| 217 | CHECK_MEMORY_CALL(madvise, (start, length, MADV_DONTNEED), "trim"); |
| 218 | } |
| 219 | } |
| 220 | } |
| 221 | |
// Returns unused memory to the system where possible.
void AllocSpace::Trim() {
  // Trim to release memory at the end of the space
  mspace_trim(mspace_, 0);
  // Visit space looking for page size holes to advise we don't need.
  // NOTE(review): num_bytes_released is passed to DontNeed but never updated
  // by it, so it remains 0 — confirm whether a released-byte count was
  // intended to be accumulated and reported.
  size_t num_bytes_released = 0;
  mspace_inspect_all(mspace_, DontNeed, &num_bytes_released);
}
| 229 | |
| 230 | |
// Visits every chunk in the mspace, invoking callback with each chunk's
// start, end and used byte count, plus the caller-supplied arg.
void AllocSpace::Walk(void(*callback)(void *start, void *end, size_t num_bytes, void* callback_arg),
                      void* arg) {
  mspace_inspect_all(mspace_, callback, arg);
}
| 235 | |
// Returns the current maximum number of bytes the mspace is allowed to consume.
size_t AllocSpace::GetFootprintLimit() {
  return mspace_footprint_limit(mspace_);
}
| 239 | |
| 240 | void AllocSpace::SetFootprintLimit(size_t new_size) { |
| 241 | VLOG(heap) << "AllocSpace::SetFootprintLimit " << (new_size/KB) << "KiB"; |
| 242 | // Compare against the actual footprint, rather than the Size(), because the heap may not have |
| 243 | // grown all the way to the allowed size yet. |
| 244 | // |
| 245 | size_t current_space_size = mspace_footprint(mspace_); |
| 246 | if (new_size < current_space_size) { |
| 247 | // Don't let the space grow any more. |
| 248 | new_size = current_space_size; |
| 249 | } |
| 250 | mspace_set_footprint_limit(mspace_, new_size); |
| 251 | } |
| 252 | |
| 253 | ImageSpace* Space::CreateImageSpace(const std::string& image_file_name) { |
| 254 | CHECK(image_file_name != NULL); |
| 255 | |
| 256 | uint64_t start_time = 0; |
| 257 | if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { |
| 258 | start_time = NanoTime(); |
| 259 | LOG(INFO) << "Space::CreateImageSpace entering" << " image_file_name=" << image_file_name; |
| 260 | } |
| 261 | |
Brian Carlstrom | 58ae941 | 2011-10-04 00:56:06 -0700 | [diff] [blame] | 262 | UniquePtr<File> file(OS::OpenFile(image_file_name.c_str(), false)); |
Elliott Hughes | 90a3369 | 2011-08-30 13:27:07 -0700 | [diff] [blame] | 263 | if (file.get() == NULL) { |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 264 | LOG(ERROR) << "Failed to open " << image_file_name; |
| 265 | return NULL; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 266 | } |
Brian Carlstrom | 4a289ed | 2011-08-16 17:17:49 -0700 | [diff] [blame] | 267 | ImageHeader image_header; |
| 268 | bool success = file->ReadFully(&image_header, sizeof(image_header)); |
| 269 | if (!success || !image_header.IsValid()) { |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 270 | LOG(ERROR) << "Invalid image header " << image_file_name; |
| 271 | return NULL; |
Brian Carlstrom | 4a289ed | 2011-08-16 17:17:49 -0700 | [diff] [blame] | 272 | } |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 273 | UniquePtr<MemMap> map(MemMap::MapFileAtAddress(image_header.GetImageBegin(), |
Brian Carlstrom | 8952189 | 2011-12-07 22:05:07 -0800 | [diff] [blame] | 274 | file->Length(), |
| 275 | // TODO: selectively PROT_EXEC stubs |
| 276 | PROT_READ | PROT_WRITE | PROT_EXEC, |
| 277 | MAP_PRIVATE | MAP_FIXED, |
| 278 | file->Fd(), |
| 279 | 0)); |
Elliott Hughes | 90a3369 | 2011-08-30 13:27:07 -0700 | [diff] [blame] | 280 | if (map.get() == NULL) { |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 281 | LOG(ERROR) << "Failed to map " << image_file_name; |
| 282 | return NULL; |
Brian Carlstrom | 4a289ed | 2011-08-16 17:17:49 -0700 | [diff] [blame] | 283 | } |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 284 | CHECK_EQ(image_header.GetImageBegin(), map->Begin()); |
| 285 | DCHECK_EQ(0, memcmp(&image_header, map->Begin(), sizeof(ImageHeader))); |
Brian Carlstrom | a663ea5 | 2011-08-19 23:33:41 -0700 | [diff] [blame] | 286 | |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 287 | Runtime* runtime = Runtime::Current(); |
Brian Carlstrom | 1619286 | 2011-09-12 17:50:06 -0700 | [diff] [blame] | 288 | Object* jni_stub_array = image_header.GetImageRoot(ImageHeader::kJniStubArray); |
Ian Rogers | 169c9a7 | 2011-11-13 20:13:17 -0800 | [diff] [blame] | 289 | runtime->SetJniDlsymLookupStub(down_cast<ByteArray*>(jni_stub_array)); |
Brian Carlstrom | 1619286 | 2011-09-12 17:50:06 -0700 | [diff] [blame] | 290 | |
Brian Carlstrom | e24fa61 | 2011-09-29 00:53:55 -0700 | [diff] [blame] | 291 | Object* ame_stub_array = image_header.GetImageRoot(ImageHeader::kAbstractMethodErrorStubArray); |
Ian Rogers | 4f0d07c | 2011-10-06 23:38:47 -0700 | [diff] [blame] | 292 | runtime->SetAbstractMethodErrorStubArray(down_cast<ByteArray*>(ame_stub_array)); |
Brian Carlstrom | e24fa61 | 2011-09-29 00:53:55 -0700 | [diff] [blame] | 293 | |
Ian Rogers | ad25ac5 | 2011-10-04 19:13:33 -0700 | [diff] [blame] | 294 | Object* resolution_stub_array = image_header.GetImageRoot(ImageHeader::kInstanceResolutionStubArray); |
Ian Rogers | 4f0d07c | 2011-10-06 23:38:47 -0700 | [diff] [blame] | 295 | runtime->SetResolutionStubArray( |
Ian Rogers | 1cb0a1d | 2011-10-06 15:24:35 -0700 | [diff] [blame] | 296 | down_cast<ByteArray*>(resolution_stub_array), Runtime::kInstanceMethod); |
Ian Rogers | ad25ac5 | 2011-10-04 19:13:33 -0700 | [diff] [blame] | 297 | resolution_stub_array = image_header.GetImageRoot(ImageHeader::kStaticResolutionStubArray); |
Ian Rogers | 4f0d07c | 2011-10-06 23:38:47 -0700 | [diff] [blame] | 298 | runtime->SetResolutionStubArray( |
Ian Rogers | 1cb0a1d | 2011-10-06 15:24:35 -0700 | [diff] [blame] | 299 | down_cast<ByteArray*>(resolution_stub_array), Runtime::kStaticMethod); |
| 300 | resolution_stub_array = image_header.GetImageRoot(ImageHeader::kUnknownMethodResolutionStubArray); |
Ian Rogers | 4f0d07c | 2011-10-06 23:38:47 -0700 | [diff] [blame] | 301 | runtime->SetResolutionStubArray( |
Ian Rogers | 1cb0a1d | 2011-10-06 15:24:35 -0700 | [diff] [blame] | 302 | down_cast<ByteArray*>(resolution_stub_array), Runtime::kUnknownMethod); |
Ian Rogers | ad25ac5 | 2011-10-04 19:13:33 -0700 | [diff] [blame] | 303 | |
Ian Rogers | ff1ed47 | 2011-09-20 13:46:24 -0700 | [diff] [blame] | 304 | Object* callee_save_method = image_header.GetImageRoot(ImageHeader::kCalleeSaveMethod); |
Ian Rogers | 4f0d07c | 2011-10-06 23:38:47 -0700 | [diff] [blame] | 305 | runtime->SetCalleeSaveMethod(down_cast<Method*>(callee_save_method), Runtime::kSaveAll); |
| 306 | callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsOnlySaveMethod); |
| 307 | runtime->SetCalleeSaveMethod(down_cast<Method*>(callee_save_method), Runtime::kRefsOnly); |
| 308 | callee_save_method = image_header.GetImageRoot(ImageHeader::kRefsAndArgsSaveMethod); |
| 309 | runtime->SetCalleeSaveMethod(down_cast<Method*>(callee_save_method), Runtime::kRefsAndArgs); |
Ian Rogers | ff1ed47 | 2011-09-20 13:46:24 -0700 | [diff] [blame] | 310 | |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 311 | ImageSpace* space = new ImageSpace(image_file_name, map.release()); |
| 312 | if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { |
| 313 | uint64_t duration_ms = (NanoTime() - start_time)/1000/1000; |
| 314 | LOG(INFO) << "Space::CreateImageSpace exiting (" << duration_ms << " ms) " << *space; |
Ian Rogers | 5d76c43 | 2011-10-31 21:42:49 -0700 | [diff] [blame] | 315 | } |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 316 | return space; |
Ian Rogers | 5d76c43 | 2011-10-31 21:42:49 -0700 | [diff] [blame] | 317 | } |
| 318 | |
// Marks every object in the image as live in the given bitmap by walking the
// image linearly from just past the header to the end of the mapping. Must be
// called before the runtime starts (enforced by the DCHECK below), while the
// image contents are stable.
void ImageSpace::RecordImageAllocations(HeapBitmap* live_bitmap) const {
  uint64_t start_time = 0;
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    LOG(INFO) << "ImageSpace::RecordImageAllocations entering";
    start_time = NanoTime();
  }
  DCHECK(!Runtime::Current()->IsStarted());
  CHECK(live_bitmap != NULL);
  // Objects begin immediately after the image header, aligned to kObjectAlignment.
  byte* current = Begin() + RoundUp(sizeof(ImageHeader), kObjectAlignment);
  byte* end = End();
  while (current < end) {
    DCHECK_ALIGNED(current, kObjectAlignment);
    const Object* obj = reinterpret_cast<const Object*>(current);
    live_bitmap->Set(obj);
    // Advance past this object; sizes are rounded up so the walk stays aligned.
    current += RoundUp(obj->SizeOf(), kObjectAlignment);
  }
  if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
    uint64_t duration_ms = (NanoTime() - start_time)/1000/1000;
    LOG(INFO) << "ImageSpace::RecordImageAllocations exiting (" << duration_ms << " ms)";
  }
}
| 340 | |
Ian Rogers | 30fab40 | 2012-01-23 15:43:46 -0800 | [diff] [blame^] | 341 | std::ostream& operator<<(std::ostream& os, const Space& space) { |
| 342 | os << (space.IsImageSpace() ? "Image" : "Alloc") << "Space[" |
| 343 | << "begin=" << reinterpret_cast<void*>(space.Begin()) |
| 344 | << ",end=" << reinterpret_cast<void*>(space.End()) |
| 345 | << ",size=" << (space.Size()/KB) << "KiB" |
| 346 | << ",capacity=" << (space.Capacity()/KB) << "KiB" |
| 347 | << ",name=\"" << space.GetSpaceName() << "\"]"; |
| 348 | return os; |
Carl Shapiro | 69759ea | 2011-07-21 18:13:35 -0700 | [diff] [blame] | 349 | } |
| 350 | |
| 351 | } // namespace art |