/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rosalloc.h"

#include <list>
#include <map>
#include <sstream>
#include <vector>

#include "base/memory_tool.h"
#include "base/mutex-inl.h"
#include "gc/space/memory_tool_settings.h"
#include "mem_map.h"
#include "mirror/class-inl.h"
#include "mirror/object.h"
#include "mirror/object-inl.h"
#include "thread-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace allocator {

static constexpr bool kUsePrefetchDuringAllocRun = false;
static constexpr bool kPrefetchNewRunDataByZeroing = false;
static constexpr size_t kPrefetchStride = 64;

size_t RosAlloc::bracketSizes[kNumOfSizeBrackets];
size_t RosAlloc::numOfPages[kNumOfSizeBrackets];
size_t RosAlloc::numOfSlots[kNumOfSizeBrackets];
size_t RosAlloc::headerSizes[kNumOfSizeBrackets];
bool RosAlloc::initialized_ = false;
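// A statically allocated sentinel run that is permanently full: AllocSlot() on it always fails.
// Using it as the initial value of current_runs_ (and as the placeholder for thread-local runs,
// see AllocFromRun) lets the allocation fast path skip a null check and fall through to the
// refill slow path on the first allocation in each bracket.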
size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
RosAlloc::Run* RosAlloc::dedicated_full_run_ =
    reinterpret_cast<RosAlloc::Run*>(dedicated_full_run_storage_);

RosAlloc::RosAlloc(void* base, size_t capacity, size_t max_capacity,
                   PageReleaseMode page_release_mode, bool running_on_memory_tool,
                   size_t page_release_size_threshold)
    : base_(reinterpret_cast<uint8_t*>(base)), footprint_(capacity),
      capacity_(capacity), max_capacity_(max_capacity),
      lock_("rosalloc global lock", kRosAllocGlobalLock),
      bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
      page_release_mode_(page_release_mode),
      page_release_size_threshold_(page_release_size_threshold),
      is_running_on_memory_tool_(running_on_memory_tool) {
  DCHECK_ALIGNED(base, kPageSize);
  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
  CHECK_LE(capacity, max_capacity);
  CHECK_ALIGNED(page_release_size_threshold_, kPageSize);
  // Zero the memory explicitly (don't rely on the mem map being zero-initialized).
  if (!kMadviseZeroes) {
    memset(base_, 0, max_capacity);
  }
  CHECK_EQ(madvise(base_, max_capacity, MADV_DONTNEED), 0);
  if (!initialized_) {
    Initialize();
  }
  VLOG(heap) << "RosAlloc base="
             << std::hex << reinterpret_cast<intptr_t>(base_) << ", end="
             << std::hex << reinterpret_cast<intptr_t>(base_ + capacity_)
             << ", capacity=" << std::dec << capacity_
             << ", max_capacity=" << std::dec << max_capacity_;
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    size_bracket_lock_names_[i] =
        StringPrintf("a rosalloc size bracket %d lock", static_cast<int>(i));
    size_bracket_locks_[i] = new Mutex(size_bracket_lock_names_[i].c_str(), kRosAllocBracketLock);
    current_runs_[i] = dedicated_full_run_;
  }
  DCHECK_EQ(footprint_, capacity_);
  size_t num_of_pages = footprint_ / kPageSize;
  size_t max_num_of_pages = max_capacity_ / kPageSize;
  std::string error_msg;
  page_map_mem_map_.reset(MemMap::MapAnonymous("rosalloc page map", nullptr,
                                               RoundUp(max_num_of_pages, kPageSize),
                                               PROT_READ | PROT_WRITE, false, false, &error_msg));
  CHECK(page_map_mem_map_.get() != nullptr) << "Couldn't allocate the page map: " << error_msg;
  page_map_ = page_map_mem_map_->Begin();
  page_map_size_ = num_of_pages;
  max_page_map_size_ = max_num_of_pages;
  free_page_run_size_map_.resize(num_of_pages);
  FreePageRun* free_pages = reinterpret_cast<FreePageRun*>(base_);
  if (kIsDebugBuild) {
    free_pages->magic_num_ = kMagicNumFree;
  }
  free_pages->SetByteSize(this, capacity_);
  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));
  DCHECK(free_pages->IsFree());
  free_pages->ReleasePages(this);
  DCHECK(free_pages->IsFree());
  free_page_runs_.insert(free_pages);
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::RosAlloc() : Inserted run 0x" << std::hex
              << reinterpret_cast<intptr_t>(free_pages)
              << " into free_page_runs_";
  }
}

RosAlloc::~RosAlloc() {
  for (size_t i = 0; i < kNumOfSizeBrackets; i++) {
    delete size_bracket_locks_[i];
  }
  if (is_running_on_memory_tool_) {
    MEMORY_TOOL_MAKE_DEFINED(base_, capacity_);
  }
}

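// Allocates num_pages contiguous pages via a first-fit (lowest address) search over
// free_page_runs_, splitting off any remainder as a new free page run. If no run is large
// enough, grows the footprint toward capacity_ (by at least 2 MB when capacity permits) and
// retries against the last free page run. On success the page map is tagged; for example, a
// three-page run becomes { kPageMapRun, kPageMapRunPart, kPageMapRunPart }. The caller must
// hold lock_.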
void* RosAlloc::AllocPages(Thread* self, size_t num_pages, uint8_t page_map_type) {
  lock_.AssertHeld(self);
  DCHECK(page_map_type == kPageMapRun || page_map_type == kPageMapLargeObject);
  FreePageRun* res = nullptr;
  const size_t req_byte_size = num_pages * kPageSize;
  // Find the lowest address free page run that's large enough.
  for (auto it = free_page_runs_.begin(); it != free_page_runs_.end(); ) {
    FreePageRun* fpr = *it;
    DCHECK(fpr->IsFree());
    size_t fpr_byte_size = fpr->ByteSize(this);
    DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
    if (req_byte_size <= fpr_byte_size) {
      // Found one.
      it = free_page_runs_.erase(it);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x"
                  << std::hex << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Don't need to call madvise on remainder here.
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
      break;
    } else {
      ++it;
    }
  }

  // Failed to allocate pages. Grow the footprint, if possible.
  if (UNLIKELY(res == nullptr && capacity_ > footprint_)) {
    FreePageRun* last_free_page_run = nullptr;
    size_t last_free_page_run_size;
    auto it = free_page_runs_.rbegin();
    if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
      // There is a free page run at the end.
      DCHECK(last_free_page_run->IsFree());
      DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run)));
      last_free_page_run_size = last_free_page_run->ByteSize(this);
    } else {
      // There is no free page run at the end.
      last_free_page_run_size = 0;
    }
    DCHECK_LT(last_free_page_run_size, req_byte_size);
    if (capacity_ - footprint_ + last_free_page_run_size >= req_byte_size) {
      // If we grow the heap, we can allocate it.
      size_t increment = std::min(std::max(2 * MB, req_byte_size - last_free_page_run_size),
                                  capacity_ - footprint_);
      DCHECK_EQ(increment % kPageSize, static_cast<size_t>(0));
      size_t new_footprint = footprint_ + increment;
      size_t new_num_of_pages = new_footprint / kPageSize;
      DCHECK_LT(page_map_size_, new_num_of_pages);
      DCHECK_LT(free_page_run_size_map_.size(), new_num_of_pages);
      page_map_size_ = new_num_of_pages;
      DCHECK_LE(page_map_size_, max_page_map_size_);
      free_page_run_size_map_.resize(new_num_of_pages);
      ArtRosAllocMoreCore(this, increment);
      if (last_free_page_run_size > 0) {
        // There was a free page run at the end. Expand its size.
        DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
        last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
        DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
      } else {
        // Otherwise, insert a new free page run at the end.
        FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
        if (kIsDebugBuild) {
          new_free_page_run->magic_num_ = kMagicNumFree;
        }
        new_free_page_run->SetByteSize(this, increment);
        DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(new_free_page_run);
        DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Grew the heap by inserting run 0x"
                    << std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
                    << " into free_page_runs_";
        }
      }
      DCHECK_LE(footprint_ + increment, capacity_);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : increased the footprint from "
                  << footprint_ << " to " << new_footprint;
      }
      footprint_ = new_footprint;

      // And retry the last free page run.
      it = free_page_runs_.rbegin();
      DCHECK(it != free_page_runs_.rend());
      FreePageRun* fpr = *it;
      if (kIsDebugBuild && last_free_page_run_size > 0) {
        DCHECK(last_free_page_run != nullptr);
        DCHECK_EQ(last_free_page_run, fpr);
      }
      size_t fpr_byte_size = fpr->ByteSize(this);
      DCHECK_EQ(fpr_byte_size % kPageSize, static_cast<size_t>(0));
      DCHECK_LE(req_byte_size, fpr_byte_size);
      free_page_runs_.erase(fpr);
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::AllocPages() : Erased run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(fpr)
                  << " from free_page_runs_";
      }
      if (req_byte_size < fpr_byte_size) {
        // Split if there's a remainder.
        FreePageRun* remainder =
            reinterpret_cast<FreePageRun*>(reinterpret_cast<uint8_t*>(fpr) + req_byte_size);
        if (kIsDebugBuild) {
          remainder->magic_num_ = kMagicNumFree;
        }
        remainder->SetByteSize(this, fpr_byte_size - req_byte_size);
        DCHECK_EQ(remainder->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        free_page_runs_.insert(remainder);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::AllocPages() : Inserted run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(remainder)
                    << " into free_page_runs_";
        }
        fpr->SetByteSize(this, req_byte_size);
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      }
      res = fpr;
    }
  }
  if (LIKELY(res != nullptr)) {
    // Update the page map.
    size_t page_map_idx = ToPageMapIndex(res);
    for (size_t i = 0; i < num_pages; i++) {
      DCHECK(IsFreePage(page_map_idx + i));
    }
    switch (page_map_type) {
      case kPageMapRun:
        page_map_[page_map_idx] = kPageMapRun;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapRunPart;
        }
        break;
      case kPageMapLargeObject:
        page_map_[page_map_idx] = kPageMapLargeObject;
        for (size_t i = 1; i < num_pages; i++) {
          page_map_[page_map_idx + i] = kPageMapLargeObjectPart;
        }
        break;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_type);
        break;
    }
    if (kIsDebugBuild) {
      // Clear the first page since it is not madvised due to the magic number.
      memset(res, 0, kPageSize);
    }
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocPages() : 0x" << std::hex << reinterpret_cast<intptr_t>(res)
                << "-0x" << (reinterpret_cast<intptr_t>(res) + num_pages * kPageSize)
                << "(" << std::dec << (num_pages * kPageSize) << ")";
    }
    return res;
  }

  // Fail.
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocPages() : nullptr";
  }
  return nullptr;
}

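// Returns the pages starting at ptr (a run or a large object, per the page map) to
// free_page_runs_. The freed pages are re-zeroed (unless every page gets released to the OS),
// or zero-checked when already_zero holds and checking is enabled. The resulting free page run
// is coalesced with adjacent free page runs on both sides, then handed to ReleasePages(), which
// may return memory to the OS depending on the page release mode. Returns the number of bytes
// freed. The caller must hold lock_.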
size_t RosAlloc::FreePages(Thread* self, void* ptr, bool already_zero) {
  lock_.AssertHeld(self);
  size_t pm_idx = ToPageMapIndex(ptr);
  DCHECK_LT(pm_idx, page_map_size_);
  uint8_t pm_type = page_map_[pm_idx];
  DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
  uint8_t pm_part_type;
  switch (pm_type) {
    case kPageMapRun:
      pm_part_type = kPageMapRunPart;
      break;
    case kPageMapLargeObject:
      pm_part_type = kPageMapLargeObjectPart;
      break;
    default:
      LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << " : pm_idx=" << pm_idx << ", pm_type="
                 << static_cast<int>(pm_type) << ", ptr=" << std::hex
                 << reinterpret_cast<intptr_t>(ptr);
      return 0;
  }
  // Update the page map and count the number of pages.
  size_t num_pages = 1;
  page_map_[pm_idx] = kPageMapEmpty;
  size_t idx = pm_idx + 1;
  size_t end = page_map_size_;
  while (idx < end && page_map_[idx] == pm_part_type) {
    page_map_[idx] = kPageMapEmpty;
    num_pages++;
    idx++;
  }
  const size_t byte_size = num_pages * kPageSize;
  if (already_zero) {
    if (ShouldCheckZeroMemory()) {
      const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(ptr);
      for (size_t i = 0; i < byte_size / sizeof(uintptr_t); ++i) {
        CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i;
      }
    }
  } else if (!DoesReleaseAllPages()) {
    memset(ptr, 0, byte_size);
  }

  if (kTraceRosAlloc) {
    LOG(INFO) << __PRETTY_FUNCTION__ << " : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr)
              << "-0x" << (reinterpret_cast<intptr_t>(ptr) + byte_size)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }

  // Turn it into a free run.
  FreePageRun* fpr = reinterpret_cast<FreePageRun*>(ptr);
  if (kIsDebugBuild) {
    fpr->magic_num_ = kMagicNumFree;
  }
  fpr->SetByteSize(this, byte_size);
  DCHECK_ALIGNED(fpr->ByteSize(this), kPageSize);

  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  if (!free_page_runs_.empty()) {
    // Try to coalesce in the higher address direction.
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce a free page run 0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr) << " [" << std::dec << pm_idx << "] -0x"
                << std::hex << reinterpret_cast<uintptr_t>(fpr->End(this)) << " [" << std::dec
                << (fpr->End(this) == End() ? page_map_size_ : ToPageMapIndex(fpr->End(this))) << "]";
    }
    for (auto it = free_page_runs_.upper_bound(fpr); it != free_page_runs_.end(); ) {
      FreePageRun* h = *it;
      DCHECK_EQ(h->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a higher free page run 0x"
                  << std::hex << reinterpret_cast<uintptr_t>(h) << " [" << std::dec << ToPageMapIndex(h) << "] -0x"
                  << std::hex << reinterpret_cast<uintptr_t>(h->End(this)) << " [" << std::dec
                  << (h->End(this) == End() ? page_map_size_ : ToPageMapIndex(h->End(this))) << "]";
      }
      if (fpr->End(this) == h->Begin()) {
        if (kTraceRosAlloc) {
          LOG(INFO) << "Success";
        }
        // Clear magic num since this is no longer the start of a free page run.
        if (kIsDebugBuild) {
          h->magic_num_ = 0;
        }
        it = free_page_runs_.erase(it);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(h)
                    << " from free_page_runs_";
        }
        fpr->SetByteSize(this, fpr->ByteSize(this) + h->ByteSize(this));
        DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      } else {
        // Not adjacent. Stop.
        if (kTraceRosAlloc) {
          LOG(INFO) << "Fail";
        }
        break;
      }
    }
    // Try to coalesce in the lower address direction.
    for (auto it = free_page_runs_.upper_bound(fpr); it != free_page_runs_.begin(); ) {
      --it;
      FreePageRun* l = *it;
      DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
      if (kTraceRosAlloc) {
        LOG(INFO) << "RosAlloc::FreePages() : trying to coalesce with a lower free page run 0x"
                  << std::hex << reinterpret_cast<uintptr_t>(l) << " [" << std::dec << ToPageMapIndex(l) << "] -0x"
                  << std::hex << reinterpret_cast<uintptr_t>(l->End(this)) << " [" << std::dec
                  << (l->End(this) == End() ? page_map_size_ : ToPageMapIndex(l->End(this))) << "]";
      }
      if (l->End(this) == fpr->Begin()) {
        if (kTraceRosAlloc) {
          LOG(INFO) << "Success";
        }
        it = free_page_runs_.erase(it);
        if (kTraceRosAlloc) {
          LOG(INFO) << "RosAlloc::FreePages() : (coalesce) Erased run 0x" << std::hex
                    << reinterpret_cast<intptr_t>(l)
                    << " from free_page_runs_";
        }
        l->SetByteSize(this, l->ByteSize(this) + fpr->ByteSize(this));
        DCHECK_EQ(l->ByteSize(this) % kPageSize, static_cast<size_t>(0));
        // Clear magic num since this is no longer the start of a free page run.
        if (kIsDebugBuild) {
          fpr->magic_num_ = 0;
        }
        fpr = l;
      } else {
        // Not adjacent. Stop.
        if (kTraceRosAlloc) {
          LOG(INFO) << "Fail";
        }
        break;
      }
    }
  }

  // Insert it.
  DCHECK_EQ(fpr->ByteSize(this) % kPageSize, static_cast<size_t>(0));
  DCHECK(free_page_runs_.find(fpr) == free_page_runs_.end());
  DCHECK(fpr->IsFree());
  fpr->ReleasePages(this);
  DCHECK(fpr->IsFree());
  free_page_runs_.insert(fpr);
  DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end());
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::FreePages() : Inserted run 0x" << std::hex << reinterpret_cast<intptr_t>(fpr)
              << " into free_page_runs_";
  }
  return byte_size;
}

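// Large objects (> kLargeSizeThreshold) bypass the run machinery entirely: they are allocated
// as whole pages under lock_ and tracked only through the page map. All three out-parameters
// receive the page-rounded size.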
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated,
                                 size_t* usable_size, size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK_GT(size, kLargeSizeThreshold);
  size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
  void* r;
  {
    MutexLock mu(self, lock_);
    r = AllocPages(self, num_pages, kPageMapLargeObject);
  }
  if (UNLIKELY(r == nullptr)) {
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::AllocLargeObject() : nullptr";
    }
    return nullptr;
  }
  const size_t total_bytes = num_pages * kPageSize;
  *bytes_allocated = total_bytes;
  *usable_size = total_bytes;
  *bytes_tl_bulk_allocated = total_bytes;
  if (kTraceRosAlloc) {
    LOG(INFO) << "RosAlloc::AllocLargeObject() : 0x" << std::hex << reinterpret_cast<intptr_t>(r)
              << "-0x" << (reinterpret_cast<intptr_t>(r) + num_pages * kPageSize)
              << "(" << std::dec << (num_pages * kPageSize) << ")";
  }
  // Check if the returned memory is really all zero.
  if (ShouldCheckZeroMemory()) {
    CHECK_EQ(total_bytes % sizeof(uintptr_t), 0U);
    const uintptr_t* words = reinterpret_cast<uintptr_t*>(r);
    for (size_t i = 0; i < total_bytes / sizeof(uintptr_t); ++i) {
      CHECK_EQ(words[i], 0U);
    }
  }
  return r;
}

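// Common free path: classifies ptr through the page map under lock_. Large objects are freed
// as pages immediately; for run pages, pm_idx is walked back to the run header and the slot is
// freed via FreeFromRun() after lock_ is dropped. Called with bulk_free_lock_ held (see Free()
// below).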
size_t RosAlloc::FreeInternal(Thread* self, void* ptr) {
  DCHECK_LE(base_, ptr);
  DCHECK_LT(ptr, base_ + footprint_);
  size_t pm_idx = RoundDownToPageMapIndex(ptr);
  Run* run = nullptr;
  {
    MutexLock mu(self, lock_);
    DCHECK_LT(pm_idx, page_map_size_);
    uint8_t page_map_entry = page_map_[pm_idx];
    if (kTraceRosAlloc) {
      LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
                << ", page_map_entry=" << static_cast<int>(page_map_entry);
    }
    switch (page_map_[pm_idx]) {
      case kPageMapLargeObject:
        return FreePages(self, ptr, false);
      case kPageMapLargeObjectPart:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      case kPageMapRunPart:
        // Find the beginning of the run.
        do {
          --pm_idx;
          DCHECK_LT(pm_idx, capacity_ / kPageSize);
        } while (page_map_[pm_idx] != kPageMapRun);
        FALLTHROUGH_INTENDED;
      case kPageMapRun:
        run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
        DCHECK_EQ(run->magic_num_, kMagicNum);
        break;
      case kPageMapReleased:
      case kPageMapEmpty:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
      default:
        LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]);
        return 0;
    }
  }
  DCHECK(run != nullptr);
  return FreeFromRun(self, ptr, run);
}

size_t RosAlloc::Free(Thread* self, void* ptr) {
  ReaderMutexLock rmu(self, bulk_free_lock_);
  return FreeInternal(self, ptr);
}

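// Allocates a brand-new run of numOfPages[idx] pages for size bracket idx and initializes its
// free list. For brackets that are likely to become thread-local, the run's data area can
// optionally be warmed up in the cache, either by zeroing or by prefetching (both disabled by
// default via the constants above).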
RosAlloc::Run* RosAlloc::AllocRun(Thread* self, size_t idx) {
  RosAlloc::Run* new_run = nullptr;
  {
    MutexLock mu(self, lock_);
    new_run = reinterpret_cast<Run*>(AllocPages(self, numOfPages[idx], kPageMapRun));
  }
  if (LIKELY(new_run != nullptr)) {
    if (kIsDebugBuild) {
      new_run->magic_num_ = kMagicNum;
    }
    new_run->size_bracket_idx_ = idx;
    DCHECK(!new_run->IsThreadLocal());
    DCHECK(!new_run->to_be_bulk_freed_);
    if (kUsePrefetchDuringAllocRun && idx < kNumThreadLocalSizeBrackets) {
      // Take ownership of the cache lines if this is likely to be a thread-local run.
      if (kPrefetchNewRunDataByZeroing) {
        // Zeroing the data is sometimes faster than prefetching but it increases memory usage
        // since we end up dirtying zero pages which may have been madvised.
        new_run->ZeroData();
      } else {
        const size_t num_of_slots = numOfSlots[idx];
        const size_t bracket_size = bracketSizes[idx];
        const size_t num_of_bytes = num_of_slots * bracket_size;
        uint8_t* begin = reinterpret_cast<uint8_t*>(new_run) + headerSizes[idx];
        for (size_t i = 0; i < num_of_bytes; i += kPrefetchStride) {
          __builtin_prefetch(begin + i);
        }
      }
    }
    new_run->InitFreeList();
  }
  return new_run;
}

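// Picks the lowest-address non-full run for bracket idx if one exists, otherwise allocates a
// fresh run. Returns null only if the heap cannot supply a new run.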
RosAlloc::Run* RosAlloc::RefillRun(Thread* self, size_t idx) {
  // Get the lowest address non-full run from the binary tree.
  auto* const bt = &non_full_runs_[idx];
  if (!bt->empty()) {
    // If there's one, use it as the current run.
    auto it = bt->begin();
    Run* non_full_run = *it;
    DCHECK(non_full_run != nullptr);
    DCHECK(!non_full_run->IsThreadLocal());
    bt->erase(it);
    return non_full_run;
  }
  // If there's none, allocate a new run and use it as the current run.
  return AllocRun(self, idx);
}

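// Shared-run slow path: allocates a slot from current_runs_[idx], installing a replacement via
// RefillRun() when the current run fills up. Full runs are tracked in full_runs_ only in debug
// builds; release builds simply drop the reference. Callers either hold
// size_bracket_locks_[idx] or have exclusive mutator access (see AllocFromRunThreadUnsafe).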
inline void* RosAlloc::AllocFromCurrentRunUnlocked(Thread* self, size_t idx) {
  Run* current_run = current_runs_[idx];
  DCHECK(current_run != nullptr);
  void* slot_addr = current_run->AllocSlot();
  if (UNLIKELY(slot_addr == nullptr)) {
    // The current run got full. Try to refill it.
    DCHECK(current_run->IsFull());
    if (kIsDebugBuild && current_run != dedicated_full_run_) {
      full_runs_[idx].insert(current_run);
      if (kTraceRosAlloc) {
        LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex
                  << reinterpret_cast<intptr_t>(current_run)
                  << " into full_runs_[" << std::dec << idx << "]";
      }
      DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
      DCHECK(full_runs_[idx].find(current_run) != full_runs_[idx].end());
    }
    current_run = RefillRun(self, idx);
    if (UNLIKELY(current_run == nullptr)) {
      // Failed to allocate a new run, make sure that it is the dedicated full run.
      current_runs_[idx] = dedicated_full_run_;
      return nullptr;
    }
    DCHECK(current_run != nullptr);
    DCHECK(non_full_runs_[idx].find(current_run) == non_full_runs_[idx].end());
    DCHECK(full_runs_[idx].find(current_run) == full_runs_[idx].end());
    current_run->SetIsThreadLocal(false);
    current_runs_[idx] = current_run;
    DCHECK(!current_run->IsFull());
    slot_addr = current_run->AllocSlot();
    // Must succeed now with a new run.
    DCHECK(slot_addr != nullptr);
  }
  return slot_addr;
}

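// Variant of AllocFromRun() for callers that hold the mutator lock exclusively: no other thread
// can allocate concurrently, so the per-bracket locks and thread-local runs are bypassed.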
void* RosAlloc::AllocFromRunThreadUnsafe(Thread* self, size_t size, size_t* bytes_allocated,
                                         size_t* usable_size,
                                         size_t* bytes_tl_bulk_allocated) {
  DCHECK(bytes_allocated != nullptr);
  DCHECK(usable_size != nullptr);
  DCHECK(bytes_tl_bulk_allocated != nullptr);
  DCHECK_LE(size, kLargeSizeThreshold);
  size_t bracket_size;
  size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  void* slot_addr = AllocFromCurrentRunUnlocked(self, idx);
  if (LIKELY(slot_addr != nullptr)) {
    *bytes_allocated = bracket_size;
    *usable_size = bracket_size;
    *bytes_tl_bulk_allocated = bracket_size;
  }
  // Caller verifies that it is all 0.
  return slot_addr;
}

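// Allocates size bytes, rounded up to the bracket size. Small brackets
// (idx < kNumThreadLocalSizeBrackets) are served from a thread-local run with no locking in the
// common case; when that run fills up, its thread-local free list is merged back first, and
// only if nothing was freed is a replacement run installed. Larger brackets share
// current_runs_[idx] under size_bracket_locks_[idx]. *bytes_tl_bulk_allocated reports the free
// space of a whole new thread-local run at once, and 0 on a thread-local hit, since those
// bytes were already counted.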
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 647 | void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated, | 
 | 648 |                              size_t* usable_size, size_t* bytes_tl_bulk_allocated) { | 
 | 649 |   DCHECK(bytes_allocated != nullptr); | 
 | 650 |   DCHECK(usable_size != nullptr); | 
 | 651 |   DCHECK(bytes_tl_bulk_allocated != nullptr); | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 652 |   DCHECK_LE(size, kLargeSizeThreshold); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 653 |   size_t bracket_size; | 
 | 654 |   size_t idx = SizeToIndexAndBracketSize(size, &bracket_size); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 655 |   void* slot_addr; | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 656 |   if (LIKELY(idx < kNumThreadLocalSizeBrackets)) { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 657 |     // Use a thread-local run. | 
| Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 658 |     Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx)); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 659 |     // Allow invalid since this will always fail the allocation. | 
| Mathieu Chartier | 4fd2050 | 2014-04-28 09:35:55 -0700 | [diff] [blame] | 660 |     if (kIsDebugBuild) { | 
 | 661 |       // Need the lock to prevent race conditions. | 
 | 662 |       MutexLock mu(self, *size_bracket_locks_[idx]); | 
 | 663 |       CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end()); | 
 | 664 |       CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end()); | 
 | 665 |     } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 666 |     DCHECK(thread_local_run != nullptr); | 
 | 667 |     DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 668 |     slot_addr = thread_local_run->AllocSlot(); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 669 |     // The allocation must fail if the run is invalid. | 
 | 670 |     DCHECK(thread_local_run != dedicated_full_run_ || slot_addr == nullptr) | 
 | 671 |         << "allocated from an invalid run"; | 
 | 672 |     if (UNLIKELY(slot_addr == nullptr)) { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 673 |       // The run got full. Try to free slots. | 
 | 674 |       DCHECK(thread_local_run->IsFull()); | 
 | 675 |       MutexLock mu(self, *size_bracket_locks_[idx]); | 
 | 676 |       bool is_all_free_after_merge; | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 677 |       // This is safe to do for the dedicated_full_run_ since the bitmaps are empty. | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 678 |       if (thread_local_run->MergeThreadLocalFreeListToFreeList(&is_all_free_after_merge)) { | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 679 |         DCHECK_NE(thread_local_run, dedicated_full_run_); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 680 |         // Some slot got freed. Keep it. | 
 | 681 |         DCHECK(!thread_local_run->IsFull()); | 
 | 682 |         DCHECK_EQ(is_all_free_after_merge, thread_local_run->IsAllFree()); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 683 |       } else { | 
 | 684 |         // No slots got freed. Try to refill the thread-local run. | 
 | 685 |         DCHECK(thread_local_run->IsFull()); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 686 |         if (thread_local_run != dedicated_full_run_) { | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 687 |           thread_local_run->SetIsThreadLocal(false); | 
 | 688 |           if (kIsDebugBuild) { | 
 | 689 |             full_runs_[idx].insert(thread_local_run); | 
 | 690 |             if (kTraceRosAlloc) { | 
 | 691 |               LOG(INFO) << "RosAlloc::AllocFromRun() : Inserted run 0x" << std::hex | 
 | 692 |                         << reinterpret_cast<intptr_t>(thread_local_run) | 
 | 693 |                         << " into full_runs_[" << std::dec << idx << "]"; | 
 | 694 |             } | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 695 |           } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 696 |           DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end()); | 
 | 697 |           DCHECK(full_runs_[idx].find(thread_local_run) != full_runs_[idx].end()); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 698 |         } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 699 |  | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 700 |         thread_local_run = RefillRun(self, idx); | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 701 |         if (UNLIKELY(thread_local_run == nullptr)) { | 
 | 702 |           self->SetRosAllocRun(idx, dedicated_full_run_); | 
 | 703 |           return nullptr; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 704 |         } | 
 | 705 |         DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end()); | 
 | 706 |         DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end()); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 707 |         thread_local_run->SetIsThreadLocal(true); | 
| Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 708 |         self->SetRosAllocRun(idx, thread_local_run); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 709 |         DCHECK(!thread_local_run->IsFull()); | 
 | 710 |       } | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 711 |       DCHECK(thread_local_run != nullptr); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 712 |       DCHECK(!thread_local_run->IsFull()); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 713 |       DCHECK(thread_local_run->IsThreadLocal()); | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 714 |       // Account for all the free slots in the new or refreshed thread local run. | 
 | 715 |       *bytes_tl_bulk_allocated = thread_local_run->NumberOfFreeSlots() * bracket_size; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 716 |       slot_addr = thread_local_run->AllocSlot(); | 
 | 717 |       // Must succeed now with a new run. | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 718 |       DCHECK(slot_addr != nullptr); | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 719 |     } else { | 
 | 720 |       // The slot is already counted. Leave it as is. | 
 | 721 |       *bytes_tl_bulk_allocated = 0; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 722 |     } | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 723 |     DCHECK(slot_addr != nullptr); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 724 |     if (kTraceRosAlloc) { | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 725 |       LOG(INFO) << "RosAlloc::AllocFromRun() thread-local : 0x" << std::hex | 
 | 726 |                 << reinterpret_cast<intptr_t>(slot_addr) | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 727 |                 << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size) | 
 | 728 |                 << "(" << std::dec << (bracket_size) << ")"; | 
 | 729 |     } | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 730 |     *bytes_allocated = bracket_size; | 
 | 731 |     *usable_size = bracket_size; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 732 |   } else { | 
 | 733 |     // Use the (shared) current run. | 
 | 734 |     MutexLock mu(self, *size_bracket_locks_[idx]); | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 735 |     slot_addr = AllocFromCurrentRunUnlocked(self, idx); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 736 |     if (kTraceRosAlloc) { | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 737 |       LOG(INFO) << "RosAlloc::AllocFromRun() : 0x" << std::hex | 
 | 738 |                 << reinterpret_cast<intptr_t>(slot_addr) | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 739 |                 << "-0x" << (reinterpret_cast<intptr_t>(slot_addr) + bracket_size) | 
 | 740 |                 << "(" << std::dec << (bracket_size) << ")"; | 
 | 741 |     } | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 742 |     if (LIKELY(slot_addr != nullptr)) { | 
 | 743 |       *bytes_allocated = bracket_size; | 
 | 744 |       *usable_size = bracket_size; | 
 | 745 |       *bytes_tl_bulk_allocated = bracket_size; | 
 | 746 |     } | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 747 |   } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 748 |   // The caller verifies that the returned memory is all zero. | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 749 |   return slot_addr; | 
 | 750 | } | 
 | 751 |  | 
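// Illustrative sketch (not part of this file): how a caller might consume the three
// out-parameters that AllocFromRun() fills in above. When a fresh thread-local run is
// installed, bytes_tl_bulk_allocated reports the run's entire free space up front and
// later thread-local hits report 0, so bulk thread-local reservations can be tracked
// separately from per-object accounting. AllocFromRun() may be private in rosalloc.h;
// the helper and counter names below are hypothetical.
static void ExampleAllocAccounting(RosAlloc* rosalloc, Thread* self, size_t size,
                                   size_t* per_object_bytes, size_t* tl_bulk_bytes) {
  size_t bytes_allocated = 0;
  size_t usable_size = 0;
  size_t bytes_tl_bulk_allocated = 0;
  void* obj = rosalloc->AllocFromRun(self, size, &bytes_allocated, &usable_size,
                                     &bytes_tl_bulk_allocated);
  if (obj != nullptr) {
    *per_object_bytes += bytes_allocated;       // Always the bracket size.
    *tl_bulk_bytes += bytes_tl_bulk_allocated;  // 0 for a recycled thread-local slot.
  }
}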
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 752 | size_t RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) { | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 753 |   DCHECK_EQ(run->magic_num_, kMagicNum); | 
 | 754 |   DCHECK_LT(run, ptr); | 
 | 755 |   DCHECK_LT(ptr, run->End()); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 756 |   const size_t idx = run->size_bracket_idx_; | 
 | 757 |   const size_t bracket_size = bracketSizes[idx]; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 758 |   bool run_was_full = false; | 
| Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 759 |   MutexLock brackets_mu(self, *size_bracket_locks_[idx]); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 760 |   if (kIsDebugBuild) { | 
 | 761 |     run_was_full = run->IsFull(); | 
 | 762 |   } | 
 | 763 |   if (kTraceRosAlloc) { | 
 | 764 |     LOG(INFO) << "RosAlloc::FreeFromRun() : 0x" << std::hex << reinterpret_cast<intptr_t>(ptr); | 
 | 765 |   } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 766 |   if (LIKELY(run->IsThreadLocal())) { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 767 |     // It's a thread-local run. Just add the slot to the thread-local free list and return. | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 768 |     DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 769 |     DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end()); | 
 | 770 |     DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end()); | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 771 |     run->AddToThreadLocalFreeList(ptr); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 772 |     if (kTraceRosAlloc) { | 
 | 773 |       LOG(INFO) << "RosAlloc::FreeFromRun() : Freed a slot in a thread local run 0x" << std::hex | 
 | 774 |                 << reinterpret_cast<intptr_t>(run); | 
 | 775 |     } | 
 | 776 |     // A thread local run will be kept as a thread local even if it becomes all free. | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 777 |     return bracket_size; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 778 |   } | 
 | 779 |   // Free the slot in the run. | 
 | 780 |   run->FreeSlot(ptr); | 
| Mathieu Chartier | 58553c7 | 2014-09-16 16:25:55 -0700 | [diff] [blame] | 781 |   auto* non_full_runs = &non_full_runs_[idx]; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 782 |   if (run->IsAllFree()) { | 
 | 783 |     // It has just become completely free. Free the pages of this run. | 
 | 784 |     auto pos = non_full_runs->find(run); | 
 | 785 |     if (pos != non_full_runs->end()) { | 
 | 786 |       non_full_runs->erase(pos); | 
 | 787 |       if (kTraceRosAlloc) { | 
 | 788 |         LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex | 
 | 789 |                   << reinterpret_cast<intptr_t>(run) << " from non_full_runs_"; | 
 | 790 |       } | 
 | 791 |     } | 
 | 792 |     if (run == current_runs_[idx]) { | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 793 |       current_runs_[idx] = dedicated_full_run_; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 794 |     } | 
 | 795 |     DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end()); | 
 | 796 |     DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end()); | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 797 |     run->ZeroHeaderAndSlotHeaders(); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 798 |     { | 
| Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 799 |       MutexLock lock_mu(self, lock_); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 800 |       FreePages(self, run, true); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 801 |     } | 
 | 802 |   } else { | 
 | 803 |     // It is not completely free. If it was not the current run and not | 
 | 804 |     // already in the non-full run set (i.e., it was full), insert it | 
 | 805 |     // into the non-full run set. | 
 | 806 |     if (run != current_runs_[idx]) { | 
| Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 807 |       auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr; | 
| Mathieu Chartier | 58553c7 | 2014-09-16 16:25:55 -0700 | [diff] [blame] | 808 |       auto pos = non_full_runs->find(run); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 809 |       if (pos == non_full_runs->end()) { | 
 | 810 |         DCHECK(run_was_full); | 
 | 811 |         DCHECK(full_runs->find(run) != full_runs->end()); | 
 | 812 |         if (kIsDebugBuild) { | 
 | 813 |           full_runs->erase(run); | 
 | 814 |           if (kTraceRosAlloc) { | 
 | 815 |             LOG(INFO) << "RosAlloc::FreeFromRun() : Erased run 0x" << std::hex | 
 | 816 |                       << reinterpret_cast<intptr_t>(run) << " from full_runs_"; | 
 | 817 |           } | 
 | 818 |         } | 
 | 819 |         non_full_runs->insert(run); | 
 | 820 |         DCHECK(!run->IsFull()); | 
 | 821 |         if (kTraceRosAlloc) { | 
 | 822 |           LOG(INFO) << "RosAlloc::FreeFromRun() : Inserted run 0x" << std::hex | 
 | 823 |                     << reinterpret_cast<intptr_t>(run) | 
 | 824 |                     << " into non_full_runs_[" << std::dec << idx << "]"; | 
 | 825 |         } | 
 | 826 |       } | 
 | 827 |     } | 
 | 828 |   } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 829 |   return bracket_size; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 830 | } | 
 | 831 |  | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 832 | template<bool kUseTail> | 
 | 833 | std::string RosAlloc::Run::FreeListToStr(SlotFreeList<kUseTail>* free_list) { | 
 | 834 |   std::string free_list_str; | 
 | 835 |   const uint8_t idx = size_bracket_idx_; | 
 | 836 |   const size_t bracket_size = bracketSizes[idx]; | 
 | 837 |   for (Slot* slot = free_list->Head(); slot != nullptr; slot = slot->Next()) { | 
 | 838 |     bool is_last = slot->Next() == nullptr; | 
 | 839 |     uintptr_t slot_offset = reinterpret_cast<uintptr_t>(slot) - | 
 | 840 |         reinterpret_cast<uintptr_t>(FirstSlot()); | 
 | 841 |     DCHECK_EQ(slot_offset % bracket_size, 0U); | 
 | 842 |     uintptr_t slot_idx = slot_offset / bracket_size; | 
 | 843 |     if (!is_last) { | 
 | 844 |       free_list_str.append(StringPrintf("%u-", static_cast<uint32_t>(slot_idx))); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 845 |     } else { | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 846 |       free_list_str.append(StringPrintf("%u", static_cast<uint32_t>(slot_idx))); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 847 |     } | 
 | 848 |   } | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 849 |   return free_list_str; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 850 | } | 
 | 851 |  | 
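// Illustrative output of FreeListToStr() above (hypothetical run state, not a test in
// this file): slot indices are joined with '-', and an empty free list yields "".
//   Run* run = ...;                         // free list links slots 0, 2 and 5, in list order
//   run->FreeListToStr(&run->free_list_);   // -> "0-2-5"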
 | 852 | std::string RosAlloc::Run::Dump() { | 
 | 853 |   size_t idx = size_bracket_idx_; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 854 |   std::ostringstream stream; | 
 | 855 |   stream << "RosAlloc Run = " << reinterpret_cast<void*>(this) | 
 | 856 |          << "{ magic_num=" << static_cast<int>(magic_num_) | 
 | 857 |          << " size_bracket_idx=" << idx | 
 | 858 |          << " is_thread_local=" << static_cast<int>(is_thread_local_) | 
 | 859 |          << " to_be_bulk_freed=" << static_cast<int>(to_be_bulk_freed_) | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 860 |          << " free_list=" << FreeListToStr(&free_list_) | 
 | 861 |          << " bulk_free_list=" << FreeListToStr(&bulk_free_list_) | 
 | 862 |          << " thread_local_free_list=" << FreeListToStr(&thread_local_free_list_) | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 863 |          << " }" << std::endl; | 
 | 864 |   return stream.str(); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 865 | } | 
 | 866 |  | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 867 | void RosAlloc::Run::FreeSlot(void* ptr) { | 
 | 868 |   DCHECK(!IsThreadLocal()); | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 869 |   const uint8_t idx = size_bracket_idx_; | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 870 |   const size_t bracket_size = bracketSizes[idx]; | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 871 |   Slot* slot = ToSlot(ptr); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 872 |   // Zero out the memory. | 
 | 873 |   // TODO: Investigate alternate memset since ptr is guaranteed to be aligned to 16. | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 874 |   memset(slot, 0, bracket_size); | 
 | 875 |   free_list_.Add(slot); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 876 |   if (kTraceRosAlloc) { | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 877 |     LOG(INFO) << "RosAlloc::Run::FreeSlot() : " << slot | 
 | 878 |               << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 879 |   } | 
 | 880 | } | 
 | 881 |  | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 882 | inline bool RosAlloc::Run::MergeThreadLocalFreeListToFreeList(bool* is_all_free_after_out) { | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 883 |   DCHECK(IsThreadLocal()); | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 884 |   // Merge the thread local free list into the free list and clear the thread local free list. | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 885 |   const uint8_t idx = size_bracket_idx_; | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 886 |   const size_t thread_local_free_list_size = thread_local_free_list_.Size(); | 
 | 887 |   const size_t size_before = free_list_.Size(); | 
 | 888 |   free_list_.Merge(&thread_local_free_list_); | 
 | 889 |   const size_t size_after = free_list_.Size(); | 
 | 890 |   DCHECK_EQ(size_before < size_after, thread_local_free_list_size > 0); | 
 | 891 |   DCHECK_LE(size_before, size_after); | 
 | 892 |   *is_all_free_after_out = free_list_.Size() == numOfSlots[idx]; | 
 | 893 |   // Return true if at least one slot was added to the free list. | 
 | 894 |   return size_before < size_after; | 
 | 895 | } | 
 | 896 |  | 
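// The mechanics of SlotFreeList<>::Merge() used above live in rosalloc.h. A minimal
// sketch of the splice idea for a singly linked list that also tracks its tail and
// size, which is what makes the merge cheap; every name below is illustrative, not
// the real implementation:
struct SketchFreeList {
  Slot* head = nullptr;
  Slot* tail = nullptr;
  size_t size = 0;
  void Merge(SketchFreeList* other) {
    if (other->head == nullptr) {
      return;  // Nothing to splice in.
    }
    if (head == nullptr) {
      head = other->head;  // This list was empty; adopt the other wholesale.
    } else {
      tail->SetNext(other->head);  // Append the other list after our tail.
    }
    tail = other->tail;
    size += other->size;
    other->head = other->tail = nullptr;  // The source list ends up cleared.
    other->size = 0;
  }
};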
 | 897 | inline void RosAlloc::Run::MergeBulkFreeListToFreeList() { | 
 | 898 |   DCHECK(!IsThreadLocal()); | 
 | 899 |   // Merge the bulk free list into the free list and clear the bulk free list. | 
 | 900 |   free_list_.Merge(&bulk_free_list_); | 
 | 901 | } | 
 | 902 |  | 
 | 903 | inline void RosAlloc::Run::MergeBulkFreeListToThreadLocalFreeList() { | 
 | 904 |   DCHECK(IsThreadLocal()); | 
 | 905 |   // Merge the bulk free list into the thread local free list and clear the bulk free list. | 
 | 906 |   thread_local_free_list_.Merge(&bulk_free_list_); | 
 | 907 | } | 
 | 908 |  | 
 | 909 | inline void RosAlloc::Run::AddToThreadLocalFreeList(void* ptr) { | 
 | 910 |   DCHECK(IsThreadLocal()); | 
 | 911 |   AddToFreeListShared(ptr, &thread_local_free_list_, __FUNCTION__); | 
 | 912 | } | 
 | 913 |  | 
 | 914 | inline size_t RosAlloc::Run::AddToBulkFreeList(void* ptr) { | 
 | 915 |   return AddToFreeListShared(ptr, &bulk_free_list_, __FUNCTION__); | 
 | 916 | } | 
 | 917 |  | 
 | 918 | inline size_t RosAlloc::Run::AddToFreeListShared(void* ptr, | 
 | 919 |                                                  SlotFreeList<true>* free_list, | 
 | 920 |                                                  const char* caller_name) { | 
 | 921 |   const uint8_t idx = size_bracket_idx_; | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 922 |   const size_t bracket_size = bracketSizes[idx]; | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 923 |   Slot* slot = ToSlot(ptr); | 
 | 924 |   memset(slot, 0, bracket_size); | 
 | 925 |   free_list->Add(slot); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 926 |   if (kTraceRosAlloc) { | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 927 |     LOG(INFO) << "RosAlloc::Run::" << caller_name << "() : " << ptr | 
 | 928 |               << ", bracket_size=" << std::dec << bracket_size << ", slot_idx=" << SlotIndex(slot); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 929 |   } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 930 |   return bracket_size; | 
 | 931 | } | 
 | 932 |  | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 933 | inline void RosAlloc::Run::ZeroHeaderAndSlotHeaders() { | 
 | 934 |   DCHECK(IsAllFree()); | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 935 |   const uint8_t idx = size_bracket_idx_; | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 936 |   // Zero the slot headers (the next pointers). | 
 | 937 |   for (Slot* slot = free_list_.Head(); slot != nullptr; ) { | 
 | 938 |     Slot* next_slot = slot->Next(); | 
 | 939 |     slot->Clear(); | 
 | 940 |     slot = next_slot; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 941 |   } | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 942 |   // Zero the header. | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 943 |   memset(this, 0, headerSizes[idx]); | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 944 |   // Check that the entire run is all zero. | 
 | 945 |   if (kIsDebugBuild) { | 
 | 946 |     const size_t size = numOfPages[idx] * kPageSize; | 
 | 947 |     const uintptr_t* word_ptr = reinterpret_cast<uintptr_t*>(this); | 
 | 948 |     for (size_t i = 0; i < size / sizeof(uintptr_t); ++i) { | 
 | 949 |       CHECK_EQ(word_ptr[i], 0U) << "words don't match at index " << i; | 
 | 950 |     } | 
 | 951 |   } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 952 | } | 
 | 953 |  | 
 | 954 | inline void RosAlloc::Run::ZeroData() { | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 955 |   const uint8_t idx = size_bracket_idx_; | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 956 |   uint8_t* slot_begin = reinterpret_cast<uint8_t*>(FirstSlot()); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 957 |   memset(slot_begin, 0, numOfSlots[idx] * bracketSizes[idx]); | 
 | 958 | } | 
 | 959 |  | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 960 | void RosAlloc::Run::InspectAllSlots(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), | 
 | 961 |                                     void* arg) { | 
 | 962 |   size_t idx = size_bracket_idx_; | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 963 |   uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx]; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 964 |   size_t num_slots = numOfSlots[idx]; | 
 | 965 |   size_t bracket_size = IndexToBracketSize(idx); | 
| Mathieu Chartier | c38c5ea | 2015-02-04 17:46:29 -0800 | [diff] [blame] | 966 |   DCHECK_EQ(slot_base + num_slots * bracket_size, | 
 | 967 |             reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize); | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 968 |   // Free slots are on the free lists and allocated/used slots are not. Traverse the free lists | 
 | 969 |   // to record which slots are free in the is_free array. | 
 | 970 |   std::unique_ptr<bool[]> is_free(new bool[num_slots]());  // zero initialized | 
 | 971 |   for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) { | 
 | 972 |     size_t slot_idx = SlotIndex(slot); | 
 | 973 |     DCHECK_LT(slot_idx, num_slots); | 
 | 974 |     is_free[slot_idx] = true; | 
 | 975 |   } | 
 | 976 |   if (IsThreadLocal()) { | 
 | 977 |     for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) { | 
 | 978 |       size_t slot_idx = SlotIndex(slot); | 
 | 979 |       DCHECK_LT(slot_idx, num_slots); | 
 | 980 |       is_free[slot_idx] = true; | 
| Mathieu Chartier | c38c5ea | 2015-02-04 17:46:29 -0800 | [diff] [blame] | 981 |     } | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 982 |   } | 
 | 983 |   for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) { | 
 | 984 |     uint8_t* slot_addr = slot_base + slot_idx * bracket_size; | 
 | 985 |     if (!is_free[slot_idx]) { | 
 | 986 |       handler(slot_addr, slot_addr + bracket_size, bracket_size, arg); | 
 | 987 |     } else { | 
 | 988 |       handler(slot_addr, slot_addr + bracket_size, 0, arg); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 989 |     } | 
 | 990 |   } | 
 | 991 | } | 
 | 992 |  | 
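// Illustrative handler matching the callback signature above (hypothetical, not part
// of this file): free slots are reported with used_bytes == 0 and live slots with the
// bracket size, so a simple accumulator recovers a run's live byte count.
// (ATTRIBUTE_UNUSED is ART's unused-parameter macro.)
static void CountUsedBytesHandler(void* start ATTRIBUTE_UNUSED,
                                  void* end ATTRIBUTE_UNUSED,
                                  size_t used_bytes,
                                  void* arg) {
  *reinterpret_cast<size_t*>(arg) += used_bytes;
}
// Used as, e.g.:
//   size_t live_bytes = 0;
//   run->InspectAllSlots(CountUsedBytesHandler, &live_bytes);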
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 993 | // If true, read the page map entries in BulkFree() without using the | 
 | 994 | // lock for better performance, assuming that the existence of an | 
 | 995 | // allocated chunk/pointer being freed in BulkFree() guarantees that | 
 | 996 | // the page map entry won't change. Currently enabled. | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 997 | static constexpr bool kReadPageMapEntryWithoutLockInBulkFree = true; | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 998 |  | 
| Mathieu Chartier | 8585bad | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 999 | size_t RosAlloc::BulkFree(Thread* self, void** ptrs, size_t num_ptrs) { | 
 | 1000 |   size_t freed_bytes = 0; | 
| Ian Rogers | cf7f191 | 2014-10-22 22:06:39 -0700 | [diff] [blame] | 1001 |   if ((false)) { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1002 |     // Used only to test Free() as GC uses only BulkFree(). | 
 | 1003 |     for (size_t i = 0; i < num_ptrs; ++i) { | 
| Mathieu Chartier | 8585bad | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1004 |       freed_bytes += FreeInternal(self, ptrs[i]); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1005 |     } | 
| Mathieu Chartier | 8585bad | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1006 |     return freed_bytes; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1007 |   } | 
 | 1008 |  | 
 | 1009 |   WriterMutexLock wmu(self, bulk_free_lock_); | 
 | 1010 |  | 
 | 1011 |   // First, add the slots being freed to each run's bulk free list without taking the | 
| Ian Rogers | 5fcfa7d | 2014-05-15 11:43:06 -0700 | [diff] [blame] | 1012 |   // size bracket locks. On host, unordered_set is faster than vector + flag. | 
| Bilyan Borisov | bb661c0 | 2016-04-04 16:27:32 +0100 | [diff] [blame] | 1013 | #ifdef ART_TARGET_ANDROID | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1014 |   std::vector<Run*> runs; | 
 | 1015 | #else | 
| Ian Rogers | 700a402 | 2014-05-19 16:49:03 -0700 | [diff] [blame] | 1016 |   std::unordered_set<Run*, hash_run, eq_run> runs; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1017 | #endif | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1018 |   for (size_t i = 0; i < num_ptrs; i++) { | 
 | 1019 |     void* ptr = ptrs[i]; | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1020 |     DCHECK_LE(base_, ptr); | 
 | 1021 |     DCHECK_LT(ptr, base_ + footprint_); | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1022 |     size_t pm_idx = RoundDownToPageMapIndex(ptr); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1023 |     Run* run = nullptr; | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1024 |     if (kReadPageMapEntryWithoutLockInBulkFree) { | 
 | 1025 |       // Read the page map entries without locking the lock. | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1026 |       uint8_t page_map_entry = page_map_[pm_idx]; | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1027 |       if (kTraceRosAlloc) { | 
 | 1028 |         LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx=" | 
 | 1029 |                   << std::dec << pm_idx | 
 | 1030 |                   << ", page_map_entry=" << static_cast<int>(page_map_entry); | 
 | 1031 |       } | 
 | 1032 |       if (LIKELY(page_map_entry == kPageMapRun)) { | 
 | 1033 |         run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize); | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1034 |       } else if (LIKELY(page_map_entry == kPageMapRunPart)) { | 
 | 1035 |         size_t pi = pm_idx; | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1036 |         // Find the beginning of the run. | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1037 |         do { | 
 | 1038 |           --pi; | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1039 |           DCHECK_LT(pi, capacity_ / kPageSize); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1040 |         } while (page_map_[pi] != kPageMapRun); | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1041 |         run = reinterpret_cast<Run*>(base_ + pi * kPageSize); | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1042 |       } else if (page_map_entry == kPageMapLargeObject) { | 
 | 1043 |         MutexLock mu(self, lock_); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1044 |         freed_bytes += FreePages(self, ptr, false); | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1045 |         continue; | 
 | 1046 |       } else { | 
| Maxim Kazantsev | 2fdeecb | 2014-10-16 10:55:47 +0700 | [diff] [blame] | 1047 |         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry); | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1048 |       } | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1049 |     } else { | 
 | 1050 |       // Read the page map entries with a lock. | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1051 |       MutexLock mu(self, lock_); | 
 | 1052 |       DCHECK_LT(pm_idx, page_map_size_); | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1053 |       uint8_t page_map_entry = page_map_[pm_idx]; | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1054 |       if (kTraceRosAlloc) { | 
 | 1055 |         LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx=" | 
 | 1056 |                   << std::dec << pm_idx | 
 | 1057 |                   << ", page_map_entry=" << static_cast<int>(page_map_entry); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1058 |       } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1059 |       if (LIKELY(page_map_entry == kPageMapRun)) { | 
 | 1060 |         run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize); | 
 | 1061 |       } else if (LIKELY(page_map_entry == kPageMapRunPart)) { | 
 | 1062 |         size_t pi = pm_idx; | 
 | 1063 |         // Find the beginning of the run. | 
 | 1064 |         do { | 
 | 1065 |           --pi; | 
 | 1066 |           DCHECK_LT(pi, capacity_ / kPageSize); | 
 | 1067 |         } while (page_map_[pi] != kPageMapRun); | 
 | 1068 |         run = reinterpret_cast<Run*>(base_ + pi * kPageSize); | 
 | 1069 |       } else if (page_map_entry == kPageMapLargeObject) { | 
 | 1070 |         freed_bytes += FreePages(self, ptr, false); | 
 | 1071 |         continue; | 
 | 1072 |       } else { | 
| Maxim Kazantsev | 2fdeecb | 2014-10-16 10:55:47 +0700 | [diff] [blame] | 1073 |         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_entry); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1074 |       } | 
 | 1075 |     } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1076 |     DCHECK(run != nullptr); | 
 | 1077 |     DCHECK_EQ(run->magic_num_, kMagicNum); | 
 | 1078 |     // Add the slot to the run's bulk free list. | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1079 |     freed_bytes += run->AddToBulkFreeList(ptr); | 
| Bilyan Borisov | bb661c0 | 2016-04-04 16:27:32 +0100 | [diff] [blame] | 1080 | #ifdef ART_TARGET_ANDROID | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1081 |     if (!run->to_be_bulk_freed_) { | 
 | 1082 |       run->to_be_bulk_freed_ = true; | 
 | 1083 |       runs.push_back(run); | 
 | 1084 |     } | 
 | 1085 | #else | 
 | 1086 |     runs.insert(run); | 
 | 1087 | #endif | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1088 |   } | 
 | 1089 |  | 
 | 1090 |   // Now, iterate over the affected runs and merge each run's bulk free | 
 | 1091 |   // list into its free list (for non-thread-local runs) or into its | 
 | 1092 |   // thread-local free list (for thread-local runs). | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1094 |   for (Run* run : runs) { | 
| Bilyan Borisov | bb661c0 | 2016-04-04 16:27:32 +0100 | [diff] [blame] | 1095 | #ifdef ART_TARGET_ANDROID | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1096 |     DCHECK(run->to_be_bulk_freed_); | 
 | 1097 |     run->to_be_bulk_freed_ = false; | 
 | 1098 | #endif | 
 | 1099 |     size_t idx = run->size_bracket_idx_; | 
| Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 1100 |     MutexLock brackets_mu(self, *size_bracket_locks_[idx]); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1101 |     if (run->IsThreadLocal()) { | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1102 |       DCHECK_LT(run->size_bracket_idx_, kNumThreadLocalSizeBrackets); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1103 |       DCHECK(non_full_runs_[idx].find(run) == non_full_runs_[idx].end()); | 
 | 1104 |       DCHECK(full_runs_[idx].find(run) == full_runs_[idx].end()); | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1105 |       run->MergeBulkFreeListToThreadLocalFreeList(); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1106 |       if (kTraceRosAlloc) { | 
 | 1107 |         LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a thread local run 0x" | 
 | 1108 |                   << std::hex << reinterpret_cast<intptr_t>(run); | 
 | 1109 |       } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1110 |       DCHECK(run->IsThreadLocal()); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1111 |       // A thread local run will be kept as a thread local even if | 
 | 1112 |       // it becomes all free. | 
 | 1113 |     } else { | 
 | 1114 |       bool run_was_full = run->IsFull(); | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1115 |       run->MergeBulkFreeListToFreeList(); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1116 |       if (kTraceRosAlloc) { | 
 | 1117 |         LOG(INFO) << "RosAlloc::BulkFree() : Freed slot(s) in a run 0x" << std::hex | 
 | 1118 |                   << reinterpret_cast<intptr_t>(run); | 
 | 1119 |       } | 
 | 1120 |       // Check if the run should be moved to non_full_runs_ or | 
 | 1121 |       // free_page_runs_. | 
| Mathieu Chartier | 58553c7 | 2014-09-16 16:25:55 -0700 | [diff] [blame] | 1122 |       auto* non_full_runs = &non_full_runs_[idx]; | 
| Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1123 |       auto* full_runs = kIsDebugBuild ? &full_runs_[idx] : nullptr; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1124 |       if (run->IsAllFree()) { | 
 | 1125 |         // It has just become completely free. Free the pages of the | 
 | 1126 |         // run. | 
 | 1127 |         bool run_was_current = run == current_runs_[idx]; | 
 | 1128 |         if (run_was_current) { | 
 | 1129 |           DCHECK(full_runs->find(run) == full_runs->end()); | 
 | 1130 |           DCHECK(non_full_runs->find(run) == non_full_runs->end()); | 
 | 1131 |           // If it was a current run, reuse it. | 
 | 1132 |         } else if (run_was_full) { | 
 | 1133 |           // If it was full, remove it from the full run set (debug | 
 | 1134 |           // only). | 
 | 1135 |           if (kIsDebugBuild) { | 
| Ian Rogers | 700a402 | 2014-05-19 16:49:03 -0700 | [diff] [blame] | 1136 |             std::unordered_set<Run*, hash_run, eq_run>::iterator pos = full_runs->find(run); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1137 |             DCHECK(pos != full_runs->end()); | 
 | 1138 |             full_runs->erase(pos); | 
 | 1139 |             if (kTraceRosAlloc) { | 
 | 1140 |               LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex | 
 | 1141 |                         << reinterpret_cast<intptr_t>(run) | 
 | 1142 |                         << " from full_runs_"; | 
 | 1143 |             } | 
 | 1144 |             DCHECK(full_runs->find(run) == full_runs->end()); | 
 | 1145 |           } | 
 | 1146 |         } else { | 
 | 1147 |           // If it was in a non full run set, remove it from the set. | 
 | 1148 |           DCHECK(full_runs->find(run) == full_runs->end()); | 
 | 1149 |           DCHECK(non_full_runs->find(run) != non_full_runs->end()); | 
 | 1150 |           non_full_runs->erase(run); | 
 | 1151 |           if (kTraceRosAlloc) { | 
 | 1152 |             LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex | 
 | 1153 |                       << reinterpret_cast<intptr_t>(run) | 
 | 1154 |                       << " from non_full_runs_"; | 
 | 1155 |           } | 
 | 1156 |           DCHECK(non_full_runs->find(run) == non_full_runs->end()); | 
 | 1157 |         } | 
 | 1158 |         if (!run_was_current) { | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1159 |           run->ZeroHeaderAndSlotHeaders(); | 
| Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 1160 |           MutexLock lock_mu(self, lock_); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1161 |           FreePages(self, run, true); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1162 |         } | 
 | 1163 |       } else { | 
 | 1164 |         // It is not completely free. If it was not the current run and not | 
 | 1165 |         // already in the non-full run set (i.e., it was full), insert | 
 | 1166 |         // it into the non-full run set. | 
 | 1167 |         if (run == current_runs_[idx]) { | 
 | 1168 |           DCHECK(non_full_runs->find(run) == non_full_runs->end()); | 
 | 1169 |           DCHECK(full_runs->find(run) == full_runs->end()); | 
 | 1170 |           // If it was a current run, keep it. | 
 | 1171 |         } else if (run_was_full) { | 
 | 1172 |           // If it was full, remove it from the full run set (debug | 
 | 1173 |           // only) and insert into the non-full run set. | 
 | 1174 |           DCHECK(full_runs->find(run) != full_runs->end()); | 
 | 1175 |           DCHECK(non_full_runs->find(run) == non_full_runs->end()); | 
 | 1176 |           if (kIsDebugBuild) { | 
 | 1177 |             full_runs->erase(run); | 
 | 1178 |             if (kTraceRosAlloc) { | 
 | 1179 |               LOG(INFO) << "RosAlloc::BulkFree() : Erased run 0x" << std::hex | 
 | 1180 |                         << reinterpret_cast<intptr_t>(run) | 
 | 1181 |                         << " from full_runs_"; | 
 | 1182 |             } | 
 | 1183 |           } | 
 | 1184 |           non_full_runs->insert(run); | 
 | 1185 |           if (kTraceRosAlloc) { | 
 | 1186 |             LOG(INFO) << "RosAlloc::BulkFree() : Inserted run 0x" << std::hex | 
 | 1187 |                       << reinterpret_cast<intptr_t>(run) | 
 | 1188 |                       << " into non_full_runs_[" << std::dec << idx << "]"; | 
 | 1189 |           } | 
 | 1190 |         } else { | 
 | 1191 |           // It was not full, so leave it in the non-full run set. | 
 | 1192 |           DCHECK(full_runs->find(run) == full_runs->end()); | 
 | 1193 |           DCHECK(non_full_runs->find(run) != non_full_runs->end()); | 
 | 1194 |         } | 
 | 1195 |       } | 
 | 1196 |     } | 
 | 1197 |   } | 
| Mathieu Chartier | 8585bad | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1198 |   return freed_bytes; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1199 | } | 
 | 1200 |  | 
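// BulkFree() above keys its host-side unordered_set by Run pointer identity through
// the hash_run and eq_run functors declared in rosalloc.h. A minimal sketch of the
// assumed shape of such functors (not copied from the header):
struct SketchHashRun {
  size_t operator()(const Run* run) const {
    return reinterpret_cast<size_t>(run);  // A run's address is already unique.
  }
};
struct SketchEqRun {
  bool operator()(const Run* a, const Run* b) const {
    return a == b;
  }
};
// e.g. std::unordered_set<Run*, SketchHashRun, SketchEqRun> runs;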
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1201 | std::string RosAlloc::DumpPageMap() { | 
 | 1202 |   std::ostringstream stream; | 
 | 1203 |   stream << "RosAlloc PageMap: " << std::endl; | 
 | 1204 |   lock_.AssertHeld(Thread::Current()); | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1205 |   size_t end = page_map_size_; | 
| Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1206 |   FreePageRun* curr_fpr = nullptr; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1207 |   size_t curr_fpr_size = 0; | 
 | 1208 |   size_t remaining_curr_fpr_size = 0; | 
 | 1209 |   size_t num_running_empty_pages = 0; | 
 | 1210 |   for (size_t i = 0; i < end; ++i) { | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1211 |     uint8_t pm = page_map_[i]; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1212 |     switch (pm) { | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1213 |       case kPageMapReleased: | 
 | 1214 |         // Fall-through. | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1215 |       case kPageMapEmpty: { | 
 | 1216 |         FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize); | 
 | 1217 |         if (free_page_runs_.find(fpr) != free_page_runs_.end()) { | 
 | 1218 |           // Encountered a fresh free page run. | 
 | 1219 |           DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0)); | 
 | 1220 |           DCHECK(fpr->IsFree()); | 
| Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1221 |           DCHECK(curr_fpr == nullptr); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1222 |           DCHECK_EQ(curr_fpr_size, static_cast<size_t>(0)); | 
 | 1223 |           curr_fpr = fpr; | 
 | 1224 |           curr_fpr_size = fpr->ByteSize(this); | 
 | 1225 |           DCHECK_EQ(curr_fpr_size % kPageSize, static_cast<size_t>(0)); | 
 | 1226 |           remaining_curr_fpr_size = curr_fpr_size - kPageSize; | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1227 |           stream << "[" << i << "]=" << (pm == kPageMapReleased ? "Released" : "Empty") | 
 | 1228 |                  << " (FPR start) fpr_size=" << curr_fpr_size | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1229 |                  << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1230 |           if (remaining_curr_fpr_size == 0) { | 
 | 1231 |             // Reset at the end of the current free page run. | 
| Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1232 |             curr_fpr = nullptr; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1233 |             curr_fpr_size = 0; | 
 | 1234 |           } | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1235 |           stream << "curr_fpr=0x" << std::hex << reinterpret_cast<intptr_t>(curr_fpr) << std::endl; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1236 |           DCHECK_EQ(num_running_empty_pages, static_cast<size_t>(0)); | 
 | 1237 |         } else { | 
 | 1238 |           // Still part of the current free page run. | 
 | 1239 |           DCHECK_NE(num_running_empty_pages, static_cast<size_t>(0)); | 
| Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1240 |           DCHECK(curr_fpr != nullptr && curr_fpr_size > 0 && remaining_curr_fpr_size > 0); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1241 |           DCHECK_EQ(remaining_curr_fpr_size % kPageSize, static_cast<size_t>(0)); | 
 | 1242 |           DCHECK_GE(remaining_curr_fpr_size, static_cast<size_t>(kPageSize)); | 
 | 1243 |           remaining_curr_fpr_size -= kPageSize; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1244 |           stream << "[" << i << "]=Empty (FPR part)" | 
 | 1245 |                  << " remaining_fpr_size=" << remaining_curr_fpr_size << std::endl; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1246 |           if (remaining_curr_fpr_size == 0) { | 
 | 1247 |             // Reset at the end of the current free page run. | 
| Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1248 |             curr_fpr = nullptr; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1249 |             curr_fpr_size = 0; | 
 | 1250 |           } | 
 | 1251 |         } | 
 | 1252 |         num_running_empty_pages++; | 
 | 1253 |         break; | 
 | 1254 |       } | 
 | 1255 |       case kPageMapLargeObject: { | 
 | 1256 |         DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0)); | 
 | 1257 |         num_running_empty_pages = 0; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1258 |         stream << "[" << i << "]=Large (start)" << std::endl; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1259 |         break; | 
 | 1260 |       } | 
 | 1261 |       case kPageMapLargeObjectPart: | 
 | 1262 |         DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0)); | 
 | 1263 |         num_running_empty_pages = 0; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1264 |         stream << "[" << i << "]=Large (part)" << std::endl; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1265 |         break; | 
 | 1266 |       case kPageMapRun: { | 
 | 1267 |         DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0)); | 
 | 1268 |         num_running_empty_pages = 0; | 
 | 1269 |         Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize); | 
 | 1270 |         size_t idx = run->size_bracket_idx_; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1271 |         stream << "[" << i << "]=Run (start)" | 
 | 1272 |                << " idx=" << idx | 
 | 1273 |                << " numOfPages=" << numOfPages[idx] | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1274 |                << " is_thread_local=" << run->is_thread_local_ | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1275 |                << " is_all_free=" << (run->IsAllFree() ? 1 : 0) | 
 | 1276 |                << std::endl; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1277 |         break; | 
 | 1278 |       } | 
 | 1279 |       case kPageMapRunPart: | 
 | 1280 |         DCHECK_EQ(remaining_curr_fpr_size, static_cast<size_t>(0)); | 
 | 1281 |         num_running_empty_pages = 0; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1282 |         stream << "[" << i << "]=Run (part)" << std::endl; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1283 |         break; | 
 | 1284 |       default: | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1285 |         stream << "[" << i << "]=Unrecognizable page map type: " << static_cast<int>(pm) << std::endl; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1286 |         break; | 
 | 1287 |     } | 
 | 1288 |   } | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1289 |   return stream.str(); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1290 | } | 
 | 1291 |  | 
| Andreas Gampe | d757632 | 2014-10-24 22:13:45 -0700 | [diff] [blame] | 1292 | size_t RosAlloc::UsableSize(const void* ptr) { | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1293 |   DCHECK_LE(base_, ptr); | 
 | 1294 |   DCHECK_LT(ptr, base_ + footprint_); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1295 |   size_t pm_idx = RoundDownToPageMapIndex(ptr); | 
 | 1296 |   MutexLock mu(Thread::Current(), lock_); | 
 | 1297 |   switch (page_map_[pm_idx]) { | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1298 |     case kPageMapReleased: | 
 | 1299 |       // Fall-through. | 
 | 1300 |     case kPageMapEmpty: | 
 | 1301 |       LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr=" | 
 | 1302 |                  << std::hex << reinterpret_cast<intptr_t>(ptr); | 
 | 1303 |       break; | 
 | 1304 |     case kPageMapLargeObject: { | 
 | 1305 |       size_t num_pages = 1; | 
 | 1306 |       size_t idx = pm_idx + 1; | 
 | 1307 |       size_t end = page_map_size_; | 
 | 1308 |       while (idx < end && page_map_[idx] == kPageMapLargeObjectPart) { | 
 | 1309 |         num_pages++; | 
 | 1310 |         idx++; | 
 | 1311 |       } | 
 | 1312 |       return num_pages * kPageSize; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1313 |     } | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1314 |     case kPageMapLargeObjectPart: | 
 | 1315 |       LOG(FATAL) << "Unreachable - " << __PRETTY_FUNCTION__ << ": pm_idx=" << pm_idx << ", ptr=" | 
 | 1316 |                  << std::hex << reinterpret_cast<intptr_t>(ptr); | 
 | 1317 |       break; | 
 | 1318 |     case kPageMapRun: | 
 | 1319 |     case kPageMapRunPart: { | 
 | 1320 |       // Find the beginning of the run. | 
 | 1321 |       while (page_map_[pm_idx] != kPageMapRun) { | 
 | 1322 |         pm_idx--; | 
 | 1323 |         DCHECK_LT(pm_idx, capacity_ / kPageSize); | 
 | 1324 |       } | 
 | 1325 |       DCHECK_EQ(page_map_[pm_idx], kPageMapRun); | 
 | 1326 |       Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize); | 
 | 1327 |       DCHECK_EQ(run->magic_num_, kMagicNum); | 
 | 1328 |       size_t idx = run->size_bracket_idx_; | 
| Andreas Gampe | d757632 | 2014-10-24 22:13:45 -0700 | [diff] [blame] | 1329 |       size_t offset_from_slot_base = reinterpret_cast<const uint8_t*>(ptr) | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1330 |           - (reinterpret_cast<uint8_t*>(run) + headerSizes[idx]); | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1331 |       DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0)); | 
 | 1332 |       return IndexToBracketSize(idx); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1333 |     } | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1334 |     default: { | 
| Maxim Kazantsev | 2fdeecb | 2014-10-16 10:55:47 +0700 | [diff] [blame] | 1335 |       LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(page_map_[pm_idx]); | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1336 |       break; | 
 | 1337 |     } | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1338 |   } | 
 | 1339 |   return 0; | 
 | 1340 | } | 
 | 1341 |  | 
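// Illustrative consequence of the lookup above (hypothetical sizes): the usable size
// is the rounded-up bracket size for run-backed objects and the page-rounded size for
// large objects, never the originally requested size.
//   rosalloc->UsableSize(p60);     // p60 from a 60-byte request -> e.g. 64
//   rosalloc->UsableSize(pLarge);  // from a (3 * kPageSize + 1)-byte request
//                                  //   -> 4 * kPageSize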
 | 1342 | bool RosAlloc::Trim() { | 
 | 1343 |   MutexLock mu(Thread::Current(), lock_); | 
 | 1344 |   FreePageRun* last_free_page_run; | 
 | 1345 |   DCHECK_EQ(footprint_ % kPageSize, static_cast<size_t>(0)); | 
 | 1346 |   auto it = free_page_runs_.rbegin(); | 
 | 1347 |   if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) { | 
 | 1348 |     // Remove the last free page run, if any. | 
 | 1349 |     DCHECK(last_free_page_run->IsFree()); | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1350 |     DCHECK(IsFreePage(ToPageMapIndex(last_free_page_run))); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1351 |     DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0)); | 
 | 1352 |     DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_); | 
 | 1353 |     free_page_runs_.erase(last_free_page_run); | 
 | 1354 |     size_t decrement = last_free_page_run->ByteSize(this); | 
 | 1355 |     size_t new_footprint = footprint_ - decrement; | 
 | 1356 |     DCHECK_EQ(new_footprint % kPageSize, static_cast<size_t>(0)); | 
 | 1357 |     size_t new_num_of_pages = new_footprint / kPageSize; | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1358 |     DCHECK_GE(page_map_size_, new_num_of_pages); | 
 | 1359 |     // Zero out the tail of the page map. | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1360 |     uint8_t* zero_begin = const_cast<uint8_t*>(page_map_) + new_num_of_pages; | 
 | 1361 |     uint8_t* madvise_begin = AlignUp(zero_begin, kPageSize); | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1362 |     DCHECK_LE(madvise_begin, page_map_mem_map_->End()); | 
 | 1363 |     size_t madvise_size = page_map_mem_map_->End() - madvise_begin; | 
 | 1364 |     if (madvise_size > 0) { | 
 | 1365 |       DCHECK_ALIGNED(madvise_begin, kPageSize); | 
 | 1366 |       DCHECK_EQ(RoundUp(madvise_size, kPageSize), madvise_size); | 
| Ian Rogers | c5f1773 | 2014-06-05 20:48:42 -0700 | [diff] [blame] | 1367 |       if (!kMadviseZeroes) { | 
 | 1368 |         memset(madvise_begin, 0, madvise_size); | 
 | 1369 |       } | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1370 |       CHECK_EQ(madvise(madvise_begin, madvise_size, MADV_DONTNEED), 0); | 
 | 1371 |     } | 
 | 1372 |     if (madvise_begin != zero_begin) { | 
 | 1373 |       memset(zero_begin, 0, madvise_begin - zero_begin); | 
 | 1374 |     } | 
 | 1375 |     page_map_size_ = new_num_of_pages; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1376 |     free_page_run_size_map_.resize(new_num_of_pages); | 
 | 1377 |     DCHECK_EQ(free_page_run_size_map_.size(), new_num_of_pages); | 
| Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 1378 |     ArtRosAllocMoreCore(this, -(static_cast<intptr_t>(decrement))); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1379 |     if (kTraceRosAlloc) { | 
 | 1380 |       LOG(INFO) << "RosAlloc::Trim() : decreased the footprint from " | 
 | 1381 |                 << footprint_ << " to " << new_footprint; | 
 | 1382 |     } | 
 | 1383 |     DCHECK_LT(new_footprint, footprint_); | 
 | 1384 |     DCHECK_LT(new_footprint, capacity_); | 
 | 1385 |     footprint_ = new_footprint; | 
 | 1386 |     return true; | 
 | 1387 |   } | 
 | 1388 |   return false; | 
 | 1389 | } | 
 | 1390 |  | 
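// Worked example for the trim above, with illustrative numbers:
//   kPageSize           = 4096
//   footprint_ (before) = 64 pages, new_footprint = 60 pages
//   new_num_of_pages    = 60                       // one page_map_ byte per page
//   zero_begin          = page_map_ + 60
//   madvise_begin       = AlignUp(zero_begin, kPageSize)
// Whole pages from madvise_begin to the end of the map are handed back with
// MADV_DONTNEED (after a memset when kMadviseZeroes is false), while the sub-page
// remainder [zero_begin, madvise_begin) must be zeroed by hand, because madvise(2)
// operates only on whole pages.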
 | 1391 | void RosAlloc::InspectAll(void (*handler)(void* start, void* end, size_t used_bytes, void* callback_arg), | 
 | 1392 |                           void* arg) { | 
 | 1393 |   // Note: no need to use this to release pages as we already do so in FreePages(). | 
| Mathieu Chartier | 2cebb24 | 2015-04-21 16:50:40 -0700 | [diff] [blame] | 1394 |   if (handler == nullptr) { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1395 |     return; | 
 | 1396 |   } | 
 | 1397 |   MutexLock mu(Thread::Current(), lock_); | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1398 |   size_t pm_end = page_map_size_; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1399 |   size_t i = 0; | 
 | 1400 |   while (i < pm_end) { | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1401 |     uint8_t pm = page_map_[i]; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1402 |     switch (pm) { | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1403 |       case kPageMapReleased: | 
 | 1404 |         // Fall-through. | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1405 |       case kPageMapEmpty: { | 
 | 1406 |         // The start of a free page run. | 
 | 1407 |         FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize); | 
 | 1408 |         DCHECK(free_page_runs_.find(fpr) != free_page_runs_.end()); | 
 | 1409 |         size_t fpr_size = fpr->ByteSize(this); | 
| Roland Levillain | 14d9057 | 2015-07-16 10:52:26 +0100 | [diff] [blame] | 1410 |         DCHECK_ALIGNED(fpr_size, kPageSize); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1411 |         void* start = fpr; | 
| Hiroshi Yamauchi | 573f7d2 | 2013-12-17 11:54:23 -0800 | [diff] [blame] | 1412 |         if (kIsDebugBuild) { | 
 | 1413 |           // In the debug build, the first page of a free page run | 
 | 1414 |           // contains a magic number for debugging. Exclude it. | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1415 |           start = reinterpret_cast<uint8_t*>(fpr) + kPageSize; | 
| Hiroshi Yamauchi | 573f7d2 | 2013-12-17 11:54:23 -0800 | [diff] [blame] | 1416 |         } | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1417 |         void* end = reinterpret_cast<uint8_t*>(fpr) + fpr_size; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1418 |         handler(start, end, 0, arg); | 
 | 1419 |         size_t num_pages = fpr_size / kPageSize; | 
 | 1420 |         if (kIsDebugBuild) { | 
 | 1421 |           for (size_t j = i + 1; j < i + num_pages; ++j) { | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1422 |             DCHECK(IsFreePage(j)); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1423 |           } | 
 | 1424 |         } | 
 | 1425 |         i += fpr_size / kPageSize; | 
 | 1426 |         DCHECK_LE(i, pm_end); | 
 | 1427 |         break; | 
 | 1428 |       } | 
 | 1429 |       case kPageMapLargeObject: { | 
 | 1430 |         // The start of a large object. | 
 | 1431 |         size_t num_pages = 1; | 
 | 1432 |         size_t idx = i + 1; | 
 | 1433 |         while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) { | 
 | 1434 |           num_pages++; | 
 | 1435 |           idx++; | 
 | 1436 |         } | 
 | 1437 |         void* start = base_ + i * kPageSize; | 
 | 1438 |         void* end = base_ + (i + num_pages) * kPageSize; | 
 | 1439 |         size_t used_bytes = num_pages * kPageSize; | 
 | 1440 |         handler(start, end, used_bytes, arg); | 
 | 1441 |         if (kIsDebugBuild) { | 
 | 1442 |           for (size_t j = i + 1; j < i + num_pages; ++j) { | 
 | 1443 |             DCHECK_EQ(page_map_[j], kPageMapLargeObjectPart); | 
 | 1444 |           } | 
 | 1445 |         } | 
 | 1446 |         i += num_pages; | 
 | 1447 |         DCHECK_LE(i, pm_end); | 
 | 1448 |         break; | 
 | 1449 |       } | 
 | 1450 |       case kPageMapLargeObjectPart: | 
| Maxim Kazantsev | 2fdeecb | 2014-10-16 10:55:47 +0700 | [diff] [blame] | 1451 |         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1452 |         break; | 
 | 1453 |       case kPageMapRun: { | 
 | 1454 |         // The start of a run. | 
 | 1455 |         Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize); | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1456 |         DCHECK_EQ(run->magic_num_, kMagicNum); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1457 |         // The dedicated full run contains no real allocations, but it is backed by static | 
 | 1458 |         // storage outside the page map, so it is never encountered here. | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1459 |         run->InspectAllSlots(handler, arg); | 
 | 1460 |         size_t num_pages = numOfPages[run->size_bracket_idx_]; | 
 | 1461 |         if (kIsDebugBuild) { | 
 | 1462 |           for (size_t j = i + 1; j < i + num_pages; ++j) { | 
 | 1463 |             DCHECK_EQ(page_map_[j], kPageMapRunPart); | 
 | 1464 |           } | 
 | 1465 |         } | 
 | 1466 |         i += num_pages; | 
 | 1467 |         DCHECK_LE(i, pm_end); | 
 | 1468 |         break; | 
 | 1469 |       } | 
 | 1470 |       case kPageMapRunPart: | 
| Maxim Kazantsev | 2fdeecb | 2014-10-16 10:55:47 +0700 | [diff] [blame] | 1471 |         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1472 |         break; | 
 | 1473 |       default: | 
| Maxim Kazantsev | 2fdeecb | 2014-10-16 10:55:47 +0700 | [diff] [blame] | 1474 |         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1475 |         break; | 
 | 1476 |     } | 
 | 1477 |   } | 
 | 1478 | } | 
 | 1479 |  | 
 | 1480 | size_t RosAlloc::Footprint() { | 
 | 1481 |   MutexLock mu(Thread::Current(), lock_); | 
 | 1482 |   return footprint_; | 
 | 1483 | } | 
 | 1484 |  | 
 | 1485 | size_t RosAlloc::FootprintLimit() { | 
 | 1486 |   MutexLock mu(Thread::Current(), lock_); | 
 | 1487 |   return capacity_; | 
 | 1488 | } | 
 | 1489 |  | 
 | 1490 | void RosAlloc::SetFootprintLimit(size_t new_capacity) { | 
 | 1491 |   MutexLock mu(Thread::Current(), lock_); | 
 | 1492 |   DCHECK_EQ(RoundUp(new_capacity, kPageSize), new_capacity); | 
 | 1493 |   // Only growing the capacity is supported here; the footprint can shrink via Trim(). | 
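 |  |   // A minimal usage sketch (hypothetical caller; assumes the current limit is | 
 |  |   // page-aligned, so adding kPageSize keeps the alignment the DCHECK above requires): | 
 |  |   //   rosalloc->SetFootprintLimit(rosalloc->FootprintLimit() + kPageSize); | 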
 | 1494 |   if (capacity_ < new_capacity) { | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1495 |     CHECK_LE(new_capacity, max_capacity_); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1496 |     capacity_ = new_capacity; | 
 | 1497 |     VLOG(heap) << "new capacity=" << capacity_; | 
 | 1498 |   } | 
 | 1499 | } | 
 | 1500 |  | 
| Lei Li | 5784621 | 2015-06-11 17:50:20 +0800 | [diff] [blame] | 1501 | // The following may be called by a mutator thread itself just before thread termination. | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1502 | size_t RosAlloc::RevokeThreadLocalRuns(Thread* thread) { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1503 |   Thread* self = Thread::Current(); | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1504 |   size_t free_bytes = 0U; | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1505 |   for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1506 |     MutexLock mu(self, *size_bracket_locks_[idx]); | 
| Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1507 |     Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx)); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1508 |     CHECK(thread_local_run != nullptr); | 
 | 1509 |     // Invalid means already revoked. | 
 | 1510 |     DCHECK(thread_local_run->IsThreadLocal()); | 
 | 1511 |     if (thread_local_run != dedicated_full_run_) { | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1512 |       // Note the thread local run may not be full here. | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1513 |       thread->SetRosAllocRun(idx, dedicated_full_run_); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1514 |       DCHECK_EQ(thread_local_run->magic_num_, kMagicNum); | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1515 |       // Count the number of free slots left. | 
 | 1516 |       size_t num_free_slots = thread_local_run->NumberOfFreeSlots(); | 
 | 1517 |       free_bytes += num_free_slots * bracketSizes[idx]; | 
| Lei Li | 5784621 | 2015-06-11 17:50:20 +0800 | [diff] [blame] | 1518 |       // The above bracket index lock guards the thread local free list against a race | 
 | 1519 |       // with the GC thread unioning the bulk free list into the thread local free list | 
 | 1520 |       // in BulkFree. While the run is thread local, the GC thread updates the thread | 
 | 1521 |       // local free list in BulkFree, and the latest thread local free list is merged | 
 | 1522 |       // into the free list either when this run becomes full or when it is revoked | 
 | 1523 |       // here, so the free list will be updated either way. Once the run is no longer | 
 | 1524 |       // thread local, the GC thread merges the bulk free list directly in the next | 
 | 1525 |       // BulkFree. Thus there is no need to merge the bulk free list into the free list here. | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1526 |       bool dont_care; | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1527 |       thread_local_run->MergeThreadLocalFreeListToFreeList(&dont_care); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1528 |       thread_local_run->SetIsThreadLocal(false); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1529 |       DCHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end()); | 
 | 1530 |       DCHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end()); | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1531 |       RevokeRun(self, idx, thread_local_run); | 
 | 1532 |     } | 
 | 1533 |   } | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1534 |   return free_bytes; | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1535 | } | 
 | 1536 |  | 
 | 1537 | void RosAlloc::RevokeRun(Thread* self, size_t idx, Run* run) { | 
 | 1538 |   size_bracket_locks_[idx]->AssertHeld(self); | 
 | 1539 |   DCHECK(run != dedicated_full_run_); | 
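 |  |   // Classification sketch (summarizing the branches below): a revoked run goes into | 
 |  |   // full_runs_ if it is full (debug builds only), has its header and slot headers | 
 |  |   // zeroed and its pages freed if it is completely free, and goes into | 
 |  |   // non_full_runs_ otherwise. | 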
 | 1540 |   if (run->IsFull()) { | 
 | 1541 |     if (kIsDebugBuild) { | 
 | 1542 |       full_runs_[idx].insert(run); | 
 | 1543 |       DCHECK(full_runs_[idx].find(run) != full_runs_[idx].end()); | 
 | 1544 |       if (kTraceRosAlloc) { | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1545 |         LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1546 |                   << reinterpret_cast<intptr_t>(run) | 
 | 1547 |                   << " into full_runs_[" << std::dec << idx << "]"; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1548 |       } | 
 | 1549 |     } | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1550 |   } else if (run->IsAllFree()) { | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1551 |     run->ZeroHeaderAndSlotHeaders(); | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1552 |     MutexLock mu(self, lock_); | 
 | 1553 |     FreePages(self, run, true); | 
 | 1554 |   } else { | 
 | 1555 |     non_full_runs_[idx].insert(run); | 
 | 1556 |     DCHECK(non_full_runs_[idx].find(run) != non_full_runs_[idx].end()); | 
 | 1557 |     if (kTraceRosAlloc) { | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1558 |       LOG(INFO) << __PRETTY_FUNCTION__ << " : Inserted run 0x" << std::hex | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1559 |                 << reinterpret_cast<intptr_t>(run) | 
 | 1560 |                 << " into non_full_runs_[" << std::dec << idx << "]"; | 
 | 1561 |     } | 
 | 1562 |   } | 
 | 1563 | } | 
 | 1564 |  | 
 | 1565 | void RosAlloc::RevokeThreadUnsafeCurrentRuns() { | 
 | 1566 |   // Revoke the current runs that share the same bracket indexes as the thread local runs. | 
 | 1567 |   Thread* self = Thread::Current(); | 
 | 1568 |   for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) { | 
 | 1569 |     MutexLock mu(self, *size_bracket_locks_[idx]); | 
 | 1570 |     if (current_runs_[idx] != dedicated_full_run_) { | 
 | 1571 |       RevokeRun(self, idx, current_runs_[idx]); | 
 | 1572 |       current_runs_[idx] = dedicated_full_run_; | 
 | 1573 |     } | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1574 |   } | 
 | 1575 | } | 
 | 1576 |  | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1577 | size_t RosAlloc::RevokeAllThreadLocalRuns() { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1578 |   // This is called when mutator threads won't allocate, such as at | 
 | 1579 |   // Zygote creation time or during a GC pause. | 
| Hiroshi Yamauchi | f5b0e20 | 2014-02-11 17:02:22 -0800 | [diff] [blame] | 1580 |   MutexLock mu(Thread::Current(), *Locks::runtime_shutdown_lock_); | 
 | 1581 |   MutexLock mu2(Thread::Current(), *Locks::thread_list_lock_); | 
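 |  |   // Note the lock order here: runtime_shutdown_lock_, then thread_list_lock_, then | 
 |  |   // the per-bracket size_bracket_locks_ taken inside RevokeThreadLocalRuns() below. | 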
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1582 |   std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList(); | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1583 |   size_t free_bytes = 0U; | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1584 |   for (Thread* thread : thread_list) { | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1585 |     free_bytes += RevokeThreadLocalRuns(thread); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1586 |   } | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1587 |   RevokeThreadUnsafeCurrentRuns(); | 
| Hiroshi Yamauchi | 4460a84 | 2015-03-09 11:57:48 -0700 | [diff] [blame] | 1588 |   return free_bytes; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1589 | } | 
 | 1590 |  | 
| Hiroshi Yamauchi | c93c530 | 2014-03-20 16:15:37 -0700 | [diff] [blame] | 1591 | void RosAlloc::AssertThreadLocalRunsAreRevoked(Thread* thread) { | 
 | 1592 |   if (kIsDebugBuild) { | 
 | 1593 |     Thread* self = Thread::Current(); | 
 | 1594 |     // Avoid race conditions on the bulk free lists with BulkFree() (GC). | 
| Mathieu Chartier | a1c1c71 | 2014-06-23 17:53:09 -0700 | [diff] [blame] | 1595 |     ReaderMutexLock wmu(self, bulk_free_lock_); | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1596 |     for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; idx++) { | 
| Hiroshi Yamauchi | c93c530 | 2014-03-20 16:15:37 -0700 | [diff] [blame] | 1597 |       MutexLock mu(self, *size_bracket_locks_[idx]); | 
| Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1598 |       Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(idx)); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1599 |       DCHECK(thread_local_run == nullptr || thread_local_run == dedicated_full_run_); | 
| Hiroshi Yamauchi | c93c530 | 2014-03-20 16:15:37 -0700 | [diff] [blame] | 1600 |     } | 
 | 1601 |   } | 
 | 1602 | } | 
 | 1603 |  | 
 | 1604 | void RosAlloc::AssertAllThreadLocalRunsAreRevoked() { | 
 | 1605 |   if (kIsDebugBuild) { | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1606 |     Thread* self = Thread::Current(); | 
| Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 1607 |     MutexLock shutdown_mu(self, *Locks::runtime_shutdown_lock_); | 
 | 1608 |     MutexLock thread_list_mu(self, *Locks::thread_list_lock_); | 
| Hiroshi Yamauchi | c93c530 | 2014-03-20 16:15:37 -0700 | [diff] [blame] | 1609 |     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList(); | 
 | 1610 |     for (Thread* t : thread_list) { | 
 | 1611 |       AssertThreadLocalRunsAreRevoked(t); | 
 | 1612 |     } | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1613 |     for (size_t idx = 0; idx < kNumThreadLocalSizeBrackets; ++idx) { | 
| Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 1614 |       MutexLock brackets_mu(self, *size_bracket_locks_[idx]); | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1615 |       CHECK_EQ(current_runs_[idx], dedicated_full_run_); | 
 | 1616 |     } | 
| Hiroshi Yamauchi | c93c530 | 2014-03-20 16:15:37 -0700 | [diff] [blame] | 1617 |   } | 
 | 1618 | } | 
 | 1619 |  | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1620 | void RosAlloc::Initialize() { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1621 |   // bracketSizes. | 
| Hiroshi Yamauchi | 7ed9c56 | 2016-02-02 15:22:09 -0800 | [diff] [blame] | 1622 |   static_assert(kNumRegularSizeBrackets == kNumOfSizeBrackets - 2, | 
 | 1623 |                 "There should be two non-regular brackets"); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1624 |   for (size_t i = 0; i < kNumOfSizeBrackets; i++) { | 
| Hiroshi Yamauchi | 7ed9c56 | 2016-02-02 15:22:09 -0800 | [diff] [blame] | 1625 |     if (i < kNumThreadLocalSizeBrackets) { | 
 | 1626 |       bracketSizes[i] = kThreadLocalBracketQuantumSize * (i + 1); | 
 | 1627 |     } else if (i < kNumRegularSizeBrackets) { | 
 | 1628 |       bracketSizes[i] = kBracketQuantumSize * (i - kNumThreadLocalSizeBrackets + 1) + | 
 | 1629 |           (kThreadLocalBracketQuantumSize * kNumThreadLocalSizeBrackets); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1630 |     } else if (i == kNumOfSizeBrackets - 2) { | 
 | 1631 |       bracketSizes[i] = 1 * KB; | 
 | 1632 |     } else { | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1633 |       DCHECK_EQ(i, kNumOfSizeBrackets - 1); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1634 |       bracketSizes[i] = 2 * KB; | 
 | 1635 |     } | 
 | 1636 |     if (kTraceRosAlloc) { | 
 | 1637 |       LOG(INFO) << "bracketSizes[" << i << "]=" << bracketSizes[i]; | 
 | 1638 |     } | 
 | 1639 |   } | 
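 |  |   // For illustration, assuming the constants in rosalloc.h at the time of writing | 
 |  |   // (kThreadLocalBracketQuantumSize == 8, kBracketQuantumSize == 16, | 
 |  |   // kNumThreadLocalSizeBrackets == 16, kNumRegularSizeBrackets == 40), this yields | 
 |  |   // bracket sizes 8, 16, ..., 128, then 144, 160, ..., 512, then 1024 and 2048. | 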
 | 1640 |   // numOfPages. | 
 | 1641 |   for (size_t i = 0; i < kNumOfSizeBrackets; i++) { | 
| Hiroshi Yamauchi | 7ed9c56 | 2016-02-02 15:22:09 -0800 | [diff] [blame] | 1642 |     if (i < kNumThreadLocalSizeBrackets) { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1643 |       numOfPages[i] = 1; | 
| Hiroshi Yamauchi | 7ed9c56 | 2016-02-02 15:22:09 -0800 | [diff] [blame] | 1644 |     } else if (i < (kNumThreadLocalSizeBrackets + kNumRegularSizeBrackets) / 2) { | 
| Hiroshi Yamauchi | fc067bf | 2016-03-23 14:22:34 -0700 | [diff] [blame] | 1645 |       numOfPages[i] = 1; | 
| Hiroshi Yamauchi | 7ed9c56 | 2016-02-02 15:22:09 -0800 | [diff] [blame] | 1646 |     } else if (i < kNumRegularSizeBrackets) { | 
| Hiroshi Yamauchi | fc067bf | 2016-03-23 14:22:34 -0700 | [diff] [blame] | 1647 |       numOfPages[i] = 1; | 
| Hiroshi Yamauchi | 7ed9c56 | 2016-02-02 15:22:09 -0800 | [diff] [blame] | 1648 |     } else if (i == kNumOfSizeBrackets - 2) { | 
| Hiroshi Yamauchi | fc067bf | 2016-03-23 14:22:34 -0700 | [diff] [blame] | 1649 |       numOfPages[i] = 2; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1650 |     } else { | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1651 |       DCHECK_EQ(i, kNumOfSizeBrackets - 1); | 
| Hiroshi Yamauchi | fc067bf | 2016-03-23 14:22:34 -0700 | [diff] [blame] | 1652 |       numOfPages[i] = 4; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1653 |     } | 
 | 1654 |     if (kTraceRosAlloc) { | 
 | 1655 |       LOG(INFO) << "numOfPages[" << i << "]=" << numOfPages[i]; | 
 | 1656 |     } | 
 | 1657 |   } | 
 | 1658 |   // Compute numOfSlots and headerSizes. | 
 | 1659 |   for (size_t i = 0; i < kNumOfSizeBrackets; i++) { | 
 | 1660 |     size_t bracket_size = bracketSizes[i]; | 
 | 1661 |     size_t run_size = kPageSize * numOfPages[i]; | 
 | 1662 |     size_t max_num_of_slots = run_size / bracket_size; | 
 | 1663 |     // Compute the actual number of slots by taking the header and | 
 | 1664 |     // alignment into account. | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1665 |     size_t fixed_header_size = RoundUp(Run::fixed_header_size(), sizeof(uint64_t)); | 
 | 1666 |     DCHECK_EQ(fixed_header_size, 80U); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1667 |     size_t header_size = 0; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1668 |     size_t num_of_slots = 0; | 
 | 1669 |     // Search for the maximum number of slots that allows enough space | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1670 |     // for the header. | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1671 |     for (int s = max_num_of_slots; s >= 0; s--) { | 
 | 1672 |       size_t tmp_slots_size = bracket_size * s; | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1673 |       size_t tmp_unaligned_header_size = fixed_header_size; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1674 |       // Align up the unaligned header size. bracket_size may not be a power of two. | 
 | 1675 |       size_t tmp_header_size = (tmp_unaligned_header_size % bracket_size == 0) ? | 
 | 1676 |           tmp_unaligned_header_size : | 
 | 1677 |           tmp_unaligned_header_size + (bracket_size - tmp_unaligned_header_size % bracket_size); | 
| Hiroshi Yamauchi | 7ed9c56 | 2016-02-02 15:22:09 -0800 | [diff] [blame] | 1678 |       DCHECK_EQ(tmp_header_size % bracket_size, 0U); | 
 | 1679 |       DCHECK_EQ(tmp_header_size % sizeof(uint64_t), 0U); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1680 |       if (tmp_slots_size + tmp_header_size <= run_size) { | 
 | 1681 |         // Found the right number of slots, that is, there was enough | 
 | 1682 |         // space for the header (including the free lists). | 
 | 1683 |         num_of_slots = s; | 
 | 1684 |         header_size = tmp_header_size; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1685 |         break; | 
 | 1686 |       } | 
 | 1687 |     } | 
| Hiroshi Yamauchi | 7ed9c56 | 2016-02-02 15:22:09 -0800 | [diff] [blame] | 1688 |     DCHECK_GT(num_of_slots, 0U) << i; | 
 | 1689 |     DCHECK_GT(header_size, 0U) << i; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1690 |     // Add the padding for the alignment remainder. | 
 | 1691 |     header_size += run_size % bracket_size; | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1692 |     DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1693 |     numOfSlots[i] = num_of_slots; | 
 | 1694 |     headerSizes[i] = header_size; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1695 |     if (kTraceRosAlloc) { | 
 | 1696 |       LOG(INFO) << "numOfSlots[" << i << "]=" << numOfSlots[i] | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1697 |                 << ", headerSizes[" << i << "]=" << headerSizes[i]; | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1698 |     } | 
 | 1699 |   } | 
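 |  |   // Worked example under the same assumed constants, with kPageSize == 4096: for | 
 |  |   // bracket 0 (8-byte slots in a one-page run), the 80-byte fixed header is already | 
 |  |   // a multiple of 8, so the search settles on num_of_slots = (4096 - 80) / 8 = 502 | 
 |  |   // and header_size = 80; indeed 80 + 502 * 8 == 4096, matching the DCHECK above. | 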
| Hiroshi Yamauchi | 7ed9c56 | 2016-02-02 15:22:09 -0800 | [diff] [blame] | 1700 |   // Set up the dedicated full run so that nobody can successfully allocate from it. | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1701 |   if (kIsDebugBuild) { | 
 | 1702 |     dedicated_full_run_->magic_num_ = kMagicNum; | 
 | 1703 |   } | 
 | 1704 |   // It doesn't matter which size bracket we use since the main goal is to have the allocation | 
 | 1705 |   // fail 100% of the time you attempt to allocate into the dedicated full run. | 
 | 1706 |   dedicated_full_run_->size_bracket_idx_ = 0; | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1707 |   DCHECK_EQ(dedicated_full_run_->FreeList()->Size(), 0U);  // It looks full. | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1708 |   dedicated_full_run_->SetIsThreadLocal(true); | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1709 |  | 
 | 1710 |   // The smallest bracket size must be at least as large as sizeof(Slot). | 
 | 1711 |   DCHECK_LE(sizeof(Slot), bracketSizes[0]) << "sizeof(Slot) <= the smallest bracket size"; | 
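 |  |   // (The free lists are intrusive: a freed slot's own memory is reused to hold the | 
 |  |   // Slot node, which is why every slot must be able to contain one.) | 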
| Hiroshi Yamauchi | 7ed9c56 | 2016-02-02 15:22:09 -0800 | [diff] [blame] | 1712 |   // Check the invariants between the max bracket sizes and the number of brackets. | 
 | 1713 |   DCHECK_EQ(kMaxThreadLocalBracketSize, bracketSizes[kNumThreadLocalSizeBrackets - 1]); | 
 | 1714 |   DCHECK_EQ(kMaxRegularBracketSize, bracketSizes[kNumRegularSizeBrackets - 1]); | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1715 | } | 
 | 1716 |  | 
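 |  | // The two callbacks below are meant to be paired with the page-map walk above, | 
 |  | // which reports free ranges with used_bytes == 0 (hence the early returns). A | 
 |  | // minimal usage sketch (hypothetical caller): | 
 |  | //   size_t bytes = 0; | 
 |  | //   rosalloc->InspectAll(RosAlloc::BytesAllocatedCallback, &bytes); | 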
| Ian Rogers | 6a3c1fc | 2014-10-31 00:33:20 -0700 | [diff] [blame] | 1717 | void RosAlloc::BytesAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED, | 
 | 1718 |                                       size_t used_bytes, void* arg) { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1719 |   if (used_bytes == 0) { | 
 | 1720 |     return; | 
 | 1721 |   } | 
 | 1722 |   size_t* bytes_allocated = reinterpret_cast<size_t*>(arg); | 
 | 1723 |   *bytes_allocated += used_bytes; | 
 | 1724 | } | 
 | 1725 |  | 
| Ian Rogers | 6a3c1fc | 2014-10-31 00:33:20 -0700 | [diff] [blame] | 1726 | void RosAlloc::ObjectsAllocatedCallback(void* start ATTRIBUTE_UNUSED, void* end ATTRIBUTE_UNUSED, | 
 | 1727 |                                         size_t used_bytes, void* arg) { | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 1728 |   if (used_bytes == 0) { | 
 | 1729 |     return; | 
 | 1730 |   } | 
 | 1731 |   size_t* objects_allocated = reinterpret_cast<size_t*>(arg); | 
 | 1732 |   ++(*objects_allocated); | 
 | 1733 | } | 
 | 1734 |  | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1735 | void RosAlloc::Verify() { | 
 | 1736 |   Thread* self = Thread::Current(); | 
 | 1737 |   CHECK(Locks::mutator_lock_->IsExclusiveHeld(self)) | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1738 |       << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__; | 
| Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 1739 |   MutexLock thread_list_mu(self, *Locks::thread_list_lock_); | 
| Mathieu Chartier | a1c1c71 | 2014-06-23 17:53:09 -0700 | [diff] [blame] | 1740 |   ReaderMutexLock wmu(self, bulk_free_lock_); | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1741 |   std::vector<Run*> runs; | 
 | 1742 |   { | 
| Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 1743 |     MutexLock lock_mu(self, lock_); | 
| Hiroshi Yamauchi | 26d69ff | 2014-02-27 11:27:10 -0800 | [diff] [blame] | 1744 |     size_t pm_end = page_map_size_; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1745 |     size_t i = 0; | 
| Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 1746 |     size_t memory_tool_modifier = is_running_on_memory_tool_ ? | 
 | 1747 |         2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes :  // Redzones before and after. | 
| Andreas Gampe | fef16ad | 2015-02-19 16:44:32 -0800 | [diff] [blame] | 1748 |         0; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1749 |     while (i < pm_end) { | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1750 |       uint8_t pm = page_map_[i]; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1751 |       switch (pm) { | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1752 |         case kPageMapReleased: | 
 | 1753 |           // Fall-through. | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1754 |         case kPageMapEmpty: { | 
 | 1755 |           // The start of a free page run. | 
 | 1756 |           FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize); | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1757 |           DCHECK_EQ(fpr->magic_num_, kMagicNumFree); | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1758 |           CHECK(free_page_runs_.find(fpr) != free_page_runs_.end()) | 
 | 1759 |               << "An empty page must belong to the free page run set"; | 
 | 1760 |           size_t fpr_size = fpr->ByteSize(this); | 
| Roland Levillain | 14d9057 | 2015-07-16 10:52:26 +0100 | [diff] [blame] | 1761 |           CHECK_ALIGNED(fpr_size, kPageSize) | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1762 |               << "A free page run size isn't page-aligned : " << fpr_size; | 
 | 1763 |           size_t num_pages = fpr_size / kPageSize; | 
 | 1764 |           CHECK_GT(num_pages, static_cast<uintptr_t>(0)) | 
 | 1765 |               << "A free page run size must be > 0 : " << fpr_size; | 
 | 1766 |           for (size_t j = i + 1; j < i + num_pages; ++j) { | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1767 |             CHECK(IsFreePage(j)) | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1768 |                 << "A mismatch between the page map table for kPageMapEmpty " | 
 | 1769 |                 << " at page index " << j | 
 | 1770 |                 << " and the free page run size : page index range : " | 
 | 1771 |                 << i << " to " << (i + num_pages) << std::endl << DumpPageMap(); | 
 | 1772 |           } | 
 | 1773 |           i += num_pages; | 
 | 1774 |           CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end | 
 | 1775 |                               << std::endl << DumpPageMap(); | 
 | 1776 |           break; | 
 | 1777 |         } | 
 | 1778 |         case kPageMapLargeObject: { | 
 | 1779 |           // The start of a large object. | 
 | 1780 |           size_t num_pages = 1; | 
 | 1781 |           size_t idx = i + 1; | 
 | 1782 |           while (idx < pm_end && page_map_[idx] == kPageMapLargeObjectPart) { | 
 | 1783 |             num_pages++; | 
 | 1784 |             idx++; | 
 | 1785 |           } | 
| Andreas Gampe | d757632 | 2014-10-24 22:13:45 -0700 | [diff] [blame] | 1786 |           uint8_t* start = base_ + i * kPageSize; | 
| Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 1787 |           if (is_running_on_memory_tool_) { | 
 | 1788 |             start += ::art::gc::space::kDefaultMemoryToolRedZoneBytes; | 
| Andreas Gampe | d757632 | 2014-10-24 22:13:45 -0700 | [diff] [blame] | 1789 |           } | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1790 |           mirror::Object* obj = reinterpret_cast<mirror::Object*>(start); | 
 | 1791 |           size_t obj_size = obj->SizeOf(); | 
| Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 1792 |           CHECK_GT(obj_size + memory_tool_modifier, kLargeSizeThreshold) | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1793 |               << "A rosalloc large object size must be > " << kLargeSizeThreshold; | 
| Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 1794 |           CHECK_EQ(num_pages, RoundUp(obj_size + memory_tool_modifier, kPageSize) / kPageSize) | 
 | 1795 |               << "A rosalloc large object size " << obj_size + memory_tool_modifier | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1796 |               << " does not match the page map table " << (num_pages * kPageSize) | 
 | 1797 |               << std::endl << DumpPageMap(); | 
 | 1798 |           i += num_pages; | 
 | 1799 |           CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end | 
 | 1800 |                               << std::endl << DumpPageMap(); | 
 | 1801 |           break; | 
 | 1802 |         } | 
 | 1803 |         case kPageMapLargeObjectPart: | 
| Maxim Kazantsev | 2fdeecb | 2014-10-16 10:55:47 +0700 | [diff] [blame] | 1804 |           LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl | 
 |  |                      << DumpPageMap(); | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1805 |           break; | 
 | 1806 |         case kPageMapRun: { | 
 | 1807 |           // The start of a run. | 
 | 1808 |           Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize); | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1809 |           DCHECK_EQ(run->magic_num_, kMagicNum); | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1810 |           size_t idx = run->size_bracket_idx_; | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1811 |           CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1812 |           size_t num_pages = numOfPages[idx]; | 
 | 1813 |           CHECK_GT(num_pages, static_cast<uintptr_t>(0)) | 
 | 1814 |               << "Run size must be > 0 : " << num_pages; | 
 | 1815 |           for (size_t j = i + 1; j < i + num_pages; ++j) { | 
 | 1816 |             CHECK_EQ(page_map_[j], kPageMapRunPart) | 
 | 1817 |                 << "A mismatch between the page map table for kPageMapRunPart" | 
 | 1818 |                 << " at page index " << j | 
 | 1819 |                 << " and the run size : page index range " << i << " to " << (i + num_pages) | 
 | 1820 |                 << std::endl << DumpPageMap(); | 
 | 1821 |           } | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1822 |           // Don't verify the dedicated_full_run_ since it doesn't have any real allocations. | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1823 |           runs.push_back(run); | 
 | 1824 |           i += num_pages; | 
 | 1825 |           CHECK_LE(i, pm_end) << "Page map index " << i << " out of range < " << pm_end | 
 | 1826 |                               << std::endl << DumpPageMap(); | 
 | 1827 |           break; | 
 | 1828 |         } | 
 | 1829 |         case kPageMapRunPart: | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1830 |           // Fall-through. | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1831 |         default: | 
| Maxim Kazantsev | 2fdeecb | 2014-10-16 10:55:47 +0700 | [diff] [blame] | 1832 |           LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl | 
 |  |                      << DumpPageMap(); | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1833 |           break; | 
 | 1834 |       } | 
 | 1835 |     } | 
 | 1836 |   } | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1837 |   std::list<Thread*> threads = Runtime::Current()->GetThreadList()->GetList(); | 
 | 1838 |   for (Thread* thread : threads) { | 
 | 1839 |     for (size_t i = 0; i < kNumThreadLocalSizeBrackets; ++i) { | 
| Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 1840 |       MutexLock brackets_mu(self, *size_bracket_locks_[i]); | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1841 |       Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i)); | 
 | 1842 |       CHECK(thread_local_run != nullptr); | 
 | 1843 |       CHECK(thread_local_run->IsThreadLocal()); | 
 | 1844 |       CHECK(thread_local_run == dedicated_full_run_ || | 
 | 1845 |             thread_local_run->size_bracket_idx_ == i); | 
 | 1846 |     } | 
 | 1847 |   } | 
 | 1848 |   for (size_t i = 0; i < kNumOfSizeBrackets; i++) { | 
| Andreas Gampe | 277ccbd | 2014-11-03 21:36:10 -0800 | [diff] [blame] | 1849 |     MutexLock brackets_mu(self, *size_bracket_locks_[i]); | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1850 |     Run* current_run = current_runs_[i]; | 
 | 1851 |     CHECK(current_run != nullptr); | 
 | 1852 |     if (current_run != dedicated_full_run_) { | 
 | 1853 |       // The dedicated full run is currently marked as thread local. | 
 | 1854 |       CHECK(!current_run->IsThreadLocal()); | 
 | 1855 |       CHECK_EQ(current_run->size_bracket_idx_, i); | 
 | 1856 |     } | 
 | 1857 |   } | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1858 |   // Call Verify() on the collected runs here, after lock_ is released, for the lock order. | 
 | 1859 |   for (auto& run : runs) { | 
| Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 1860 |     run->Verify(self, this, is_running_on_memory_tool_); | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1861 |   } | 
 | 1862 | } | 
 | 1863 |  | 
| Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 1864 | void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc, bool running_on_memory_tool) { | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1865 |   DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump(); | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1866 |   const size_t idx = size_bracket_idx_; | 
| Ian Rogers | 5d05705 | 2014-03-12 14:32:27 -0700 | [diff] [blame] | 1867 |   CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump(); | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1868 |   uint8_t* slot_base = reinterpret_cast<uint8_t*>(this) + headerSizes[idx]; | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1869 |   const size_t num_slots = numOfSlots[idx]; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1870 |   size_t bracket_size = IndexToBracketSize(idx); | 
 | 1871 |   CHECK_EQ(slot_base + num_slots * bracket_size, | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1872 |            reinterpret_cast<uint8_t*>(this) + numOfPages[idx] * kPageSize) | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1873 |       << "Mismatch in the end address of the run " << Dump(); | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1874 |   // Check that the bulk free list is empty. It's only used during BulkFree(). | 
 | 1875 |   CHECK(IsBulkFreeListEmpty()) << "The bulk free list isn't empty " << Dump(); | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1876 |   // Check the thread local runs, the current runs, and the run sets. | 
| Mathieu Chartier | 73d1e17 | 2014-04-11 17:53:48 -0700 | [diff] [blame] | 1877 |   if (IsThreadLocal()) { | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1878 |     // If it's a thread local run, then it must be pointed to by an owner thread. | 
 | 1879 |     bool owner_found = false; | 
 | 1880 |     std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList(); | 
 | 1881 |     for (auto it = thread_list.begin(); it != thread_list.end(); ++it) { | 
 | 1882 |       Thread* thread = *it; | 
| Mathieu Chartier | 0651d41 | 2014-04-29 14:37:57 -0700 | [diff] [blame] | 1883 |       for (size_t i = 0; i < kNumThreadLocalSizeBrackets; i++) { | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1884 |         MutexLock mu(self, *rosalloc->size_bracket_locks_[i]); | 
| Ian Rogers | dd7624d | 2014-03-14 17:43:00 -0700 | [diff] [blame] | 1885 |         Run* thread_local_run = reinterpret_cast<Run*>(thread->GetRosAllocRun(i)); | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1886 |         if (thread_local_run == this) { | 
 | 1887 |           CHECK(!owner_found) | 
 | 1888 |               << "A thread local run has more than one owner thread " << Dump(); | 
 | 1889 |           CHECK_EQ(i, idx) | 
 | 1890 |               << "A mismatching size bracket index in a thread local run " << Dump(); | 
 | 1891 |           owner_found = true; | 
 | 1892 |         } | 
 | 1893 |       } | 
 | 1894 |     } | 
 | 1895 |     CHECK(owner_found) << "A thread local run has no owner thread " << Dump(); | 
 | 1896 |   } else { | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1897 |     // If it's not thread local, check that the thread local free list is empty. | 
 | 1898 |     CHECK(IsThreadLocalFreeListEmpty()) | 
 | 1899 |         << "A non-thread-local run's thread local free list isn't empty " | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1900 |         << Dump(); | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1901 |     // Check if it's a current run for the size bracket. | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1902 |     bool is_current_run = false; | 
 | 1903 |     for (size_t i = 0; i < kNumOfSizeBrackets; i++) { | 
 | 1904 |       MutexLock mu(self, *rosalloc->size_bracket_locks_[i]); | 
 | 1905 |       Run* current_run = rosalloc->current_runs_[i]; | 
 | 1906 |       if (idx == i) { | 
 | 1907 |         if (this == current_run) { | 
 | 1908 |           is_current_run = true; | 
 | 1909 |         } | 
 | 1910 |       } else { | 
 | 1911 |         // If the size bracket index does not match, then it must not | 
 | 1912 |         // be a current run. | 
 | 1913 |         CHECK_NE(this, current_run) | 
 | 1914 |             << "A current run points to a run with a wrong size bracket index " << Dump(); | 
 | 1915 |       } | 
 | 1916 |     } | 
 | 1917 |     // If it's neither a thread local nor a current run, then it must be | 
 | 1918 |     // in a run set. | 
 | 1919 |     if (!is_current_run) { | 
 | 1920 |       MutexLock mu(self, rosalloc->lock_); | 
| Mathieu Chartier | 58553c7 | 2014-09-16 16:25:55 -0700 | [diff] [blame] | 1921 |       auto& non_full_runs = rosalloc->non_full_runs_[idx]; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1922 |       // If it's all free, it must be a free page run rather than a run. | 
 | 1923 |       CHECK(!IsAllFree()) << "A free run must be in a free page run set " << Dump(); | 
 | 1924 |       if (!IsFull()) { | 
 | 1925 |         // If it's not full, it must be in the non-full run set. | 
 | 1926 |         CHECK(non_full_runs.find(this) != non_full_runs.end()) | 
 | 1927 |             << "A non-full run isn't in the non-full run set " << Dump(); | 
 | 1928 |       } else { | 
 | 1929 |         // If it's full, it must be in the full run set (debug build only). | 
 | 1930 |         if (kIsDebugBuild) { | 
| Mathieu Chartier | 58553c7 | 2014-09-16 16:25:55 -0700 | [diff] [blame] | 1931 |           auto& full_runs = rosalloc->full_runs_[idx]; | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1932 |           CHECK(full_runs.find(this) != full_runs.end()) | 
 | 1933 |               << "A full run isn't in the full run set " << Dump(); | 
 | 1934 |         } | 
 | 1935 |       } | 
 | 1936 |     } | 
 | 1937 |   } | 
 | 1938 |   // Check each slot. | 
| Evgenii Stepanov | 1e13374 | 2015-05-20 12:30:59 -0700 | [diff] [blame] | 1939 |   size_t memory_tool_modifier = running_on_memory_tool ? | 
 | 1940 |       2 * ::art::gc::space::kDefaultMemoryToolRedZoneBytes : | 
| Andreas Gampe | d757632 | 2014-10-24 22:13:45 -0700 | [diff] [blame] | 1941 |       0U; | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1942 |   // TODO: reuse InspectAllSlots(). | 
 | 1943 |   std::unique_ptr<bool[]> is_free(new bool[num_slots]());  // zero initialized | 
 | 1944 |   // Mark the free slots; the remaining ones are allocated. | 
 | 1945 |   for (Slot* slot = free_list_.Head(); slot != nullptr; slot = slot->Next()) { | 
 | 1946 |     size_t slot_idx = SlotIndex(slot); | 
 | 1947 |     DCHECK_LT(slot_idx, num_slots); | 
 | 1948 |     is_free[slot_idx] = true; | 
 | 1949 |   } | 
 | 1950 |   if (IsThreadLocal()) { | 
 | 1951 |     for (Slot* slot = thread_local_free_list_.Head(); slot != nullptr; slot = slot->Next()) { | 
 | 1952 |       size_t slot_idx = SlotIndex(slot); | 
 | 1953 |       DCHECK_LT(slot_idx, num_slots); | 
 | 1954 |       is_free[slot_idx] = true; | 
 | 1955 |     } | 
 | 1956 |   } | 
 | 1957 |   for (size_t slot_idx = 0; slot_idx < num_slots; ++slot_idx) { | 
 | 1958 |     uint8_t* slot_addr = slot_base + slot_idx * bracket_size; | 
 | 1959 |     if (running_on_memory_tool) { | 
 | 1960 |       slot_addr += ::art::gc::space::kDefaultMemoryToolRedZoneBytes; | 
 | 1961 |     } | 
 | 1962 |     if (!is_free[slot_idx]) { | 
 | 1963 |       // The slot is allocated. | 
 | 1964 |       mirror::Object* obj = reinterpret_cast<mirror::Object*>(slot_addr); | 
 | 1965 |       size_t obj_size = obj->SizeOf(); | 
 | 1966 |       CHECK_LE(obj_size + memory_tool_modifier, kLargeSizeThreshold) | 
 | 1967 |           << "A run slot contains a large object " << Dump(); | 
 | 1968 |       CHECK_EQ(SizeToIndex(obj_size + memory_tool_modifier), idx) | 
| David Sehr | 709b070 | 2016-10-13 09:12:37 -0700 | [diff] [blame] | 1969 |           << obj->PrettyTypeOf() << " " | 
| Hiroshi Yamauchi | 31bf42c | 2015-09-24 11:20:29 -0700 | [diff] [blame] | 1970 |           << "obj_size=" << obj_size << "(" << obj_size + memory_tool_modifier << "), idx=" << idx | 
 | 1971 |           << " A run slot contains an object with the wrong size " << Dump(); | 
| Hiroshi Yamauchi | a4adbfd | 2014-02-04 18:12:17 -0800 | [diff] [blame] | 1972 |     } | 
 | 1973 |   } | 
 | 1974 | } | 
 | 1975 |  | 
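 |  | // ReleasePages() returns the number of bytes newly transitioned from empty to | 
 |  | // released. A usage sketch (hypothetical caller, e.g. a periodic heap trim): | 
 |  | //   size_t reclaimed = rosalloc->ReleasePages(); | 
 |  | //   VLOG(heap) << "Trim reclaimed " << PrettySize(reclaimed); | 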
| Hiroshi Yamauchi | d9a88de | 2014-04-07 13:52:31 -0700 | [diff] [blame] | 1976 | size_t RosAlloc::ReleasePages() { | 
 | 1977 |   VLOG(heap) << "RosAlloc::ReleasePages()"; | 
 | 1978 |   DCHECK(!DoesReleaseAllPages()); | 
 | 1979 |   Thread* self = Thread::Current(); | 
 | 1980 |   size_t reclaimed_bytes = 0; | 
 | 1981 |   size_t i = 0; | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1982 |   // Check the page map size which might have changed due to grow/shrink. | 
 | 1983 |   while (i < page_map_size_) { | 
 | 1984 |     // Reading the page map without a lock is racy, but the race is benign since it should only | 
 | 1985 |     // result in occasionally not releasing pages that we could have released. | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 1986 |     uint8_t pm = page_map_[i]; | 
| Hiroshi Yamauchi | d9a88de | 2014-04-07 13:52:31 -0700 | [diff] [blame] | 1987 |     switch (pm) { | 
| Mathieu Chartier | e28ed99 | 2014-07-10 10:16:44 -0700 | [diff] [blame] | 1988 |       case kPageMapReleased: | 
 | 1989 |         // Fall through. | 
| Hiroshi Yamauchi | d9a88de | 2014-04-07 13:52:31 -0700 | [diff] [blame] | 1990 |       case kPageMapEmpty: { | 
| Mathieu Chartier | e28ed99 | 2014-07-10 10:16:44 -0700 | [diff] [blame] | 1991 |         // This is currently the start of a free page run. | 
 | 1992 |         // Acquire the lock to prevent other threads racing in and modifying the page map. | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1993 |         MutexLock mu(self, lock_); | 
 | 1994 |         // Check that it's still empty after we acquired the lock since another thread could have | 
 | 1995 |         // raced in and placed an allocation here. | 
| Mathieu Chartier | e28ed99 | 2014-07-10 10:16:44 -0700 | [diff] [blame] | 1996 |         if (IsFreePage(i)) { | 
 | 1997 |           // Free page runs can start with a released page if we coalesced a released page free | 
 | 1998 |           // page run with an empty page run. | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 1999 |           FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize); | 
| Mathieu Chartier | e28ed99 | 2014-07-10 10:16:44 -0700 | [diff] [blame] | 2000 |           // There is a race condition where FreePage can coalesce fpr with the previous | 
 | 2001 |           // free page run before we acquire lock_. In that case free_page_runs_.find will not find | 
 | 2002 |           // a run starting at fpr. To handle this race, we skip reclaiming the page range and go | 
 | 2003 |           // to the next page. | 
 | 2004 |           if (free_page_runs_.find(fpr) != free_page_runs_.end()) { | 
 | 2005 |             size_t fpr_size = fpr->ByteSize(this); | 
| Roland Levillain | 14d9057 | 2015-07-16 10:52:26 +0100 | [diff] [blame] | 2006 |             DCHECK_ALIGNED(fpr_size, kPageSize); | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 2007 |             uint8_t* start = reinterpret_cast<uint8_t*>(fpr); | 
| Mathieu Chartier | e28ed99 | 2014-07-10 10:16:44 -0700 | [diff] [blame] | 2008 |             reclaimed_bytes += ReleasePageRange(start, start + fpr_size); | 
 | 2009 |             size_t pages = fpr_size / kPageSize; | 
 | 2010 |             CHECK_GT(pages, 0U) << "Infinite loop probable"; | 
 | 2011 |             i += pages; | 
 | 2012 |             DCHECK_LE(i, page_map_size_); | 
 | 2013 |             break; | 
 | 2014 |           } | 
| Hiroshi Yamauchi | d9a88de | 2014-04-07 13:52:31 -0700 | [diff] [blame] | 2015 |         } | 
| Ian Rogers | fc787ec | 2014-10-09 21:56:44 -0700 | [diff] [blame] | 2016 |         FALLTHROUGH_INTENDED; | 
| Hiroshi Yamauchi | d9a88de | 2014-04-07 13:52:31 -0700 | [diff] [blame] | 2017 |       } | 
 | 2018 |       case kPageMapLargeObject:      // Fall through. | 
 | 2019 |       case kPageMapLargeObjectPart:  // Fall through. | 
 | 2020 |       case kPageMapRun:              // Fall through. | 
 | 2021 |       case kPageMapRunPart:          // Fall through. | 
 | 2022 |         ++i; | 
 | 2023 |         break;  // Skip. | 
 | 2024 |       default: | 
| Maxim Kazantsev | 2fdeecb | 2014-10-16 10:55:47 +0700 | [diff] [blame] | 2025 |         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm); | 
| Hiroshi Yamauchi | d9a88de | 2014-04-07 13:52:31 -0700 | [diff] [blame] | 2026 |         break; | 
 | 2027 |     } | 
 | 2028 |   } | 
 | 2029 |   return reclaimed_bytes; | 
 | 2030 | } | 
 | 2031 |  | 
| Ian Rogers | 1373595 | 2014-10-08 12:43:28 -0700 | [diff] [blame] | 2032 | size_t RosAlloc::ReleasePageRange(uint8_t* start, uint8_t* end) { | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 2033 |   DCHECK_ALIGNED(start, kPageSize); | 
 | 2034 |   DCHECK_ALIGNED(end, kPageSize); | 
 | 2035 |   DCHECK_LT(start, end); | 
 | 2036 |   if (kIsDebugBuild) { | 
 | 2037 |     // In the debug build, the first page of a free page run | 
 | 2038 |     // contains a magic number for debugging. Exclude it. | 
 | 2039 |     start += kPageSize; | 
| Andreas Gampe | d757632 | 2014-10-24 22:13:45 -0700 | [diff] [blame] | 2040 |  | 
 | 2041 |     // Single pages won't be released. | 
 | 2042 |     if (start == end) { | 
 | 2043 |       return 0; | 
 | 2044 |     } | 
| Mathieu Chartier | a5b5c55 | 2014-06-24 14:48:59 -0700 | [diff] [blame] | 2045 |   } | 
 | 2046 |   if (!kMadviseZeroes) { | 
 | 2047 |     // TODO: Do this when we resurrect the page instead. | 
 | 2048 |     memset(start, 0, end - start); | 
 | 2049 |   } | 
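 |  |   // On Linux, MADV_DONTNEED on a private anonymous mapping drops the backing pages, | 
 |  |   // and the next touch observes zero-filled memory; kMadviseZeroes records whether | 
 |  |   // that property holds, letting us skip the memset above when it does. | 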
 | 2050 |   CHECK_EQ(madvise(start, end - start, MADV_DONTNEED), 0); | 
 | 2051 |   size_t pm_idx = ToPageMapIndex(start); | 
 | 2052 |   size_t reclaimed_bytes = 0; | 
 | 2053 |   // Calculate the reclaimed bytes and update the page map. | 
 | 2054 |   const size_t max_idx = pm_idx + (end - start) / kPageSize; | 
 | 2055 |   for (; pm_idx < max_idx; ++pm_idx) { | 
 | 2056 |     DCHECK(IsFreePage(pm_idx)); | 
 | 2057 |     if (page_map_[pm_idx] == kPageMapEmpty) { | 
 | 2058 |       // Mark the page as released and update how many bytes we released. | 
 | 2059 |       reclaimed_bytes += kPageSize; | 
 | 2060 |       page_map_[pm_idx] = kPageMapReleased; | 
 | 2061 |     } | 
 | 2062 |   } | 
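 |  |   // Pages that were already kPageMapReleased are not counted above, so repeated | 
 |  |   // release passes do not double-count reclaimed bytes. | 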
 | 2063 |   return reclaimed_bytes; | 
 | 2064 | } | 
 | 2065 |  | 
| Hiroshi Yamauchi | 654dd48 | 2014-07-09 12:54:32 -0700 | [diff] [blame] | 2066 | void RosAlloc::LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) { | 
 | 2067 |   Thread* self = Thread::Current(); | 
 | 2068 |   size_t largest_continuous_free_pages = 0; | 
 | 2069 |   WriterMutexLock wmu(self, bulk_free_lock_); | 
 | 2070 |   MutexLock mu(self, lock_); | 
| Mathieu Chartier | a9033d7 | 2016-12-01 17:41:17 -0800 | [diff] [blame] | 2071 |   uint64_t total_free = 0; | 
| Hiroshi Yamauchi | 654dd48 | 2014-07-09 12:54:32 -0700 | [diff] [blame] | 2072 |   for (FreePageRun* fpr : free_page_runs_) { | 
 | 2073 |     largest_continuous_free_pages = std::max(largest_continuous_free_pages, | 
 | 2074 |                                              fpr->ByteSize(this)); | 
| Mathieu Chartier | a9033d7 | 2016-12-01 17:41:17 -0800 | [diff] [blame] | 2075 |     total_free += fpr->ByteSize(this); | 
| Hiroshi Yamauchi | 654dd48 | 2014-07-09 12:54:32 -0700 | [diff] [blame] | 2076 |   } | 
| Mathieu Chartier | a9033d7 | 2016-12-01 17:41:17 -0800 | [diff] [blame] | 2077 |   size_t required_bytes = 0; | 
 | 2078 |   const char* new_buffer_msg = ""; | 
| Hiroshi Yamauchi | 654dd48 | 2014-07-09 12:54:32 -0700 | [diff] [blame] | 2079 |   if (failed_alloc_bytes > kLargeSizeThreshold) { | 
 | 2080 |     // Large allocation. | 
| Mathieu Chartier | a9033d7 | 2016-12-01 17:41:17 -0800 | [diff] [blame] | 2081 |     required_bytes = RoundUp(failed_alloc_bytes, kPageSize); | 
| Hiroshi Yamauchi | 654dd48 | 2014-07-09 12:54:32 -0700 | [diff] [blame] | 2082 |   } else { | 
 | 2083 |     // Non-large allocation. | 
| Mathieu Chartier | a9033d7 | 2016-12-01 17:41:17 -0800 | [diff] [blame] | 2084 |     required_bytes = numOfPages[SizeToIndex(failed_alloc_bytes)] * kPageSize; | 
 | 2085 |     new_buffer_msg = " for a new buffer"; | 
 | 2086 |   } | 
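 |  |   // For example, assuming kPageSize == 4096 and kLargeSizeThreshold == 2048: a | 
 |  |   // failed 3000-byte allocation is a large allocation and needs | 
 |  |   // RoundUp(3000, 4096) == 4096 contiguous free bytes. | 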
 | 2087 |   if (required_bytes > largest_continuous_free_pages) { | 
 | 2088 |     os << "; failed due to fragmentation (" | 
 | 2089 |        << "required contiguous free " << required_bytes << " bytes" << new_buffer_msg | 
 | 2090 |        << ", largest contiguous free " << largest_continuous_free_pages << " bytes" | 
 | 2091 |        << ", total free pages " << total_free << " bytes" | 
 | 2092 |        << ", space footprint " << footprint_ << " bytes" | 
 | 2093 |        << ", space max capacity " << max_capacity_ << " bytes" | 
 | 2094 |        << ")" << std::endl; | 
| Hiroshi Yamauchi | 654dd48 | 2014-07-09 12:54:32 -0700 | [diff] [blame] | 2095 |   } | 
 | 2096 | } | 
 | 2097 |  | 
| Hiroshi Yamauchi | b62f2e6 | 2016-03-23 15:51:24 -0700 | [diff] [blame] | 2098 | void RosAlloc::DumpStats(std::ostream& os) { | 
 | 2099 |   Thread* self = Thread::Current(); | 
 | 2100 |   CHECK(Locks::mutator_lock_->IsExclusiveHeld(self)) | 
 | 2101 |       << "The mutator lock isn't exclusively held at " << __PRETTY_FUNCTION__; | 
 | 2102 |   size_t num_large_objects = 0; | 
 | 2103 |   size_t num_pages_large_objects = 0; | 
 | 2104 |   // These arrays are zero initialized. | 
 | 2105 |   std::unique_ptr<size_t[]> num_runs(new size_t[kNumOfSizeBrackets]()); | 
 | 2106 |   std::unique_ptr<size_t[]> num_pages_runs(new size_t[kNumOfSizeBrackets]()); | 
 | 2107 |   std::unique_ptr<size_t[]> num_slots(new size_t[kNumOfSizeBrackets]()); | 
 | 2108 |   std::unique_ptr<size_t[]> num_used_slots(new size_t[kNumOfSizeBrackets]()); | 
 | 2109 |   std::unique_ptr<size_t[]> num_metadata_bytes(new size_t[kNumOfSizeBrackets]()); | 
 | 2110 |   ReaderMutexLock rmu(self, bulk_free_lock_); | 
 | 2111 |   MutexLock lock_mu(self, lock_); | 
 | 2112 |   for (size_t i = 0; i < page_map_size_; ) { | 
 | 2113 |     uint8_t pm = page_map_[i]; | 
 | 2114 |     switch (pm) { | 
 | 2115 |       case kPageMapReleased: | 
 | 2116 |       case kPageMapEmpty: | 
 | 2117 |         ++i; | 
 | 2118 |         break; | 
 | 2119 |       case kPageMapLargeObject: { | 
 | 2120 |         size_t num_pages = 1; | 
 | 2121 |         size_t idx = i + 1; | 
 | 2122 |         while (idx < page_map_size_ && page_map_[idx] == kPageMapLargeObjectPart) { | 
 | 2123 |           num_pages++; | 
 | 2124 |           idx++; | 
 | 2125 |         } | 
 | 2126 |         num_large_objects++; | 
 | 2127 |         num_pages_large_objects += num_pages; | 
 | 2128 |         i += num_pages; | 
 | 2129 |         break; | 
 | 2130 |       } | 
 | 2131 |       case kPageMapLargeObjectPart: | 
 | 2132 |         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl | 
 | 2133 |                    << DumpPageMap(); | 
 | 2134 |         break; | 
 | 2135 |       case kPageMapRun: { | 
 | 2136 |         Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize); | 
 | 2137 |         size_t idx = run->size_bracket_idx_; | 
 | 2138 |         size_t num_pages = numOfPages[idx]; | 
 | 2139 |         num_runs[idx]++; | 
 | 2140 |         num_pages_runs[idx] += num_pages; | 
 | 2141 |         num_slots[idx] += numOfSlots[idx]; | 
 | 2142 |         size_t num_free_slots = run->NumberOfFreeSlots(); | 
 | 2143 |         num_used_slots[idx] += numOfSlots[idx] - num_free_slots; | 
 | 2144 |         num_metadata_bytes[idx] += headerSizes[idx]; | 
 | 2145 |         i += num_pages; | 
 | 2146 |         break; | 
 | 2147 |       } | 
 | 2148 |       case kPageMapRunPart: | 
 | 2149 |         // Fall-through. | 
 | 2150 |       default: | 
 | 2151 |         LOG(FATAL) << "Unreachable - page map type: " << static_cast<int>(pm) << std::endl | 
 | 2152 |                    << DumpPageMap(); | 
 | 2153 |         break; | 
 | 2154 |     } | 
 | 2155 |   } | 
 | 2156 |   os << "RosAlloc stats:\n"; | 
 | 2157 |   for (size_t i = 0; i < kNumOfSizeBrackets; ++i) { | 
 | 2158 |     os << "Bracket " << i << " (" << bracketSizes[i] << "):" | 
 | 2159 |        << " #runs=" << num_runs[i] | 
 | 2160 |        << " #pages=" << num_pages_runs[i] | 
 | 2161 |        << " (" << PrettySize(num_pages_runs[i] * kPageSize) << ")" | 
 | 2162 |        << " #metadata_bytes=" << PrettySize(num_metadata_bytes[i]) | 
 | 2163 |        << " #slots=" << num_slots[i] << " (" << PrettySize(num_slots[i] * bracketSizes[i]) << ")" | 
 | 2164 |        << " #used_slots=" << num_used_slots[i] | 
 | 2165 |        << " (" << PrettySize(num_used_slots[i] * bracketSizes[i]) << ")\n"; | 
 | 2166 |   } | 
 | 2167 |   os << "Large #allocations=" << num_large_objects | 
 | 2168 |      << " #pages=" << num_pages_large_objects | 
 | 2169 |      << " (" << PrettySize(num_pages_large_objects * kPageSize) << ")\n"; | 
 | 2170 |   size_t total_num_pages = 0; | 
 | 2171 |   size_t total_metadata_bytes = 0; | 
 | 2172 |   size_t total_allocated_bytes = 0; | 
 | 2173 |   for (size_t i = 0; i < kNumOfSizeBrackets; ++i) { | 
 | 2174 |     total_num_pages += num_pages_runs[i]; | 
 | 2175 |     total_metadata_bytes += num_metadata_bytes[i]; | 
 | 2176 |     total_allocated_bytes += num_used_slots[i] * bracketSizes[i]; | 
 | 2177 |   } | 
 | 2178 |   total_num_pages += num_pages_large_objects; | 
 | 2179 |   total_allocated_bytes += num_pages_large_objects * kPageSize; | 
 | 2180 |   os << "Total #total_bytes=" << PrettySize(total_num_pages * kPageSize) | 
 | 2181 |      << " #metadata_bytes=" << PrettySize(total_metadata_bytes) | 
 | 2182 |      << " #used_bytes=" << PrettySize(total_allocated_bytes) << "\n"; | 
 | 2183 |   os << "\n"; | 
 | 2184 | } | 
 | 2185 |  | 
| Hiroshi Yamauchi | cf58d4a | 2013-09-26 14:21:22 -0700 | [diff] [blame] | 2186 | }  // namespace allocator | 
 | 2187 | }  // namespace gc | 
 | 2188 | }  // namespace art |