/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space-inl.h"
#include "bump_pointer_space.h"
#include "gc/accounting/read_barrier_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

// If a region has live objects whose total size is less than this percent
// of the region size, evacuate the region.
static constexpr uint kEvacuateLivePercentThreshold = 75U;

// Whether we protect the cleared regions.
// Only protect for target builds to prevent flaky test failures (b/63131961).
static constexpr bool kProtectClearedRegions = kIsTargetBuild;
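// Protecting cleared regions turns stray accesses to reclaimed from-space memory into
// immediate faults rather than silent reads of stale data; MarkAsAllocated() restores
// PROT_READ | PROT_WRITE when a region is reused.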

MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
                                  uint8_t* requested_begin) {
  CHECK_ALIGNED(capacity, kRegionSize);
  std::string error_msg;
  // Ask for an additional kRegionSize of capacity so that we can align the map by kRegionSize
  // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
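  // Illustrative example, assuming a 256 KB kRegionSize: a base address of 0x10020000 is
  // aligned up to 0x10040000, and the extra kRegionSize requested guarantees that
  // [aligned_begin, aligned_begin + capacity) still lies within the map.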
  std::unique_ptr<MemMap> mem_map;
  while (true) {
    mem_map.reset(MemMap::MapAnonymous(name.c_str(),
                                       requested_begin,
                                       capacity + kRegionSize,
                                       PROT_READ | PROT_WRITE,
                                       /* low_4gb */ true,
                                       /* reuse */ false,
                                       &error_msg));
    if (mem_map.get() != nullptr || requested_begin == nullptr) {
      break;
    }
    // Retry with no specified request begin.
    requested_begin = nullptr;
  }
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity) << " with message " << error_msg;
    MemMap::DumpMaps(LOG_STREAM(ERROR));
    return nullptr;
  }
  CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
  CHECK_EQ(mem_map->Begin(), mem_map->BaseBegin());
  CHECK_EQ(mem_map->Size(), mem_map->BaseSize());
  if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
    // Got an aligned map. Since we requested a map that's kRegionSize larger, shrink by
    // kRegionSize at the end.
    mem_map->SetSize(capacity);
  } else {
    // Got an unaligned map. Align both ends.
    mem_map->AlignBy(kRegionSize);
  }
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  CHECK_ALIGNED(mem_map->End(), kRegionSize);
  CHECK_EQ(mem_map->Size(), capacity);
  return mem_map.release();
}

RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
  return new RegionSpace(name, mem_map);
}

RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
  size_t mem_map_size = mem_map->Size();
  CHECK_ALIGNED(mem_map_size, kRegionSize);
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  num_regions_ = mem_map_size / kRegionSize;
  num_non_free_regions_ = 0U;
  DCHECK_GT(num_regions_, 0U);
  non_free_region_index_limit_ = 0U;
  regions_.reset(new Region[num_regions_]);
  uint8_t* region_addr = mem_map->Begin();
  for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
    regions_[i].Init(i, region_addr, region_addr + kRegionSize);
  }
  mark_bitmap_.reset(
      accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity()));
  if (kIsDebugBuild) {
    CHECK_EQ(regions_[0].Begin(), Begin());
    for (size_t i = 0; i < num_regions_; ++i) {
      CHECK(regions_[i].IsFree());
      CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
      if (i + 1 < num_regions_) {
        CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
      }
    }
    CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
  }
  DCHECK(!full_region_.IsFree());
  DCHECK(full_region_.IsAllocated());
  current_region_ = &full_region_;
  evac_region_ = nullptr;
  size_t ignored;
  DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}

size_t RegionSpace::FromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::UnevacFromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::ToSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInToSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

inline bool RegionSpace::Region::ShouldBeEvacuated() {
  DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
  // If the region was allocated after the start of the previous GC, or its
  // live ratio is below the threshold, evacuate it.
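  // Illustrative arithmetic, assuming a 256 KB kRegionSize: an allocated region with 128 KB of
  // live bytes satisfies 128 * 100 < 75 * 256 (a 50% live ratio) and is evacuated; at 192 KB
  // (75%) or above it stays as unevac from-space.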
  bool result;
  if (is_newly_allocated_) {
    result = true;
  } else {
    bool is_live_percent_valid = live_bytes_ != static_cast<size_t>(-1);
    if (is_live_percent_valid) {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_LE(live_bytes_, bytes_allocated);
      if (IsAllocated()) {
        // Side note: live_percent == 0 does not necessarily mean
        // there are no live objects, due to rounding (there may be a
        // few).
        result = live_bytes_ * 100U < kEvacuateLivePercentThreshold * bytes_allocated;
      } else {
        DCHECK(IsLarge());
        result = live_bytes_ == 0U;
      }
    } else {
      result = false;
    }
  }
  return result;
}

// Determine which regions to evacuate and mark them as
// from-space. Mark the rest as unevacuated from-space.
void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) {
  ++time_;
  if (kUseTableLookupReadBarrier) {
    DCHECK(rb_table->IsAllCleared());
    rb_table->SetAll();
  }
  MutexLock mu(Thread::Current(), region_lock_);
  size_t num_expected_large_tails = 0;
  bool prev_large_evacuated = false;
  VerifyNonFreeRegionLimit();
  const size_t iter_limit = kUseTableLookupReadBarrier
      ? num_regions_
      : std::min(num_regions_, non_free_region_index_limit_);
  for (size_t i = 0; i < iter_limit; ++i) {
    Region* r = &regions_[i];
    RegionState state = r->State();
    RegionType type = r->Type();
    if (!r->IsFree()) {
      DCHECK(r->IsInToSpace());
      if (LIKELY(num_expected_large_tails == 0U)) {
        DCHECK((state == RegionState::kRegionStateAllocated ||
                state == RegionState::kRegionStateLarge) &&
               type == RegionType::kRegionTypeToSpace);
        bool should_evacuate = force_evacuate_all || r->ShouldBeEvacuated();
        if (should_evacuate) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                     type == RegionType::kRegionTypeToSpace)) {
          prev_large_evacuated = should_evacuate;
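          // Illustrative example, assuming 256 KB regions: a 600 KB large object occupies one
          // "large" region plus two "large tail" regions, so
          // RoundUp(600 KB, 256 KB) / 256 KB - 1 == 2 tails are expected to follow.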
          num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
          DCHECK_GT(num_expected_large_tails, 0U);
        }
      } else {
        DCHECK(state == RegionState::kRegionStateLargeTail &&
               type == RegionType::kRegionTypeToSpace);
        if (prev_large_evacuated) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        --num_expected_large_tails;
      }
    } else {
      DCHECK_EQ(num_expected_large_tails, 0U);
      if (kUseTableLookupReadBarrier) {
        // Clear the rb table for to-space regions.
        rb_table->Clear(r->Begin(), r->End());
      }
    }
  }
  DCHECK_EQ(num_expected_large_tails, 0U);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
  ZeroAndReleasePages(begin, end - begin);
  if (kProtectClearedRegions) {
    CheckedCall(mprotect, __FUNCTION__, begin, end - begin, PROT_NONE);
  }
}

void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) {
  DCHECK(cleared_bytes != nullptr);
  DCHECK(cleared_objects != nullptr);
  *cleared_bytes = 0;
  *cleared_objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  VerifyNonFreeRegionLimit();
  size_t new_non_free_region_index_limit = 0;

  // Combine zeroing and releasing pages to reduce how often madvise is called. This helps
  // reduce contention on the mmap semaphore. b/62194020
  // clear_region adds a region to the current block. If the region is not adjacent, the
  // clear block is zeroed, released, and a new block begins.
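  // E.g. three adjacent cleared regions coalesce into a single ZeroAndProtectRegion call over
  // 3 * kRegionSize bytes rather than three separate madvise/mprotect calls.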
  uint8_t* clear_block_begin = nullptr;
  uint8_t* clear_block_end = nullptr;
  auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
    r->Clear(/*zero_and_release_pages*/false);
    if (clear_block_end != r->Begin()) {
      ZeroAndProtectRegion(clear_block_begin, clear_block_end);
      clear_block_begin = r->Begin();
    }
    clear_block_end = r->End();
  };
  for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      *cleared_bytes += r->BytesAllocated();
      *cleared_objects += r->ObjectsAllocated();
      --num_non_free_regions_;
      clear_region(r);
    } else if (r->IsInUnevacFromSpace()) {
      if (r->LiveBytes() == 0) {
        DCHECK(!r->IsLargeTail());
        // Special case for 0 live bytes: all of the objects in the region are dead and
        // we can clear it. This is important for large objects since we must not visit dead ones
        // in RegionSpace::Walk because they may contain dangling references to invalid objects.
        // It is also better to clear these regions now instead of at the end of the next GC to
        // save RAM. If we don't clear the regions here, they will be cleared next GC by the
        // normal live percent evacuation logic.
        size_t free_regions = 1;
        // Also release RAM for large tails.
        while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
          DCHECK(r->IsLarge());
          clear_region(&regions_[i + free_regions]);
          ++free_regions;
        }
        *cleared_bytes += r->BytesAllocated();
        *cleared_objects += r->ObjectsAllocated();
        num_non_free_regions_ -= free_regions;
        clear_region(r);
        GetLiveBitmap()->ClearRange(
            reinterpret_cast<mirror::Object*>(r->Begin()),
            reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
        continue;
      }
      r->SetUnevacFromSpaceAsToSpace();
      if (r->AllAllocatedBytesAreLive()) {
        // Try to optimize the number of ClearRange calls by checking whether the next regions
        // can also be cleared.
        size_t regions_to_clear_bitmap = 1;
        while (i + regions_to_clear_bitmap < num_regions_) {
          Region* const cur = &regions_[i + regions_to_clear_bitmap];
          if (!cur->AllAllocatedBytesAreLive()) {
            DCHECK(!cur->IsLargeTail());
            break;
          }
          CHECK(cur->IsInUnevacFromSpace());
          cur->SetUnevacFromSpaceAsToSpace();
          ++regions_to_clear_bitmap;
        }

        GetLiveBitmap()->ClearRange(
            reinterpret_cast<mirror::Object*>(r->Begin()),
            reinterpret_cast<mirror::Object*>(r->Begin() + regions_to_clear_bitmap * kRegionSize));
        // Skip over the extra regions whose bitmaps we cleared: we don't need to process them
        // again, as they are live unevac regions.
        // Subtract one for the for loop.
        i += regions_to_clear_bitmap - 1;
      }
    }
    // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
    Region* last_checked_region = &regions_[i];
    if (!last_checked_region->IsFree()) {
      new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
                                                 last_checked_region->Idx() + 1);
    }
  }
  // Clear pages for the last block since clearing happens when a new block opens.
  ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
  // Update non_free_region_index_limit_.
  SetNonFreeRegionLimit(new_non_free_region_index_limit);
  evac_region_ = nullptr;
}

void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
                                               size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  if (current_region_->End() - current_region_->Top() > 0) {
    max_contiguous_allocation = current_region_->End() - current_region_->Top();
  }
  if (num_non_free_regions_ * 2 < num_regions_) {
    // We reserve half of the regions for evacuation only. If we
    // occupy more than half the regions, do not report the free
    // regions as available.
    size_t max_contiguous_free_regions = 0;
    size_t num_contiguous_free_regions = 0;
    bool prev_free_region = false;
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      if (r->IsFree()) {
        if (!prev_free_region) {
          CHECK_EQ(num_contiguous_free_regions, 0U);
          prev_free_region = true;
        }
        ++num_contiguous_free_regions;
      } else {
        if (prev_free_region) {
          CHECK_NE(num_contiguous_free_regions, 0U);
          max_contiguous_free_regions = std::max(max_contiguous_free_regions,
                                                 num_contiguous_free_regions);
          num_contiguous_free_regions = 0U;
          prev_free_region = false;
        }
      }
    }
    max_contiguous_allocation = std::max(max_contiguous_allocation,
                                         max_contiguous_free_regions * kRegionSize);
  }
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

void RegionSpace::Clear() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (!r->IsFree()) {
      --num_non_free_regions_;
    }
    r->Clear(/*zero_and_release_pages*/true);
  }
  SetNonFreeRegionLimit(0);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

void RegionSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}

void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear(/*zero_and_release_pages*/true);
    --num_non_free_regions_;
  }
  if (end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

void RegionSpace::DumpRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    regions_[i].Dump(os);
  }
}

void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* reg = &regions_[i];
    if (!reg->IsFree()) {
      reg->Dump(os);
    }
  }
}

void RegionSpace::RecordAlloc(mirror::Object* ref) {
  CHECK(ref != nullptr);
  Region* r = RefToRegion(ref);
  r->objects_allocated_.FetchAndAddSequentiallyConsistent(1);
}

bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) {
  MutexLock mu(self, region_lock_);
  RevokeThreadLocalBuffersLocked(self);
  // Retain sufficient free regions for full evacuation.
  Region* r = AllocateRegion(/*for_evac*/ false);
  if (r != nullptr) {
    r->is_a_tlab_ = true;
    r->thread_ = self;
    r->SetTop(r->End());
    self->SetTlab(r->Begin(), r->Begin() + min_bytes, r->End());
    return true;
  }
  return false;
}

size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), region_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  uint8_t* tlab_start = thread->GetTlabStart();
  DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
  if (tlab_start != nullptr) {
    DCHECK_ALIGNED(tlab_start, kRegionSize);
    Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
    DCHECK(r->IsAllocated());
    DCHECK_LE(thread->GetThreadLocalBytesAllocated(), kRegionSize);
    r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
                                    thread->GetThreadLocalBytesAllocated());
    r->is_a_tlab_ = false;
    r->thread_ = nullptr;
  }
  thread->SetTlab(nullptr, nullptr, nullptr);
}

size_t RegionSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    DCHECK(!thread->HasTlab());
  }
}

void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

void RegionSpace::Region::Dump(std::ostream& os) const {
  os << "Region[" << idx_ << "]=" << reinterpret_cast<void*>(begin_) << "-"
     << reinterpret_cast<void*>(Top())
     << "-" << reinterpret_cast<void*>(end_)
     << " state=" << static_cast<uint>(state_) << " type=" << static_cast<uint>(type_)
     << " objects_allocated=" << objects_allocated_
     << " alloc_time=" << alloc_time_ << " live_bytes=" << live_bytes_
     << " is_newly_allocated=" << is_newly_allocated_ << " is_a_tlab=" << is_a_tlab_
     << " thread=" << thread_ << "\n";
}

size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    if (LIKELY(num_bytes <= kRegionSize)) {
      DCHECK(RefToRegion(obj)->IsAllocated());
      *usable_size = RoundUp(num_bytes, kAlignment);
    } else {
      DCHECK(RefToRegion(obj)->IsLarge());
      *usable_size = RoundUp(num_bytes, kRegionSize);
    }
  }
  return num_bytes;
}

void RegionSpace::Region::Clear(bool zero_and_release_pages) {
  top_.StoreRelaxed(begin_);
  state_ = RegionState::kRegionStateFree;
  type_ = RegionType::kRegionTypeNone;
  objects_allocated_.StoreRelaxed(0);
  alloc_time_ = 0;
  live_bytes_ = static_cast<size_t>(-1);
  if (zero_and_release_pages) {
    ZeroAndProtectRegion(begin_, end_);
  }
  is_newly_allocated_ = false;
  is_a_tlab_ = false;
  thread_ = nullptr;
}

RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
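  // Keep at least half of the regions free so that a full evacuation always has room to copy
  // survivors into; only allocation for evacuation itself may dip into this reserve.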
  if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
    return nullptr;
  }
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      r->Unfree(this, time_);
      ++num_non_free_regions_;
      if (!for_evac) {
        // Evac doesn't count as newly allocated.
        r->SetNewlyAllocated();
      }
      return r;
    }
  }
  return nullptr;
}

void RegionSpace::Region::MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time) {
  DCHECK(IsFree());
  alloc_time_ = alloc_time;
  region_space->AdjustNonFreeRegionLimit(idx_);
  type_ = RegionType::kRegionTypeToSpace;
  if (kProtectClearedRegions) {
    CheckedCall(mprotect, __FUNCTION__, Begin(), kRegionSize, PROT_READ | PROT_WRITE);
  }
}

void RegionSpace::Region::Unfree(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateAllocated;
}

void RegionSpace::Region::UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateLarge;
}

void RegionSpace::Region::UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateLargeTail;
}

}  // namespace space
}  // namespace gc
}  // namespace art