/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space-inl.h"
#include "bump_pointer_space.h"
#include "gc/accounting/read_barrier_table.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

// If a region has live objects whose total size is less than this percent
// of the region size, evacuate the region.
static constexpr uint kEvaculateLivePercentThreshold = 75U;
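// Rough illustration (assuming kRegionSize is 256 KB): a fully allocated region whose live bytes
// are below 192 KB (75% of 256 KB) is selected for evacuation by Region::ShouldBeEvacuated()
// below.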

// Whether to protect the cleared regions.
// Only protect for target builds to prevent flaky test failures (b/63131961).
static constexpr bool kProtectClearedRegions = kIsTargetBuild;
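// When enabled, cleared regions are mprotected PROT_NONE (see ZeroAndProtectRegion below) and
// Region::MarkAsAllocated() restores PROT_READ | PROT_WRITE before reuse, so stray accesses to
// freed regions fault immediately.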

MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
                                  uint8_t* requested_begin) {
  CHECK_ALIGNED(capacity, kRegionSize);
  std::string error_msg;
  // Ask for a capacity of an additional kRegionSize so that we can align the map by kRegionSize
  // even if we get an unaligned base address. This is necessary for the ReadBarrierTable to work.
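  // For example (illustrative addresses): a map that comes back page-aligned but not
  // kRegionSize-aligned, say at 0x12345000, cannot be used directly; AlignBy() below trims the
  // head and tail of the oversized map so that Begin() and End() land on kRegionSize boundaries.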
  std::unique_ptr<MemMap> mem_map;
  while (true) {
    mem_map.reset(MemMap::MapAnonymous(name.c_str(),
                                       requested_begin,
                                       capacity + kRegionSize,
                                       PROT_READ | PROT_WRITE,
                                       true,
                                       false,
                                       &error_msg));
    if (mem_map.get() != nullptr || requested_begin == nullptr) {
      break;
    }
    // Retry with no specified request begin.
    requested_begin = nullptr;
  }
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity) << " with message " << error_msg;
    MemMap::DumpMaps(LOG_STREAM(ERROR));
    return nullptr;
  }
  CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
  CHECK_EQ(mem_map->Begin(), mem_map->BaseBegin());
  CHECK_EQ(mem_map->Size(), mem_map->BaseSize());
  if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
    // Got an aligned map. Since we requested a map that is kRegionSize larger, shrink it by
    // kRegionSize at the end.
    mem_map->SetSize(capacity);
  } else {
    // Got an unaligned map. Align both ends.
    mem_map->AlignBy(kRegionSize);
  }
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  CHECK_ALIGNED(mem_map->End(), kRegionSize);
  CHECK_EQ(mem_map->Size(), capacity);
  return mem_map.release();
}

RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
  return new RegionSpace(name, mem_map);
}

RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
  size_t mem_map_size = mem_map->Size();
  CHECK_ALIGNED(mem_map_size, kRegionSize);
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  num_regions_ = mem_map_size / kRegionSize;
  num_non_free_regions_ = 0U;
  DCHECK_GT(num_regions_, 0U);
  non_free_region_index_limit_ = 0U;
  regions_.reset(new Region[num_regions_]);
  uint8_t* region_addr = mem_map->Begin();
  for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
    regions_[i].Init(i, region_addr, region_addr + kRegionSize);
  }
  mark_bitmap_.reset(
      accounting::ContinuousSpaceBitmap::Create("region space live bitmap", Begin(), Capacity()));
  if (kIsDebugBuild) {
    CHECK_EQ(regions_[0].Begin(), Begin());
    for (size_t i = 0; i < num_regions_; ++i) {
      CHECK(regions_[i].IsFree());
      CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
      if (i + 1 < num_regions_) {
        CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
      }
    }
    CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
  }
  DCHECK(!full_region_.IsFree());
  DCHECK(full_region_.IsAllocated());
  current_region_ = &full_region_;
  evac_region_ = nullptr;
  size_t ignored;
  DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}

size_t RegionSpace::FromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::UnevacFromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::ToSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInToSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

inline bool RegionSpace::Region::ShouldBeEvacuated() {
  DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
  // If the region was allocated after the start of the previous GC, or the
  // live ratio is below the threshold, evacuate it.
  bool result;
  if (is_newly_allocated_) {
    result = true;
  } else {
    bool is_live_percent_valid = live_bytes_ != static_cast<size_t>(-1);
    if (is_live_percent_valid) {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_LE(live_bytes_, bytes_allocated);
      if (IsAllocated()) {
        // Side note: live_percent == 0 does not necessarily mean there are no
        // live objects, due to rounding (there may be a few).
        result = live_bytes_ * 100U < kEvaculateLivePercentThreshold * bytes_allocated;
      } else {
        DCHECK(IsLarge());
        result = live_bytes_ == 0U;
      }
    } else {
      result = false;
    }
  }
  return result;
}

// Determine which regions to evacuate and mark them as
// from-space. Mark the rest as unevacuated from-space.
void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) {
  ++time_;
  if (kUseTableLookupReadBarrier) {
    DCHECK(rb_table->IsAllCleared());
    rb_table->SetAll();
  }
  MutexLock mu(Thread::Current(), region_lock_);
  size_t num_expected_large_tails = 0;
  bool prev_large_evacuated = false;
  VerifyNonFreeRegionLimit();
  const size_t iter_limit = kUseTableLookupReadBarrier
      ? num_regions_
      : std::min(num_regions_, non_free_region_index_limit_);
  for (size_t i = 0; i < iter_limit; ++i) {
    Region* r = &regions_[i];
    RegionState state = r->State();
    RegionType type = r->Type();
    if (!r->IsFree()) {
      DCHECK(r->IsInToSpace());
      if (LIKELY(num_expected_large_tails == 0U)) {
        DCHECK((state == RegionState::kRegionStateAllocated ||
                state == RegionState::kRegionStateLarge) &&
               type == RegionType::kRegionTypeToSpace);
        bool should_evacuate = force_evacuate_all || r->ShouldBeEvacuated();
        if (should_evacuate) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                     type == RegionType::kRegionTypeToSpace)) {
          prev_large_evacuated = should_evacuate;
          num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
          DCHECK_GT(num_expected_large_tails, 0U);
        }
      } else {
        DCHECK(state == RegionState::kRegionStateLargeTail &&
               type == RegionType::kRegionTypeToSpace);
        if (prev_large_evacuated) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        --num_expected_large_tails;
      }
    } else {
      DCHECK_EQ(num_expected_large_tails, 0U);
      if (kUseTableLookupReadBarrier) {
        // Clear the rb table for to-space regions.
        rb_table->Clear(r->Begin(), r->End());
      }
    }
  }
  DCHECK_EQ(num_expected_large_tails, 0U);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

static void ZeroAndProtectRegion(uint8_t* begin, uint8_t* end) {
  ZeroAndReleasePages(begin, end - begin);
  if (kProtectClearedRegions) {
    mprotect(begin, end - begin, PROT_NONE);
  }
}

void RegionSpace::ClearFromSpace(uint64_t* cleared_bytes, uint64_t* cleared_objects) {
  DCHECK(cleared_bytes != nullptr);
  DCHECK(cleared_objects != nullptr);
  *cleared_bytes = 0;
  *cleared_objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  VerifyNonFreeRegionLimit();
  size_t new_non_free_region_index_limit = 0;

  // Combine zeroing and releasing pages to reduce how often madvise is called. This helps
  // reduce contention on the mmap semaphore. b/62194020
  // clear_region adds a region to the current block. If the region is not adjacent to the
  // current block, the clear block is zeroed, released, and a new block begins.
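  // For example (illustrative indices): clearing regions 3, 4, 5 and 9 zeroes and releases the
  // pages of 3-5 as one contiguous block and those of 9 as another, rather than one madvise per
  // region.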
  uint8_t* clear_block_begin = nullptr;
  uint8_t* clear_block_end = nullptr;
  auto clear_region = [&clear_block_begin, &clear_block_end](Region* r) {
    r->Clear(/*zero_and_release_pages*/false);
    if (clear_block_end != r->Begin()) {
      ZeroAndProtectRegion(clear_block_begin, clear_block_end);
      clear_block_begin = r->Begin();
    }
    clear_block_end = r->End();
  };
  for (size_t i = 0; i < std::min(num_regions_, non_free_region_index_limit_); ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      *cleared_bytes += r->BytesAllocated();
      *cleared_objects += r->ObjectsAllocated();
      --num_non_free_regions_;
      clear_region(r);
    } else if (r->IsInUnevacFromSpace()) {
      if (r->LiveBytes() == 0) {
        DCHECK(!r->IsLargeTail());
        // Special case for 0 live bytes: this means all of the objects in the region are dead and
        // we can clear it. This is important for large objects since we must not visit dead ones
        // in RegionSpace::Walk because they may contain dangling references to invalid objects.
        // It is also better to clear these regions now instead of at the end of the next GC to
        // save RAM. If we don't clear the regions here, they will be cleared in the next GC by
        // the normal live percent evacuation logic.
        size_t free_regions = 1;
        // Also release RAM for large tails.
        while (i + free_regions < num_regions_ && regions_[i + free_regions].IsLargeTail()) {
          DCHECK(r->IsLarge());
          clear_region(&regions_[i + free_regions]);
          ++free_regions;
        }
        *cleared_bytes += r->BytesAllocated();
        *cleared_objects += r->ObjectsAllocated();
        num_non_free_regions_ -= free_regions;
        clear_region(r);
        GetLiveBitmap()->ClearRange(
            reinterpret_cast<mirror::Object*>(r->Begin()),
            reinterpret_cast<mirror::Object*>(r->Begin() + free_regions * kRegionSize));
        continue;
      }
      r->SetUnevacFromSpaceAsToSpace();
      if (r->AllAllocatedBytesAreLive()) {
        // Try to optimize the number of ClearRange calls by checking whether the next regions
        // can also be cleared.
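        // For illustration: if regions i, i+1 and i+2 are all fully-live unevac regions, a single
        // ClearRange call below covers the bitmap range of all three.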
        size_t regions_to_clear_bitmap = 1;
        while (i + regions_to_clear_bitmap < num_regions_) {
          Region* const cur = &regions_[i + regions_to_clear_bitmap];
          if (!cur->AllAllocatedBytesAreLive()) {
            DCHECK(!cur->IsLargeTail());
            break;
          }
          CHECK(cur->IsInUnevacFromSpace());
          cur->SetUnevacFromSpaceAsToSpace();
          ++regions_to_clear_bitmap;
        }

        GetLiveBitmap()->ClearRange(
            reinterpret_cast<mirror::Object*>(r->Begin()),
            reinterpret_cast<mirror::Object*>(r->Begin() + regions_to_clear_bitmap * kRegionSize));
        // Skip over the extra regions whose bitmaps we cleared: we don't need to clear them,
        // as they are unevac regions that are live.
        // Subtract one for the for loop.
        i += regions_to_clear_bitmap - 1;
      }
    }
    // Note r != last_checked_region if r->IsInUnevacFromSpace() was true above.
    Region* last_checked_region = &regions_[i];
    if (!last_checked_region->IsFree()) {
      new_non_free_region_index_limit = std::max(new_non_free_region_index_limit,
                                                 last_checked_region->Idx() + 1);
    }
  }
  // Clear pages for the last block since clearing happens when a new block opens.
  ZeroAndReleasePages(clear_block_begin, clear_block_end - clear_block_begin);
  // Update non_free_region_index_limit_.
  SetNonFreeRegionLimit(new_non_free_region_index_limit);
  evac_region_ = nullptr;
}

void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
                                               size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  if (current_region_->End() - current_region_->Top() > 0) {
    max_contiguous_allocation = current_region_->End() - current_region_->Top();
  }
  if (num_non_free_regions_ * 2 < num_regions_) {
    // We reserve half of the regions for evacuation only. If we
    // occupy more than half the regions, do not report the free
    // regions as available.
    size_t max_contiguous_free_regions = 0;
    size_t num_contiguous_free_regions = 0;
    bool prev_free_region = false;
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      if (r->IsFree()) {
        if (!prev_free_region) {
          CHECK_EQ(num_contiguous_free_regions, 0U);
          prev_free_region = true;
        }
        ++num_contiguous_free_regions;
      } else {
        if (prev_free_region) {
          CHECK_NE(num_contiguous_free_regions, 0U);
          max_contiguous_free_regions = std::max(max_contiguous_free_regions,
                                                 num_contiguous_free_regions);
          num_contiguous_free_regions = 0U;
          prev_free_region = false;
        }
      }
    }
    max_contiguous_allocation = std::max(max_contiguous_allocation,
                                         max_contiguous_free_regions * kRegionSize);
  }
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

void RegionSpace::Clear() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (!r->IsFree()) {
      --num_non_free_regions_;
    }
    r->Clear(/*zero_and_release_pages*/true);
  }
  SetNonFreeRegionLimit(0);
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

void RegionSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}

void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear(/*zero_and_release_pages*/true);
    --num_non_free_regions_;
  }
  if (end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

void RegionSpace::DumpRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    regions_[i].Dump(os);
  }
}

void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* reg = &regions_[i];
    if (!reg->IsFree()) {
      reg->Dump(os);
    }
  }
}

void RegionSpace::RecordAlloc(mirror::Object* ref) {
  CHECK(ref != nullptr);
  Region* r = RefToRegion(ref);
  r->objects_allocated_.FetchAndAddSequentiallyConsistent(1);
}

bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) {
  MutexLock mu(self, region_lock_);
  RevokeThreadLocalBuffersLocked(self);
  // Retain sufficient free regions for full evacuation.

  Region* r = AllocateRegion(/*for_evac*/ false);
  if (r != nullptr) {
    r->is_a_tlab_ = true;
    r->thread_ = self;
    r->SetTop(r->End());
    self->SetTlab(r->Begin(), r->Begin() + min_bytes, r->End());
    return true;
  }
  return false;
}

size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), region_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  uint8_t* tlab_start = thread->GetTlabStart();
  DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
  if (tlab_start != nullptr) {
    DCHECK_ALIGNED(tlab_start, kRegionSize);
    Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
    DCHECK(r->IsAllocated());
    DCHECK_LE(thread->GetThreadLocalBytesAllocated(), kRegionSize);
    r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
                                    thread->GetThreadLocalBytesAllocated());
    r->is_a_tlab_ = false;
    r->thread_ = nullptr;
  }
  thread->SetTlab(nullptr, nullptr, nullptr);
}

size_t RegionSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    DCHECK(!thread->HasTlab());
  }
}

void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

void RegionSpace::Region::Dump(std::ostream& os) const {
  os << "Region[" << idx_ << "]=" << reinterpret_cast<void*>(begin_) << "-"
     << reinterpret_cast<void*>(Top())
     << "-" << reinterpret_cast<void*>(end_)
     << " state=" << static_cast<uint>(state_) << " type=" << static_cast<uint>(type_)
     << " objects_allocated=" << objects_allocated_
     << " alloc_time=" << alloc_time_ << " live_bytes=" << live_bytes_
     << " is_newly_allocated=" << is_newly_allocated_
     << " is_a_tlab=" << is_a_tlab_ << " thread=" << thread_ << "\n";
}

size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    if (LIKELY(num_bytes <= kRegionSize)) {
      DCHECK(RefToRegion(obj)->IsAllocated());
      *usable_size = RoundUp(num_bytes, kAlignment);
    } else {
      DCHECK(RefToRegion(obj)->IsLarge());
      *usable_size = RoundUp(num_bytes, kRegionSize);
    }
  }
  return num_bytes;
}

void RegionSpace::Region::Clear(bool zero_and_release_pages) {
  top_.StoreRelaxed(begin_);
  state_ = RegionState::kRegionStateFree;
  type_ = RegionType::kRegionTypeNone;
  objects_allocated_.StoreRelaxed(0);
  alloc_time_ = 0;
  live_bytes_ = static_cast<size_t>(-1);
  if (zero_and_release_pages) {
    ZeroAndProtectRegion(begin_, end_);
  }
  is_newly_allocated_ = false;
  is_a_tlab_ = false;
  thread_ = nullptr;
}

RegionSpace::Region* RegionSpace::AllocateRegion(bool for_evac) {
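  // Non-evacuation (mutator) allocations are refused once more than half of the regions would be
  // non-free; the remaining regions are kept free as evacuation headroom (see also the matching
  // check in LogFragmentationAllocFailure above).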
  if (!for_evac && (num_non_free_regions_ + 1) * 2 > num_regions_) {
    return nullptr;
  }
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      r->Unfree(this, time_);
      ++num_non_free_regions_;
      if (!for_evac) {
        // Evac doesn't count as newly allocated.
        r->SetNewlyAllocated();
      }
      return r;
    }
  }
  return nullptr;
}

void RegionSpace::Region::MarkAsAllocated(RegionSpace* region_space, uint32_t alloc_time) {
  DCHECK(IsFree());
  alloc_time_ = alloc_time;
  region_space->AdjustNonFreeRegionLimit(idx_);
  type_ = RegionType::kRegionTypeToSpace;
  if (kProtectClearedRegions) {
    mprotect(Begin(), kRegionSize, PROT_READ | PROT_WRITE);
  }
}

void RegionSpace::Region::Unfree(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateAllocated;
}

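// A large object spans a leading kRegionStateLarge region followed by one or more
// kRegionStateLargeTail regions (cf. the tail handling in FreeLarge and SetFromSpace above).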
void RegionSpace::Region::UnfreeLarge(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateLarge;
}

void RegionSpace::Region::UnfreeLargeTail(RegionSpace* region_space, uint32_t alloc_time) {
  MarkAsAllocated(region_space, alloc_time);
  state_ = RegionState::kRegionStateLargeTail;
}

}  // namespace space
}  // namespace gc
}  // namespace art