/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bump_pointer_space.h"
#include "bump_pointer_space-inl.h"
#include "mirror/object-inl.h"
#include "mirror/class-inl.h"
#include "region_space-inl.h"
#include "thread_list.h"

namespace art {
namespace gc {
namespace space {

// If a region has live objects whose size is less than this percent
// value of the region size, evacuate the region.
static constexpr uint kEvaculateLivePercentThreshold = 75U;

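// Maps an anonymous memory region of |capacity| bytes (rounded up to a whole
// number of regions) and wraps it in a new RegionSpace. Returns null and logs
// the current memory maps if the mapping fails.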
RegionSpace* RegionSpace::Create(const std::string& name, size_t capacity,
                                 uint8_t* requested_begin) {
  capacity = RoundUp(capacity, kRegionSize);
  std::string error_msg;
  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
                                                       PROT_READ | PROT_WRITE, true, false,
                                                       &error_msg));
  if (mem_map.get() == nullptr) {
    LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
               << PrettySize(capacity) << " with message " << error_msg;
    MemMap::DumpMaps(LOG_STREAM(ERROR));
    return nullptr;
  }
  return new RegionSpace(name, mem_map.release());
}

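// Carves the mapping into kRegionSize-aligned regions and checks (in debug
// builds) that they are contiguous and cover the whole space. full_region_ is
// a sentinel that is never free and can never satisfy an allocation, so a
// fresh region must be claimed before the first allocation succeeds.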
RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
    : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(),
                                 kGcRetentionPolicyAlwaysCollect),
      region_lock_("Region lock", kRegionSpaceRegionLock), time_(1U) {
  size_t mem_map_size = mem_map->Size();
  CHECK_ALIGNED(mem_map_size, kRegionSize);
  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
  num_regions_ = mem_map_size / kRegionSize;
  num_non_free_regions_ = 0U;
  DCHECK_GT(num_regions_, 0U);
  regions_.reset(new Region[num_regions_]);
  uint8_t* region_addr = mem_map->Begin();
  for (size_t i = 0; i < num_regions_; ++i, region_addr += kRegionSize) {
    regions_[i] = Region(i, region_addr, region_addr + kRegionSize);
  }
  if (kIsDebugBuild) {
    CHECK_EQ(regions_[0].Begin(), Begin());
    for (size_t i = 0; i < num_regions_; ++i) {
      CHECK(regions_[i].IsFree());
      CHECK_EQ(static_cast<size_t>(regions_[i].End() - regions_[i].Begin()), kRegionSize);
      if (i + 1 < num_regions_) {
        CHECK_EQ(regions_[i].End(), regions_[i + 1].Begin());
      }
    }
    CHECK_EQ(regions_[num_regions_ - 1].End(), Limit());
  }
  full_region_ = Region();
  DCHECK(!full_region_.IsFree());
  DCHECK(full_region_.IsAllocated());
  current_region_ = &full_region_;
  evac_region_ = nullptr;
  size_t ignored;
  DCHECK(full_region_.Alloc(kAlignment, &ignored, nullptr, &ignored) == nullptr);
}

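// FromSpaceSize(), UnevacFromSpaceSize() and ToSpaceSize() return the total
// footprint, in bytes, of the regions currently in the corresponding space.
// Each takes region_lock_ and scans every region.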
size_t RegionSpace::FromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::UnevacFromSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInUnevacFromSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

size_t RegionSpace::ToSpaceSize() {
  uint64_t num_regions = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInToSpace()) {
      ++num_regions;
    }
  }
  return num_regions * kRegionSize;
}

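// Evacuation policy for a single region: evacuate it if it was allocated
// after the start of the previous GC (is_newly_allocated_), or if its live
// ratio is known and falls below kEvaculateLivePercentThreshold (for an
// ordinary region) or is zero (for a large region).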
inline bool RegionSpace::Region::ShouldBeEvacuated() {
  DCHECK((IsAllocated() || IsLarge()) && IsInToSpace());
  // If the region was allocated after the start of the
  // previous GC or the live ratio is below threshold, evacuate
  // it.
  bool result;
  if (is_newly_allocated_) {
    result = true;
  } else {
    bool is_live_percent_valid = live_bytes_ != static_cast<size_t>(-1);
    if (is_live_percent_valid) {
      DCHECK(IsInToSpace());
      DCHECK(!IsLargeTail());
      DCHECK_NE(live_bytes_, static_cast<size_t>(-1));
      DCHECK_LE(live_bytes_, BytesAllocated());
      const size_t bytes_allocated = RoundUp(BytesAllocated(), kRegionSize);
      DCHECK_LE(live_bytes_, bytes_allocated);
      if (IsAllocated()) {
        // Side note: live_percent == 0 does not necessarily mean
        // there are no live objects, due to rounding (there may be a
        // few).
        result = live_bytes_ * 100U < kEvaculateLivePercentThreshold * bytes_allocated;
      } else {
        DCHECK(IsLarge());
        result = live_bytes_ == 0U;
      }
    } else {
      result = false;
    }
  }
  return result;
}

// Determine which regions to evacuate and mark them as
// from-space. Mark the rest as unevacuated from-space.
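// A large object spans a kRegionStateLarge head region followed by
// kRegionStateLargeTail regions; the tail regions inherit the evacuation
// decision made for their head, tracked below via num_expected_large_tails
// and prev_large_evacuated.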
void RegionSpace::SetFromSpace(accounting::ReadBarrierTable* rb_table, bool force_evacuate_all) {
  ++time_;
  if (kUseTableLookupReadBarrier) {
    DCHECK(rb_table->IsAllCleared());
    rb_table->SetAll();
  }
  MutexLock mu(Thread::Current(), region_lock_);
  size_t num_expected_large_tails = 0;
  bool prev_large_evacuated = false;
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    RegionState state = r->State();
    RegionType type = r->Type();
    if (!r->IsFree()) {
      DCHECK(r->IsInToSpace());
      if (LIKELY(num_expected_large_tails == 0U)) {
        DCHECK((state == RegionState::kRegionStateAllocated ||
                state == RegionState::kRegionStateLarge) &&
               type == RegionType::kRegionTypeToSpace);
        bool should_evacuate = force_evacuate_all || r->ShouldBeEvacuated();
        if (should_evacuate) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        if (UNLIKELY(state == RegionState::kRegionStateLarge &&
                     type == RegionType::kRegionTypeToSpace)) {
          prev_large_evacuated = should_evacuate;
          num_expected_large_tails = RoundUp(r->BytesAllocated(), kRegionSize) / kRegionSize - 1;
          DCHECK_GT(num_expected_large_tails, 0U);
        }
      } else {
        DCHECK(state == RegionState::kRegionStateLargeTail &&
               type == RegionType::kRegionTypeToSpace);
        if (prev_large_evacuated) {
          r->SetAsFromSpace();
          DCHECK(r->IsInFromSpace());
        } else {
          r->SetAsUnevacFromSpace();
          DCHECK(r->IsInUnevacFromSpace());
        }
        --num_expected_large_tails;
      }
    } else {
      DCHECK_EQ(num_expected_large_tails, 0U);
      if (kUseTableLookupReadBarrier) {
        // Clear the rb table for to-space regions.
        rb_table->Clear(r->Begin(), r->End());
      }
    }
  }
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

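// Reclaims the from-space after evacuation: regions whose contents were
// copied out are freed, and unevacuated from-space regions are flipped back
// to to-space in place.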
void RegionSpace::ClearFromSpace() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsInFromSpace()) {
      r->Clear();
      --num_non_free_regions_;
    } else if (r->IsInUnevacFromSpace()) {
      r->SetUnevacFromSpaceAsToSpace();
    }
  }
  evac_region_ = nullptr;
}

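// Reports the largest contiguous allocation that could currently succeed:
// the unused tail of the current region, plus (only while less than half of
// the regions are occupied, to preserve the evacuation reserve) the longest
// run of free regions.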
void RegionSpace::LogFragmentationAllocFailure(std::ostream& os,
                                               size_t /* failed_alloc_bytes */) {
  size_t max_contiguous_allocation = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  if (current_region_->End() - current_region_->Top() > 0) {
    max_contiguous_allocation = current_region_->End() - current_region_->Top();
  }
  if (num_non_free_regions_ * 2 < num_regions_) {
    // We reserve half of the regions for evacuation only. If we
    // occupy more than half the regions, do not report the free
    // regions as available.
    size_t max_contiguous_free_regions = 0;
    size_t num_contiguous_free_regions = 0;
    bool prev_free_region = false;
    for (size_t i = 0; i < num_regions_; ++i) {
      Region* r = &regions_[i];
      if (r->IsFree()) {
        if (!prev_free_region) {
          CHECK_EQ(num_contiguous_free_regions, 0U);
          prev_free_region = true;
        }
        ++num_contiguous_free_regions;
      } else {
        if (prev_free_region) {
          CHECK_NE(num_contiguous_free_regions, 0U);
          max_contiguous_free_regions = std::max(max_contiguous_free_regions,
                                                 num_contiguous_free_regions);
          num_contiguous_free_regions = 0U;
          prev_free_region = false;
        }
      }
    }
    max_contiguous_allocation = std::max(max_contiguous_allocation,
                                         max_contiguous_free_regions * kRegionSize);
  }
  os << "; failed due to fragmentation (largest possible contiguous allocation "
     << max_contiguous_allocation << " bytes)";
  // Caller's job to print failed_alloc_bytes.
}

void RegionSpace::Clear() {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (!r->IsFree()) {
      --num_non_free_regions_;
    }
    r->Clear();
  }
  current_region_ = &full_region_;
  evac_region_ = &full_region_;
}

void RegionSpace::Dump(std::ostream& os) const {
  os << GetName() << " "
     << reinterpret_cast<void*>(Begin()) << "-" << reinterpret_cast<void*>(Limit());
}

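// Frees a large object: clears the head region it starts in and every
// following large-tail region covering bytes_allocated, decrementing the
// non-free region count for each.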
void RegionSpace::FreeLarge(mirror::Object* large_obj, size_t bytes_allocated) {
  DCHECK(Contains(large_obj));
  DCHECK_ALIGNED(large_obj, kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  uint8_t* begin_addr = reinterpret_cast<uint8_t*>(large_obj);
  uint8_t* end_addr = AlignUp(reinterpret_cast<uint8_t*>(large_obj) + bytes_allocated, kRegionSize);
  CHECK_LT(begin_addr, end_addr);
  for (uint8_t* addr = begin_addr; addr < end_addr; addr += kRegionSize) {
    Region* reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(addr));
    if (addr == begin_addr) {
      DCHECK(reg->IsLarge());
    } else {
      DCHECK(reg->IsLargeTail());
    }
    reg->Clear();
    --num_non_free_regions_;
  }
  if (end_addr < Limit()) {
    // If we aren't at the end of the space, check that the next region is not a large tail.
    Region* following_reg = RefToRegionLocked(reinterpret_cast<mirror::Object*>(end_addr));
    DCHECK(!following_reg->IsLargeTail());
  }
}

void RegionSpace::DumpRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    regions_[i].Dump(os);
  }
}

void RegionSpace::DumpNonFreeRegions(std::ostream& os) {
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* reg = &regions_[i];
    if (!reg->IsFree()) {
      reg->Dump(os);
    }
  }
}

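// Atomically bumps the objects_allocated_ counter of the region containing
// |ref|; the atomic add makes this safe without holding region_lock_.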
void RegionSpace::RecordAlloc(mirror::Object* ref) {
  CHECK(ref != nullptr);
  Region* r = RefToRegion(ref);
  reinterpret_cast<Atomic<uint64_t>*>(&r->objects_allocated_)->FetchAndAddSequentiallyConsistent(1);
}

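// Hands out an entire free region as the calling thread's TLAB, after
// revoking any TLAB it already holds. Fails if taking another region would
// leave fewer than half of the regions free, since those are reserved so a
// full evacuation always has somewhere to copy to.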
bool RegionSpace::AllocNewTlab(Thread* self) {
  MutexLock mu(self, region_lock_);
  RevokeThreadLocalBuffersLocked(self);
  // Retain sufficient free regions for full evacuation.
  if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
    return false;
  }
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      r->Unfree(time_);
      ++num_non_free_regions_;
      r->SetNewlyAllocated();
      r->SetTop(r->End());
      r->is_a_tlab_ = true;
      r->thread_ = self;
      self->SetTlab(r->Begin(), r->End());
      return true;
    }
  }
  return false;
}

size_t RegionSpace::RevokeThreadLocalBuffers(Thread* thread) {
  MutexLock mu(Thread::Current(), region_lock_);
  RevokeThreadLocalBuffersLocked(thread);
  return 0U;
}

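// Detaches |thread| from its TLAB region, if any, folding the thread-local
// object and byte counts back into the region's accounting. Caller must hold
// region_lock_.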
void RegionSpace::RevokeThreadLocalBuffersLocked(Thread* thread) {
  uint8_t* tlab_start = thread->GetTlabStart();
  DCHECK_EQ(thread->HasTlab(), tlab_start != nullptr);
  if (tlab_start != nullptr) {
    DCHECK_ALIGNED(tlab_start, kRegionSize);
    Region* r = RefToRegionLocked(reinterpret_cast<mirror::Object*>(tlab_start));
    DCHECK(r->IsAllocated());
    DCHECK_EQ(thread->GetThreadLocalBytesAllocated(), kRegionSize);
    r->RecordThreadLocalAllocations(thread->GetThreadLocalObjectsAllocated(),
                                    thread->GetThreadLocalBytesAllocated());
    r->is_a_tlab_ = false;
    r->thread_ = nullptr;
  }
  thread->SetTlab(nullptr, nullptr);
}

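// Revokes the TLABs of every live thread. Takes the runtime shutdown and
// thread list locks so the thread list cannot change underneath us.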
size_t RegionSpace::RevokeAllThreadLocalBuffers() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *Locks::runtime_shutdown_lock_);
  MutexLock mu2(self, *Locks::thread_list_lock_);
  std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
  for (Thread* thread : thread_list) {
    RevokeThreadLocalBuffers(thread);
  }
  return 0U;
}

void RegionSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
  if (kIsDebugBuild) {
    DCHECK(!thread->HasTlab());
  }
}

void RegionSpace::AssertAllThreadLocalBuffersAreRevoked() {
  if (kIsDebugBuild) {
    Thread* self = Thread::Current();
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    MutexLock mu2(self, *Locks::thread_list_lock_);
    std::list<Thread*> thread_list = Runtime::Current()->GetThreadList()->GetList();
    for (Thread* thread : thread_list) {
      AssertThreadLocalBuffersAreRevoked(thread);
    }
  }
}

void RegionSpace::Region::Dump(std::ostream& os) const {
  os << "Region[" << idx_ << "]="
     << reinterpret_cast<void*>(begin_) << "-" << reinterpret_cast<void*>(top_)
     << "-" << reinterpret_cast<void*>(end_)
     << " state=" << static_cast<uint>(state_) << " type=" << static_cast<uint>(type_)
     << " objects_allocated=" << objects_allocated_
     << " alloc_time=" << alloc_time_ << " live_bytes=" << live_bytes_
     << " is_newly_allocated=" << is_newly_allocated_ << " is_a_tlab=" << is_a_tlab_
     << " thread=" << thread_ << "\n";
}

}  // namespace space
}  // namespace gc
}  // namespace art