/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_

#include "region_space.h"
#include "thread-current-inl.h"

namespace art {
namespace gc {
namespace space {

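// Rounds the request up to kAlignment and forwards to the non-virtual allocation path.
// This entry point is only used for regular (non-evacuation) allocation, hence <false>.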
inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
                                          size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
}

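// Identical to Alloc(), but callable only while the mutator lock is held exclusively
// (e.g. with all mutator threads suspended), as asserted below.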
inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
                                                      size_t* bytes_allocated,
                                                      size_t* usable_size,
                                                      size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}

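// Core allocation path. Requests of at most kRegionSize bytes first try a lock-free bump
// allocation in the current (or evacuation) region, retry under region_lock_, and finally
// allocate a fresh region; larger requests are handled by AllocLarge().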
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                    size_t* usable_size,
                                                    size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  mirror::Object* obj;
  if (LIKELY(num_bytes <= kRegionSize)) {
    // Non-large object.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    MutexLock mu(Thread::Current(), region_lock_);
    // Retry with current region since another thread may have updated it.
    obj = (kForEvac ? evac_region_ : current_region_)->Alloc(num_bytes,
                                                             bytes_allocated,
                                                             usable_size,
                                                             bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    Region* r = AllocateRegion(kForEvac);
    if (LIKELY(r != nullptr)) {
      obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
      CHECK(obj != nullptr);
      // Do our allocation before setting the region, so that no thread can race ahead and
      // fill in the region before we allocate the object. b/63153464
      if (kForEvac) {
        evac_region_ = r;
      } else {
        current_region_ = r;
      }
      return obj;
    }
  } else {
    // Large object.
    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size,
                               bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
  }
  return nullptr;
}

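// Lock-free bump-pointer allocation within a single region: advance top_ with a weak CAS
// loop and return nullptr once the region is exhausted.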
inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
                                                  size_t* usable_size,
                                                  size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAllocated() && IsInToSpace());
  DCHECK_ALIGNED(num_bytes, kAlignment);
  uint8_t* old_top;
  uint8_t* new_top;
  do {
    old_top = top_.LoadRelaxed();
    new_top = old_top + num_bytes;
    if (UNLIKELY(new_top > end_)) {
      return nullptr;
    }
  } while (!top_.CompareExchangeWeakRelaxed(old_top, new_top));
  objects_allocated_.FetchAndAddRelaxed(1);
  DCHECK_LE(Top(), end_);
  DCHECK_LT(old_top, end_);
  DCHECK_LE(new_top, end_);
  *bytes_allocated = num_bytes;
  if (usable_size != nullptr) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return reinterpret_cast<mirror::Object*>(old_top);
}

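// Sums Region::BytesAllocated() over all non-free regions of the requested region type
// while holding region_lock_.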
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetBytesAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->BytesAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type : " << kRegionType;
    }
  }
  return bytes;
}

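// Same traversal as GetBytesAllocatedInternal(), but counts objects instead of bytes.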
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetObjectsAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->ObjectsAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->ObjectsAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type : " << kRegionType;
    }
  }
  return bytes;
}

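// Visits every object in the space (or only to-space objects when kToSpaceOnly), using the
// live bitmap for partially live regions and a linear bump-pointer walk otherwise. Must be
// called with all mutator threads suspended.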
template<bool kToSpaceOnly>
void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
  // TODO: MutexLock on region_lock_ won't work due to lock order
  // issues (the classloader classes lock and the monitor lock). We
  // call this with threads suspended.
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
      continue;
    }
    if (r->IsLarge()) {
      // Avoid visiting dead large objects since they may contain dangling pointers to the
      // from-space.
      DCHECK_GT(r->LiveBytes(), 0u) << "Visiting dead large object";
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
      DCHECK(obj->GetClass() != nullptr);
      callback(obj, arg);
    } else if (r->IsLargeTail()) {
      // Do nothing.
    } else {
      // For newly allocated and evacuated regions, live bytes will be -1.
      uint8_t* pos = r->Begin();
      uint8_t* top = r->Top();
      const bool need_bitmap =
          r->LiveBytes() != static_cast<size_t>(-1) &&
          r->LiveBytes() != static_cast<size_t>(top - pos);
      if (need_bitmap) {
        GetLiveBitmap()->VisitMarkedRange(
            reinterpret_cast<uintptr_t>(pos),
            reinterpret_cast<uintptr_t>(top),
            [callback, arg](mirror::Object* obj) {
              callback(obj, arg);
            });
      } else {
        while (pos < top) {
          mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
          if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
            callback(obj, arg);
            pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
          } else {
            break;
          }
        }
      }
    }
  }
}

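// Returns the address just past |obj|, rounded up to kAlignment, i.e. where the next object
// in a bump-allocated region would start.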
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

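// Allocates an object that spans multiple regions: scans for a contiguous run of free
// regions, marks the first one as a large region and the remaining ones as large tails.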
template<bool kForEvac>
mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated) {
  DCHECK_ALIGNED(num_bytes, kAlignment);
  DCHECK_GT(num_bytes, kRegionSize);
  size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
  DCHECK_GT(num_regs, 0U);
  DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
  DCHECK_LE(num_bytes, num_regs * kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  if (!kForEvac) {
    // Retain sufficient free regions for full evacuation.
    if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
      return nullptr;
    }
  }
  // Find a large enough run of contiguous free regions.
  size_t left = 0;
  while (left + num_regs - 1 < num_regions_) {
    bool found = true;
    size_t right = left;
    DCHECK_LT(right, left + num_regs)
        << "The inner loop should iterate at least once";
    while (right < left + num_regs) {
      if (regions_[right].IsFree()) {
        ++right;
      } else {
        found = false;
        break;
      }
    }
    if (found) {
      // 'right' points one region past the last free region in the run.
      DCHECK_EQ(left + num_regs, right);
      Region* first_reg = &regions_[left];
      DCHECK(first_reg->IsFree());
      first_reg->UnfreeLarge(this, time_);
      ++num_non_free_regions_;
      size_t allocated = num_regs * kRegionSize;
      // We make 'top' all usable bytes, as the caller of this
      // allocation may use all of 'usable_size' (see mirror::Array::Alloc).
      first_reg->SetTop(first_reg->Begin() + allocated);
      for (size_t p = left + 1; p < right; ++p) {
        DCHECK_LT(p, num_regions_);
        DCHECK(regions_[p].IsFree());
        regions_[p].UnfreeLargeTail(this, time_);
        ++num_non_free_regions_;
      }
      *bytes_allocated = allocated;
      if (usable_size != nullptr) {
        *usable_size = allocated;
      }
      *bytes_tl_bulk_allocated = allocated;
      return reinterpret_cast<mirror::Object*>(first_reg->Begin());
    } else {
      // right points to the non-free region. Start with the one after it.
      left = right + 1;
    }
  }
  return nullptr;
}

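// Bytes consumed by this region: the whole large-object extent (Top() - begin_) for a large
// region, zero for a large tail, and Top() - begin_ (or the thread's TLAB byte count when
// the region is a TLAB) otherwise.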
inline size_t RegionSpace::Region::BytesAllocated() const {
  if (IsLarge()) {
    DCHECK_LT(begin_ + kRegionSize, Top());
    return static_cast<size_t>(Top() - begin_);
  } else if (IsLargeTail()) {
    DCHECK_EQ(begin_, Top());
    return 0;
  } else {
    DCHECK(IsAllocated()) << static_cast<uint>(state_);
    DCHECK_LE(begin_, Top());
    size_t bytes;
    if (is_a_tlab_) {
      bytes = thread_->GetThreadLocalBytesAllocated();
    } else {
      bytes = static_cast<size_t>(Top() - begin_);
    }
    DCHECK_LE(bytes, kRegionSize);
    return bytes;
  }
}


}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_