/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_

#include "region_space.h"

namespace art {
namespace gc {
namespace space {

inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
                                          size_t* usable_size) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size);
}

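// Thread-unsafe variant of Alloc(): callers must hold the mutator lock exclusively
// (i.e. all other threads are suspended), as asserted below.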
inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
                                                      size_t* bytes_allocated,
                                                      size_t* usable_size) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  return Alloc(self, num_bytes, bytes_allocated, usable_size);
}

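// Tries a lock-free bump-pointer allocation in the current region (or the evacuation region when
// kForEvac), retries under region_lock_, and finally claims a fresh free region. Requests larger
// than kRegionSize are forwarded to AllocLarge().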
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                    size_t* usable_size) {
  DCHECK(IsAligned<kAlignment>(num_bytes));
  mirror::Object* obj;
  if (LIKELY(num_bytes <= kRegionSize)) {
    // Non-large object.
    if (!kForEvac) {
      obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size);
    } else {
      DCHECK(evac_region_ != nullptr);
      obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size);
    }
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    MutexLock mu(Thread::Current(), region_lock_);
    // Retry with current region since another thread may have updated it.
    if (!kForEvac) {
      obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size);
    } else {
      obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size);
    }
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    if (!kForEvac) {
      // Retain sufficient free regions for full evacuation: fail if claiming one more region
      // would leave fewer than half of the regions free.
      if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
        return nullptr;
      }
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        if (r->IsFree()) {
          r->Unfree(time_);
          r->SetNewlyAllocated();
          ++num_non_free_regions_;
          obj = r->Alloc(num_bytes, bytes_allocated, usable_size);
          CHECK(obj != nullptr);
          current_region_ = r;
          return obj;
        }
      }
    } else {
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        if (r->IsFree()) {
          r->Unfree(time_);
          ++num_non_free_regions_;
          obj = r->Alloc(num_bytes, bytes_allocated, usable_size);
          CHECK(obj != nullptr);
          evac_region_ = r;
          return obj;
        }
      }
    }
  } else {
    // Large object.
    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
  }
  return nullptr;
}

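// Bump-pointer allocation within a single region: atomically advances top_ with a CAS loop so
// that multiple threads can allocate from the same region without holding region_lock_.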
105inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
106 size_t* usable_size) {
Hiroshi Yamauchid25f8422015-01-30 16:25:12 -0800107 DCHECK(IsAllocated() && IsInToSpace());
Hiroshi Yamauchi2cd334a2015-01-09 14:03:35 -0800108 DCHECK(IsAligned<kAlignment>(num_bytes));
109 Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
110 uint8_t* old_top;
111 uint8_t* new_top;
112 do {
113 old_top = atomic_top->LoadRelaxed();
114 new_top = old_top + num_bytes;
115 if (UNLIKELY(new_top > end_)) {
116 return nullptr;
117 }
118 } while (!atomic_top->CompareExchangeWeakSequentiallyConsistent(old_top, new_top));
119 reinterpret_cast<Atomic<uint64_t>*>(&objects_allocated_)->FetchAndAddSequentiallyConsistent(1);
120 DCHECK_LE(atomic_top->LoadRelaxed(), end_);
121 DCHECK_LT(old_top, end_);
122 DCHECK_LE(new_top, end_);
123 *bytes_allocated = num_bytes;
124 if (usable_size != nullptr) {
125 *usable_size = num_bytes;
126 }
127 return reinterpret_cast<mirror::Object*>(old_top);
128}
129
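// Returns the object's size. When |usable_size| is non-null, also reports how much space the
// allocation occupies: rounded up to kAlignment for regular objects and to whole regions for
// large objects.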
inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    if (LIKELY(num_bytes <= kRegionSize)) {
      DCHECK(RefToRegion(obj)->IsAllocated());
      *usable_size = RoundUp(num_bytes, kAlignment);
    } else {
      DCHECK(RefToRegion(obj)->IsLarge());
      *usable_size = RoundUp(num_bytes, kRegionSize);
    }
  }
  return num_bytes;
}

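// Sums BytesAllocated() over the non-free regions that match kRegionType. Since kRegionType is a
// template parameter, the switch below can be folded at compile time.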
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetBytesAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->BytesAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type : " << kRegionType;
    }
  }
  return bytes;
}

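// Same as GetBytesAllocatedInternal(), but sums object counts instead of byte counts.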
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetObjectsAllocatedInternal() {
  uint64_t objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        objects += r->ObjectsAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type : " << kRegionType;
    }
  }
  return objects;
}

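// Visits every object in the space (only to-space objects when kToSpaceOnly) and invokes
// |callback| on each. A large region contributes its single object; large tails are skipped.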
template<bool kToSpaceOnly>
void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
  // TODO: MutexLock on region_lock_ won't work due to lock order
  // issues (the classloader classes lock and the monitor lock). We
  // call this with threads suspended.
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
      continue;
    }
    if (r->IsLarge()) {
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
      if (obj->GetClass() != nullptr) {
        callback(obj, arg);
      }
    } else if (r->IsLargeTail()) {
      // Do nothing.
    } else {
      uint8_t* pos = r->Begin();
      uint8_t* top = r->Top();
      while (pos < top) {
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
        if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
          callback(obj, arg);
          pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
        } else {
          break;
        }
      }
    }
  }
}

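// Returns the address just past |obj|, rounded up to kAlignment, i.e. where the next object in a
// densely packed region would start.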
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

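// Allocates an object larger than kRegionSize as a run of contiguous free regions: the first
// region is marked as a large region and the remaining ones as large tails.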
template<bool kForEvac>
mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size) {
  DCHECK(IsAligned<kAlignment>(num_bytes));
  DCHECK_GT(num_bytes, kRegionSize);
  size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
  DCHECK_GT(num_regs, 0U);
  DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
  DCHECK_LE(num_bytes, num_regs * kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  if (!kForEvac) {
    // Retain sufficient free regions for full evacuation: fail if claiming these regions would
    // leave fewer than half of the regions free.
    if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
      return nullptr;
    }
  }
  // Find a large enough run of contiguous free regions.
  size_t left = 0;
  while (left + num_regs - 1 < num_regions_) {
    bool found = true;
    size_t right = left;
    DCHECK_LT(right, left + num_regs)
        << "The inner loop should iterate at least once";
    while (right < left + num_regs) {
      if (regions_[right].IsFree()) {
        ++right;
      } else {
        found = false;
        break;
      }
    }
    if (found) {
      // right points to one region past the last free region.
      DCHECK_EQ(left + num_regs, right);
      Region* first_reg = &regions_[left];
      DCHECK(first_reg->IsFree());
      first_reg->UnfreeLarge(time_);
      ++num_non_free_regions_;
      first_reg->SetTop(first_reg->Begin() + num_bytes);
      for (size_t p = left + 1; p < right; ++p) {
        DCHECK_LT(p, num_regions_);
        DCHECK(regions_[p].IsFree());
        regions_[p].UnfreeLargeTail(time_);
        ++num_non_free_regions_;
      }
      *bytes_allocated = num_bytes;
      if (usable_size != nullptr) {
        *usable_size = num_regs * kRegionSize;
      }
      return reinterpret_cast<mirror::Object*>(first_reg->Begin());
    } else {
      // right points to a non-free region. Restart the search after it.
      left = right + 1;
    }
  }
  return nullptr;
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_