/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_
#define ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_

#include "region_space.h"

namespace art {
namespace gc {
namespace space {

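// Entry point used by the mutators: rounds the request up to kAlignment and
// delegates to the non-virtual allocation path (non-evacuation case).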
inline mirror::Object* RegionSpace::Alloc(Thread*, size_t num_bytes, size_t* bytes_allocated,
                                          size_t* usable_size,
                                          size_t* bytes_tl_bulk_allocated) {
  num_bytes = RoundUp(num_bytes, kAlignment);
  return AllocNonvirtual<false>(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
}

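// Variant for callers that already hold the mutator lock exclusively (e.g. with
// all threads suspended); it simply forwards to the regular Alloc path.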
inline mirror::Object* RegionSpace::AllocThreadUnsafe(Thread* self, size_t num_bytes,
                                                      size_t* bytes_allocated,
                                                      size_t* usable_size,
                                                      size_t* bytes_tl_bulk_allocated) {
  Locks::mutator_lock_->AssertExclusiveHeld(self);
  return Alloc(self, num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
}

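// Allocation fast path. Requests up to kRegionSize bytes bump-allocate out of
// current_region_ (or evac_region_ when kForEvac is true); on failure the
// allocation is retried under region_lock_ and, if necessary, a fresh free
// region is claimed. Larger requests are handled by AllocLarge.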
template<bool kForEvac>
inline mirror::Object* RegionSpace::AllocNonvirtual(size_t num_bytes, size_t* bytes_allocated,
                                                    size_t* usable_size,
                                                    size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAligned<kAlignment>(num_bytes));
  mirror::Object* obj;
  if (LIKELY(num_bytes <= kRegionSize)) {
    // Non-large object.
    if (!kForEvac) {
      obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                   bytes_tl_bulk_allocated);
    } else {
      DCHECK(evac_region_ != nullptr);
      obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
    }
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    MutexLock mu(Thread::Current(), region_lock_);
    // Retry with current region since another thread may have updated it.
    if (!kForEvac) {
      obj = current_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                   bytes_tl_bulk_allocated);
    } else {
      obj = evac_region_->Alloc(num_bytes, bytes_allocated, usable_size,
                                bytes_tl_bulk_allocated);
    }
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
    if (!kForEvac) {
      // Retain sufficient free regions for full evacuation.
      if ((num_non_free_regions_ + 1) * 2 > num_regions_) {
        return nullptr;
      }
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        if (r->IsFree()) {
          r->Unfree(time_);
          r->SetNewlyAllocated();
          ++num_non_free_regions_;
          obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
          CHECK(obj != nullptr);
          current_region_ = r;
          return obj;
        }
      }
    } else {
      for (size_t i = 0; i < num_regions_; ++i) {
        Region* r = &regions_[i];
        if (r->IsFree()) {
          r->Unfree(time_);
          ++num_non_free_regions_;
          obj = r->Alloc(num_bytes, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
          CHECK(obj != nullptr);
          evac_region_ = r;
          return obj;
        }
      }
    }
  } else {
    // Large object.
    obj = AllocLarge<kForEvac>(num_bytes, bytes_allocated, usable_size,
                               bytes_tl_bulk_allocated);
    if (LIKELY(obj != nullptr)) {
      return obj;
    }
  }
  return nullptr;
}

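// Lock-free bump-pointer allocation within a single region: the top pointer is
// advanced with a weak CAS loop and the per-region object count is incremented
// atomically, so multiple threads may allocate from the same region concurrently.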
inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, size_t* bytes_allocated,
                                                  size_t* usable_size,
                                                  size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAllocated() && IsInToSpace());
  DCHECK(IsAligned<kAlignment>(num_bytes));
  Atomic<uint8_t*>* atomic_top = reinterpret_cast<Atomic<uint8_t*>*>(&top_);
  uint8_t* old_top;
  uint8_t* new_top;
  do {
    old_top = atomic_top->LoadRelaxed();
    new_top = old_top + num_bytes;
    if (UNLIKELY(new_top > end_)) {
      return nullptr;
    }
  } while (!atomic_top->CompareExchangeWeakSequentiallyConsistent(old_top, new_top));
  reinterpret_cast<Atomic<uint64_t>*>(&objects_allocated_)->FetchAndAddSequentiallyConsistent(1);
  DCHECK_LE(atomic_top->LoadRelaxed(), end_);
  DCHECK_LT(old_top, end_);
  DCHECK_LE(new_top, end_);
  *bytes_allocated = num_bytes;
  if (usable_size != nullptr) {
    *usable_size = num_bytes;
  }
  *bytes_tl_bulk_allocated = num_bytes;
  return reinterpret_cast<mirror::Object*>(old_top);
}

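// Returns the object's size. The usable size is that size rounded up to
// kAlignment for regular objects, or to a whole number of regions for large ones.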
inline size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable_size)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  size_t num_bytes = obj->SizeOf();
  if (usable_size != nullptr) {
    if (LIKELY(num_bytes <= kRegionSize)) {
      DCHECK(RefToRegion(obj)->IsAllocated());
      *usable_size = RoundUp(num_bytes, kAlignment);
    } else {
      DCHECK(RefToRegion(obj)->IsLarge());
      *usable_size = RoundUp(num_bytes, kRegionSize);
    }
  }
  return num_bytes;
}

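// Sums BytesAllocated() over the non-free regions matching kRegionType while
// holding region_lock_ for a consistent snapshot.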
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetBytesAllocatedInternal() {
  uint64_t bytes = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        bytes += r->BytesAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          bytes += r->BytesAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type : " << kRegionType;
    }
  }
  return bytes;
}

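// Same as GetBytesAllocatedInternal(), but sums per-region object counts instead.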
template<RegionSpace::RegionType kRegionType>
uint64_t RegionSpace::GetObjectsAllocatedInternal() {
  uint64_t objects = 0;
  MutexLock mu(Thread::Current(), region_lock_);
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree()) {
      continue;
    }
    switch (kRegionType) {
      case RegionType::kRegionTypeAll:
        objects += r->ObjectsAllocated();
        break;
      case RegionType::kRegionTypeFromSpace:
        if (r->IsInFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeUnevacFromSpace:
        if (r->IsInUnevacFromSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      case RegionType::kRegionTypeToSpace:
        if (r->IsInToSpace()) {
          objects += r->ObjectsAllocated();
        }
        break;
      default:
        LOG(FATAL) << "Unexpected space type : " << kRegionType;
    }
  }
  return objects;
}

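// Visits every object in the space (or only to-space objects when kToSpaceOnly
// is true). Large regions contribute their single object, large tails are
// skipped, and regular regions are scanned from Begin() to Top(); a null class
// pointer marks unallocated space and ends the scan of that region.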
template<bool kToSpaceOnly>
void RegionSpace::WalkInternal(ObjectCallback* callback, void* arg) {
  // TODO: MutexLock on region_lock_ won't work due to lock order
  // issues (the classloader classes lock and the monitor lock). We
  // call this with threads suspended.
  Locks::mutator_lock_->AssertExclusiveHeld(Thread::Current());
  for (size_t i = 0; i < num_regions_; ++i) {
    Region* r = &regions_[i];
    if (r->IsFree() || (kToSpaceOnly && !r->IsInToSpace())) {
      continue;
    }
    if (r->IsLarge()) {
      mirror::Object* obj = reinterpret_cast<mirror::Object*>(r->Begin());
      if (obj->GetClass() != nullptr) {
        callback(obj, arg);
      }
    } else if (r->IsLargeTail()) {
      // Do nothing.
    } else {
      uint8_t* pos = r->Begin();
      uint8_t* top = r->Top();
      while (pos < top) {
        mirror::Object* obj = reinterpret_cast<mirror::Object*>(pos);
        if (obj->GetClass<kDefaultVerifyFlags, kWithoutReadBarrier>() != nullptr) {
          callback(obj, arg);
          pos = reinterpret_cast<uint8_t*>(GetNextObject(obj));
        } else {
          break;
        }
      }
    }
  }
}

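// Returns the address just past the given object, rounded up to kAlignment.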
inline mirror::Object* RegionSpace::GetNextObject(mirror::Object* obj) {
  const uintptr_t position = reinterpret_cast<uintptr_t>(obj) + obj->SizeOf();
  return reinterpret_cast<mirror::Object*>(RoundUp(position, kAlignment));
}

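// Large-object allocation: under region_lock_, linearly searches for a run of
// num_regs contiguous free regions, marking the first as a large region and the
// rest as large tails. Returns null if no such run exists or (in the non-evac
// case) if the allocation would leave too few free regions for a full evacuation.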
template<bool kForEvac>
mirror::Object* RegionSpace::AllocLarge(size_t num_bytes, size_t* bytes_allocated,
                                        size_t* usable_size,
                                        size_t* bytes_tl_bulk_allocated) {
  DCHECK(IsAligned<kAlignment>(num_bytes));
  DCHECK_GT(num_bytes, kRegionSize);
  size_t num_regs = RoundUp(num_bytes, kRegionSize) / kRegionSize;
  DCHECK_GT(num_regs, 0U);
  DCHECK_LT((num_regs - 1) * kRegionSize, num_bytes);
  DCHECK_LE(num_bytes, num_regs * kRegionSize);
  MutexLock mu(Thread::Current(), region_lock_);
  if (!kForEvac) {
    // Retain sufficient free regions for full evacuation.
    if ((num_non_free_regions_ + num_regs) * 2 > num_regions_) {
      return nullptr;
    }
  }
  // Find a large enough run of contiguous free regions.
  size_t left = 0;
  while (left + num_regs - 1 < num_regions_) {
    bool found = true;
    size_t right = left;
    DCHECK_LT(right, left + num_regs)
        << "The inner loop should iterate at least once";
    while (right < left + num_regs) {
      if (regions_[right].IsFree()) {
        ++right;
      } else {
        found = false;
        break;
      }
    }
    if (found) {
      // right points to the one region past the last free region.
      DCHECK_EQ(left + num_regs, right);
      Region* first_reg = &regions_[left];
      DCHECK(first_reg->IsFree());
      first_reg->UnfreeLarge(time_);
      ++num_non_free_regions_;
      first_reg->SetTop(first_reg->Begin() + num_bytes);
      for (size_t p = left + 1; p < right; ++p) {
        DCHECK_LT(p, num_regions_);
        DCHECK(regions_[p].IsFree());
        regions_[p].UnfreeLargeTail(time_);
        ++num_non_free_regions_;
      }
      *bytes_allocated = num_bytes;
      if (usable_size != nullptr) {
        *usable_size = num_regs * kRegionSize;
      }
      *bytes_tl_bulk_allocated = num_bytes;
      return reinterpret_cast<mirror::Object*>(first_reg->Begin());
    } else {
      // right points to the first non-free region. Restart the search from the one after it.
      left = right + 1;
    }
  }
  return nullptr;
}

}  // namespace space
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_SPACE_REGION_SPACE_INL_H_