/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "debugger.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/rosalloc_space-inl.h"
#include "object_utils.h"
#include "runtime.h"
#include "thread.h"
#include "thread-inl.h"
#include "verify_object-inl.h"

namespace art {
namespace gc {

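// Allocates an object of the given class and byte count from the given allocator, falling back
// to AllocateInternalWithGc (which may run a collection) when the initial attempt fails. When
// kCheckLargeObject is true, large primitive arrays are redirected to the large object space.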
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  DebugCheckPreconditionsForAllocObject(klass, byte_count);
  // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
  // done in the runnable state where suspension is expected.
  DCHECK_EQ(self->GetState(), kRunnable);
  self->AssertThreadSuspensionIsAllowable();
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path calls this function. If we didn't check, we would have an infinite loop.
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    return AllocLargeObject<kInstrumented, PreFenceVisitor>(self, klass, byte_count,
                                                            pre_fence_visitor);
  }
  mirror::Object* obj;
  AllocationTimer alloc_timer(this, &obj);
  size_t bytes_allocated, usable_size;
  obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                            &usable_size);
  if (UNLIKELY(obj == nullptr)) {
    bool is_current_allocator = allocator == GetCurrentAllocator();
    obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
                                 &klass);
    if (obj == nullptr) {
      bool after_is_current_allocator = allocator == GetCurrentAllocator();
      if (is_current_allocator && !after_is_current_allocator) {
        // If the allocator changed, we need to restart the allocation.
        return AllocObject<kInstrumented>(self, klass, byte_count);
      }
      return nullptr;
    }
  }
  DCHECK_GT(bytes_allocated, 0u);
  DCHECK_GT(usable_size, 0u);
  obj->SetClass(klass);
  if (kUseBrooksPointer) {
    obj->SetBrooksPointer(obj);
    obj->AssertSelfBrooksPointer();
  }
  pre_fence_visitor(obj, usable_size);
  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  const size_t new_num_bytes_allocated =
      static_cast<size_t>(num_bytes_allocated_.FetchAndAdd(bytes_allocated)) + bytes_allocated;
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, obj);
  }
  if (kInstrumented) {
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(klass, bytes_allocated);
    }
  } else {
    DCHECK(!Dbg::IsAllocTrackingEnabled());
  }
  // concurrent_gc_ isn't known at compile time, so we can optimize by not checking it for the
  // BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
  // optimized out. For the other allocators, AllocatorMayHaveConcurrentGC is a constant since
  // allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && concurrent_gc_) {
    CheckConcurrentGC(self, new_num_bytes_allocated, obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}

// The size of a thread-local allocation stack, in number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

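// Records a newly allocated object on the allocation stack, which the GC uses to track objects
// allocated since the last collection. Uses the thread-local allocation stack when
// kUseThreadLocalAllocationStack is set; when the stack is full, sticky collections are run until
// space becomes available.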
inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object* obj) {
  if (kUseThreadLocalAllocationStack) {
    bool success = self->PushOnThreadLocalAllocationStack(obj);
    if (UNLIKELY(!success)) {
      // Slow path. Allocate a new thread-local allocation stack.
      mirror::Object** start_address;
      mirror::Object** end_address;
      while (!allocation_stack_->AtomicBumpBack(kThreadLocalAllocationStackSize,
                                                &start_address, &end_address)) {
        CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
      }
      self->SetThreadLocalAllocationStack(start_address, end_address);
      // Retry on the new thread-local allocation stack.
      success = self->PushOnThreadLocalAllocationStack(obj);
      // Must succeed.
      CHECK(success);
    }
  } else {
    // This is safe to do since the GC will never free objects which are neither in the allocation
    // stack nor the live bitmap.
    while (!allocation_stack_->AtomicPushBack(obj)) {
      CollectGarbageInternal(collector::kGcTypeSticky, kGcCauseForAlloc, false);
    }
  }
}

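// Allocates a large object directly from the large object space. kCheckLargeObject is passed as
// false so that AllocObjectWithAllocator does not route the request back here.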
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class* klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

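// Attempts a single allocation of alloc_size bytes from the given allocator. Returns nullptr if
// the allocation would exceed the allowed footprint or the underlying space cannot satisfy the
// request; on success, *bytes_allocated and *usable_size are filled in.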
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, size_t* bytes_allocated,
                                           size_t* usable_size) {
  if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size);
      // Note that the bump pointer spaces aren't necessarily next to the other continuous spaces
      // like the non-moving alloc space or the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        // Try allocating a new thread local buffer; if the allocation fails, the space must be
        // full, so return nullptr.
        if (!bump_pointer_space_->AllocNewTlab(self, alloc_size + kDefaultTLABSize)) {
          return nullptr;
        }
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *usable_size = alloc_size;  // Report the usable size as well, matching the bump pointer case.
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

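// Debug-only sanity checks that the requested byte count is consistent with the class being
// allocated; all of the checks are DCHECKs.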
inline void Heap::DebugCheckPreconditionsForAllocObject(mirror::Class* c, size_t byte_count) {
  DCHECK(c == NULL || (c->IsClassClass() && byte_count >= sizeof(mirror::Class)) ||
         (c->IsVariableSize() || c->GetObjectSize() == byte_count) ||
         strlen(ClassHelper(c).GetDescriptor()) == 0);
  DCHECK_GE(byte_count, sizeof(mirror::Object));
}

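// RAII timer that, when kMeasureAllocationTime is enabled, adds the duration of a successful
// allocation to the heap's total_allocation_time_.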
inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr) {
  if (kMeasureAllocationTime) {
    allocation_start_time_ = NanoTime() / kTimeAdjust;
  }
}

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Only record the time if the allocation succeeded.
    if (allocated_obj != nullptr) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.FetchAndAdd(allocation_end_time - allocation_start_time_);
    }
  }
}

inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote, resulting in it being prematurely freed.
  // We can only do this for primitive arrays since large objects will not be within the card table
  // range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= kLargeObjectThreshold && have_zygote_space_ && c->IsPrimitiveArray();
}

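// Returns true if an allocation of alloc_size bytes would exceed growth_limit_, or would exceed
// max_allowed_footprint_ when no concurrent GC can run for this allocator and kGrow is not set.
// With kGrow set, max_allowed_footprint_ is grown to cover the allocation instead.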
template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_ + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !concurrent_gc_) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
                 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size) << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

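// Requests a concurrent GC once the running total of allocated bytes crosses
// concurrent_start_bytes_, keeping the freshly allocated object in a SirtRef across the request.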
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                    mirror::Object* obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    // The SirtRef is necessary since the calls in RequestConcurrentGC can be a safepoint.
    SirtRef<mirror::Object> ref(self, obj);
    RequestConcurrentGC(self);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_