/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_HEAP_INL_H_
#define ART_RUNTIME_GC_HEAP_INL_H_

#include "heap.h"

#include "base/time_utils.h"
#include "debugger.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/collector/semi_space.h"
#include "gc/space/bump_pointer_space-inl.h"
#include "gc/space/dlmalloc_space-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/region_space-inl.h"
#include "gc/space/rosalloc_space-inl.h"
#include "handle_scope-inl.h"
#include "runtime.h"
#include "thread-inl.h"
#include "utils.h"
#include "verify_object-inl.h"

namespace art {
namespace gc {

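// Allocation entry point used by Heap::AllocObject: allocates byte_count bytes for an object of
// class klass from the given allocator and runs pre_fence_visitor on the new object before the
// constructor fence. kCheckLargeObject controls whether the request may be redirected to the
// large object space; AllocLargeObject (below) re-enters this function with kCheckLargeObject
// set to false to avoid an infinite loop. Returns null on failure.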
template <bool kInstrumented, bool kCheckLargeObject, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, mirror::Class* klass,
                                                      size_t byte_count, AllocatorType allocator,
                                                      const PreFenceVisitor& pre_fence_visitor) {
  if (kIsDebugBuild) {
    CheckPreconditionsForAllocObject(klass, byte_count);
    // Since allocation can cause a GC which will need to SuspendAll, make sure all allocations are
    // done in the runnable state where suspension is expected.
    CHECK_EQ(self->GetState(), kRunnable);
    self->AssertThreadSuspensionIsAllowable();
  }
  // Need to check that we aren't the large object allocator since the large object allocation
  // code path includes a call to this function. If we didn't check, we would have an infinite
  // loop.
  mirror::Object* obj;
  if (kCheckLargeObject && UNLIKELY(ShouldAllocLargeObject(klass, byte_count))) {
    obj = AllocLargeObject<kInstrumented, PreFenceVisitor>(self, &klass, byte_count,
                                                           pre_fence_visitor);
    if (obj != nullptr) {
      return obj;
    } else {
      // There should be an OOM exception; since we are retrying, clear it.
      self->ClearException();
    }
    // If the large object allocation failed, try to use the normal spaces (main space,
    // non moving space). This can happen if there is significant virtual address space
    // fragmentation.
  }
  AllocationTimer alloc_timer(this, &obj);
  // Bytes allocated for the (individual) object.
  size_t bytes_allocated;
  size_t usable_size;
  size_t new_num_bytes_allocated = 0;
  if (allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) {
    byte_count = RoundUp(byte_count, space::BumpPointerSpace::kAlignment);
  }
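  // Three allocation paths follow: a thread-local buffer (TLAB / region TLAB) fast path, an
  // uninstrumented RosAlloc thread-local run fast path, and the generic TryToAllocate path that
  // may fall back to AllocateInternalWithGc.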
  // If we have a thread local allocation we don't need to update bytes allocated.
  if ((allocator == kAllocatorTypeTLAB || allocator == kAllocatorTypeRegionTLAB) &&
      byte_count <= self->TlabSize()) {
    obj = self->AllocTlab(byte_count);
    DCHECK(obj != nullptr) << "AllocTlab can't fail";
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    bytes_allocated = byte_count;
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else if (!kInstrumented && allocator == kAllocatorTypeRosAlloc &&
             (obj = rosalloc_space_->AllocThreadLocal(self, byte_count, &bytes_allocated)) &&
             LIKELY(obj != nullptr)) {
    DCHECK(!running_on_valgrind_);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    usable_size = bytes_allocated;
    pre_fence_visitor(obj, usable_size);
    QuasiAtomic::ThreadFenceForConstructor();
  } else {
    // Bytes allocated, taking bulk thread-local buffer allocations into account.
    size_t bytes_tl_bulk_allocated = 0;
    obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
                                              &usable_size, &bytes_tl_bulk_allocated);
    if (UNLIKELY(obj == nullptr)) {
      bool is_current_allocator = allocator == GetCurrentAllocator();
      obj = AllocateInternalWithGc(self, allocator, byte_count, &bytes_allocated, &usable_size,
                                   &bytes_tl_bulk_allocated, &klass);
      if (obj == nullptr) {
        bool after_is_current_allocator = allocator == GetCurrentAllocator();
        // If there is a pending exception, fail the allocation right away since the next one
        // could cause OOM and abort the runtime.
        if (!self->IsExceptionPending() && is_current_allocator && !after_is_current_allocator) {
          // If the allocator changed, we need to restart the allocation.
          return AllocObject<kInstrumented>(self, klass, byte_count, pre_fence_visitor);
        }
        return nullptr;
      }
    }
    DCHECK_GT(bytes_allocated, 0u);
    DCHECK_GT(usable_size, 0u);
    obj->SetClass(klass);
    if (kUseBakerOrBrooksReadBarrier) {
      if (kUseBrooksReadBarrier) {
        obj->SetReadBarrierPointer(obj);
      }
      obj->AssertReadBarrierPointer();
    }
    if (collector::SemiSpace::kUseRememberedSet && UNLIKELY(allocator == kAllocatorTypeNonMoving)) {
      // (Note this if statement will be constant folded away for the fast-path quick entry
      // points.) Because SetClass() has no write barrier, a non-moving space allocation needs a
      // write barrier since the class pointer may point to the bump pointer space (where the
      // class pointer is an "old-to-young" reference, though rare) under the GSS collector with
      // the remembered set enabled. We don't need this for the kAllocatorTypeRosAlloc/DlMalloc
      // cases because we don't directly allocate into the main alloc space (besides promotions)
      // under the SS/GSS collector.
      WriteBarrierField(obj, mirror::Object::ClassOffset(), klass);
    }
    pre_fence_visitor(obj, usable_size);
    new_num_bytes_allocated = static_cast<size_t>(
        num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_tl_bulk_allocated))
        + bytes_tl_bulk_allocated;
  }
  if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
    CHECK_LE(obj->SizeOf(), usable_size);
  }
  // TODO: Deprecate.
  if (kInstrumented) {
    if (Runtime::Current()->HasStatsEnabled()) {
      RuntimeStats* thread_stats = self->GetStats();
      ++thread_stats->allocated_objects;
      thread_stats->allocated_bytes += bytes_allocated;
      RuntimeStats* global_stats = Runtime::Current()->GetStats();
      ++global_stats->allocated_objects;
      global_stats->allocated_bytes += bytes_allocated;
    }
  } else {
    DCHECK(!Runtime::Current()->HasStatsEnabled());
  }
  if (AllocatorHasAllocationStack(allocator)) {
    PushOnAllocationStack(self, &obj);
  }
  if (kInstrumented) {
    if (Dbg::IsAllocTrackingEnabled()) {
      Dbg::RecordAllocation(self, klass, bytes_allocated);
    }
  } else {
    DCHECK(!Dbg::IsAllocTrackingEnabled());
  }
  // IsGcConcurrent() isn't known at compile time so we can optimize by not checking it for
  // the BumpPointer or TLAB allocators. This is nice since it allows the entire if statement to be
  // optimized out. And for the other allocators, AllocatorMayHaveConcurrentGC is a constant since
  // the allocator_type should be constant propagated.
  if (AllocatorMayHaveConcurrentGC(allocator) && IsGcConcurrent()) {
    CheckConcurrentGC(self, new_num_bytes_allocated, &obj);
  }
  VerifyObject(obj);
  self->VerifyStack();
  return obj;
}

// The size of a thread-local allocation stack in the number of references.
static constexpr size_t kThreadLocalAllocationStackSize = 128;

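// Records a newly allocated object on the allocation stack (thread-local or shared). If the fast
// push fails because the stack is full, falls back to the slower *WithInternalGC variants.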
inline void Heap::PushOnAllocationStack(Thread* self, mirror::Object** obj) {
  if (kUseThreadLocalAllocationStack) {
    if (UNLIKELY(!self->PushOnThreadLocalAllocationStack(*obj))) {
      PushOnThreadLocalAllocationStackWithInternalGC(self, obj);
    }
  } else if (UNLIKELY(!allocation_stack_->AtomicPushBack(*obj))) {
    PushOnAllocationStackWithInternalGC(self, obj);
  }
}

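// Large object allocation: forwards to AllocObjectWithAllocator with the large object space
// allocator and kCheckLargeObject disabled. The class is passed by pointer and wrapped in a
// handle so that it stays valid if a GC moves it during the allocation.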
template <bool kInstrumented, typename PreFenceVisitor>
inline mirror::Object* Heap::AllocLargeObject(Thread* self, mirror::Class** klass,
                                              size_t byte_count,
                                              const PreFenceVisitor& pre_fence_visitor) {
  // Save and restore the class in case it moves.
  StackHandleScope<1> hs(self);
  auto klass_wrapper = hs.NewHandleWrapper(klass);
  return AllocObjectWithAllocator<kInstrumented, false, PreFenceVisitor>(self, *klass, byte_count,
                                                                         kAllocatorTypeLOS,
                                                                         pre_fence_visitor);
}

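// Attempts a single allocation of alloc_size bytes from the requested allocator without
// performing a collection. On success, fills in the object footprint (*bytes_allocated), its
// usable size, and the number of bytes taken from the heap in bulk (*bytes_tl_bulk_allocated,
// e.g. when a new thread-local buffer is carved out); returns null if the space is exhausted.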
template <const bool kInstrumented, const bool kGrow>
inline mirror::Object* Heap::TryToAllocate(Thread* self, AllocatorType allocator_type,
                                           size_t alloc_size, size_t* bytes_allocated,
                                           size_t* usable_size,
                                           size_t* bytes_tl_bulk_allocated) {
  if (allocator_type != kAllocatorTypeTLAB && allocator_type != kAllocatorTypeRegionTLAB &&
      allocator_type != kAllocatorTypeRosAlloc &&
      UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
    return nullptr;
  }
  mirror::Object* ret;
  switch (allocator_type) {
    case kAllocatorTypeBumpPointer: {
      DCHECK(bump_pointer_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::BumpPointerSpace::kAlignment);
      ret = bump_pointer_space_->AllocNonvirtual(alloc_size);
      if (LIKELY(ret != nullptr)) {
        *bytes_allocated = alloc_size;
        *usable_size = alloc_size;
        *bytes_tl_bulk_allocated = alloc_size;
      }
      break;
    }
    case kAllocatorTypeRosAlloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        size_t max_bytes_tl_bulk_allocated = rosalloc_space_->MaxBytesBulkAllocatedFor(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                      max_bytes_tl_bulk_allocated))) {
          return nullptr;
        }
        ret = rosalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!running_on_valgrind_);
        size_t max_bytes_tl_bulk_allocated =
            rosalloc_space_->MaxBytesBulkAllocatedForNonvirtual(alloc_size);
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                      max_bytes_tl_bulk_allocated))) {
          return nullptr;
        }
        if (!kInstrumented) {
          DCHECK(!rosalloc_space_->CanAllocThreadLocal(self, alloc_size));
        }
        ret = rosalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeDlMalloc: {
      if (kInstrumented && UNLIKELY(running_on_valgrind_)) {
        // If running on valgrind, we should be using the instrumented path.
        ret = dlmalloc_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      } else {
        DCHECK(!running_on_valgrind_);
        ret = dlmalloc_space_->AllocNonvirtual(self, alloc_size, bytes_allocated, usable_size,
                                               bytes_tl_bulk_allocated);
      }
      break;
    }
    case kAllocatorTypeNonMoving: {
      ret = non_moving_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                     bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeLOS: {
      ret = large_object_space_->Alloc(self, alloc_size, bytes_allocated, usable_size,
                                       bytes_tl_bulk_allocated);
      // Note that the bump pointer spaces aren't necessarily next to
      // the other continuous spaces like the non-moving alloc space or
      // the zygote space.
      DCHECK(ret == nullptr || large_object_space_->Contains(ret));
      break;
    }
    case kAllocatorTypeTLAB: {
      DCHECK_ALIGNED(alloc_size, space::BumpPointerSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        const size_t new_tlab_size = alloc_size + kDefaultTLABSize;
        if (UNLIKELY(IsOutOfMemoryOnAllocation<kGrow>(allocator_type, new_tlab_size))) {
          return nullptr;
        }
        // Try allocating a new thread local buffer; if the allocation fails the space must be
        // full, so return null.
        if (!bump_pointer_space_->AllocNewTlab(self, new_tlab_size)) {
          return nullptr;
        }
        *bytes_tl_bulk_allocated = new_tlab_size;
      } else {
        *bytes_tl_bulk_allocated = 0;
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *usable_size = alloc_size;
      break;
    }
    case kAllocatorTypeRegion: {
      DCHECK(region_space_ != nullptr);
      alloc_size = RoundUp(alloc_size, space::RegionSpace::kAlignment);
      ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                  bytes_tl_bulk_allocated);
      break;
    }
    case kAllocatorTypeRegionTLAB: {
      DCHECK(region_space_ != nullptr);
      DCHECK_ALIGNED(alloc_size, space::RegionSpace::kAlignment);
      if (UNLIKELY(self->TlabSize() < alloc_size)) {
        if (space::RegionSpace::kRegionSize >= alloc_size) {
          // Non-large. Check OOME for a tlab.
          if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type,
                                                       space::RegionSpace::kRegionSize))) {
            // Try to allocate a tlab.
            if (!region_space_->AllocNewTlab(self)) {
              // Failed to allocate a tlab. Try non-tlab.
              ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                          bytes_tl_bulk_allocated);
              return ret;
            }
            *bytes_tl_bulk_allocated = space::RegionSpace::kRegionSize;
            // Fall-through.
          } else {
            // Check OOME for a non-tlab allocation.
            if (!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size)) {
              ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                          bytes_tl_bulk_allocated);
              return ret;
            } else {
              // Neither tlab nor non-tlab works. Give up.
              return nullptr;
            }
          }
        } else {
          // Large. Check OOME.
          if (LIKELY(!IsOutOfMemoryOnAllocation<kGrow>(allocator_type, alloc_size))) {
            ret = region_space_->AllocNonvirtual<false>(alloc_size, bytes_allocated, usable_size,
                                                        bytes_tl_bulk_allocated);
            return ret;
          } else {
            return nullptr;
          }
        }
      } else {
        *bytes_tl_bulk_allocated = 0;  // Allocated in an existing buffer.
      }
      // The allocation can't fail.
      ret = self->AllocTlab(alloc_size);
      DCHECK(ret != nullptr);
      *bytes_allocated = alloc_size;
      *usable_size = alloc_size;
      break;
    }
    default: {
      LOG(FATAL) << "Invalid allocator type";
      ret = nullptr;
    }
  }
  return ret;
}

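// When kMeasureAllocationTime is enabled, AllocationTimer accumulates the duration of each
// successful allocation into total_allocation_time_; otherwise its constructor and destructor
// are effectively no-ops.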
inline Heap::AllocationTimer::AllocationTimer(Heap* heap, mirror::Object** allocated_obj_ptr)
    : heap_(heap), allocated_obj_ptr_(allocated_obj_ptr),
      allocation_start_time_(kMeasureAllocationTime ? NanoTime() / kTimeAdjust : 0u) { }

inline Heap::AllocationTimer::~AllocationTimer() {
  if (kMeasureAllocationTime) {
    mirror::Object* allocated_obj = *allocated_obj_ptr_;
    // Only record the time if the allocation succeeded.
    if (allocated_obj != nullptr) {
      uint64_t allocation_end_time = NanoTime() / kTimeAdjust;
      heap_->total_allocation_time_.FetchAndAddSequentiallyConsistent(
          allocation_end_time - allocation_start_time_);
    }
  }
}

inline bool Heap::ShouldAllocLargeObject(mirror::Class* c, size_t byte_count) const {
  // We need to have a zygote space or else our newly allocated large object can end up in the
  // Zygote resulting in it being prematurely freed.
  // We can only do this for primitive objects since large objects will not be within the card
  // table range. This also means that we rely on SetClass not dirtying the object's card.
  return byte_count >= large_object_threshold_ && c->IsPrimitiveArray();
}

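// Returns true if an allocation of alloc_size bytes cannot be accommodated: either it would push
// the heap past growth_limit_, or it would exceed max_allowed_footprint_ under a non-concurrent
// GC with kGrow disabled. With kGrow enabled, max_allowed_footprint_ is raised instead (see the
// TODO about the race below).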
template <bool kGrow>
inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size) {
  size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size;
  if (UNLIKELY(new_footprint > max_allowed_footprint_)) {
    if (UNLIKELY(new_footprint > growth_limit_)) {
      return true;
    }
    if (!AllocatorMayHaveConcurrentGC(allocator_type) || !IsGcConcurrent()) {
      if (!kGrow) {
        return true;
      }
      // TODO: Grow for allocation is racy, fix it.
      VLOG(heap) << "Growing heap from " << PrettySize(max_allowed_footprint_) << " to "
                 << PrettySize(new_footprint) << " for a " << PrettySize(alloc_size)
                 << " allocation";
      max_allowed_footprint_ = new_footprint;
    }
  }
  return false;
}

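// Requests a concurrent GC via RequestConcurrentGCAndSaveObject once the running total of
// allocated bytes reaches concurrent_start_bytes_.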
inline void Heap::CheckConcurrentGC(Thread* self, size_t new_num_bytes_allocated,
                                    mirror::Object** obj) {
  if (UNLIKELY(new_num_bytes_allocated >= concurrent_start_bytes_)) {
    RequestConcurrentGCAndSaveObject(self, false, obj);
  }
}

}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_HEAP_INL_H_