/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_INL_H_
#define ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_INL_H_

#include "rosalloc.h"

namespace art {
namespace gc {
namespace allocator {

Andreas Gamped7576322014-10-24 22:13:45 -070026inline ALWAYS_INLINE bool RosAlloc::ShouldCheckZeroMemory() {
Evgenii Stepanov1e133742015-05-20 12:30:59 -070027 return kCheckZeroMemory && !is_running_on_memory_tool_;
Andreas Gamped7576322014-10-24 22:13:45 -070028}
29
Mathieu Chartier0651d412014-04-29 14:37:57 -070030template<bool kThreadSafe>
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -070031inline ALWAYS_INLINE void* RosAlloc::Alloc(Thread* self, size_t size, size_t* bytes_allocated,
32 size_t* usable_size,
33 size_t* bytes_tl_bulk_allocated) {
Hiroshi Yamauchi3c2856e2013-11-22 13:42:53 -080034 if (UNLIKELY(size > kLargeSizeThreshold)) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -070035 return AllocLargeObject(self, size, bytes_allocated, usable_size,
36 bytes_tl_bulk_allocated);
Hiroshi Yamauchi3c2856e2013-11-22 13:42:53 -080037 }
Mathieu Chartier0651d412014-04-29 14:37:57 -070038 void* m;
39 if (kThreadSafe) {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -070040 m = AllocFromRun(self, size, bytes_allocated, usable_size, bytes_tl_bulk_allocated);
Mathieu Chartier0651d412014-04-29 14:37:57 -070041 } else {
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -070042 m = AllocFromRunThreadUnsafe(self, size, bytes_allocated, usable_size,
43 bytes_tl_bulk_allocated);
Mathieu Chartier0651d412014-04-29 14:37:57 -070044 }
Hiroshi Yamauchi3c2856e2013-11-22 13:42:53 -080045 // Check if the returned memory is really all zero.
Andreas Gamped7576322014-10-24 22:13:45 -070046 if (ShouldCheckZeroMemory() && m != nullptr) {
Ian Rogers13735952014-10-08 12:43:28 -070047 uint8_t* bytes = reinterpret_cast<uint8_t*>(m);
Hiroshi Yamauchi3c2856e2013-11-22 13:42:53 -080048 for (size_t i = 0; i < size; ++i) {
49 DCHECK_EQ(bytes[i], 0);
50 }
51 }
52 return m;
53}
54
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -070055inline bool RosAlloc::Run::IsFull() {
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -070056 return free_list_.Size() == 0;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -070057}
58
59inline bool RosAlloc::CanAllocFromThreadLocalRun(Thread* self, size_t size) {
60 if (UNLIKELY(!IsSizeForThreadLocal(size))) {
61 return false;
62 }
63 size_t bracket_size;
64 size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
65 DCHECK_EQ(idx, SizeToIndex(size));
66 DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
67 DCHECK_EQ(bracket_size, bracketSizes[idx]);
68 DCHECK_LE(size, bracket_size);
69 DCHECK(size > 512 || bracket_size - size < 16);
70 DCHECK_LT(idx, kNumThreadLocalSizeBrackets);
71 Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
72 if (kIsDebugBuild) {
73 // Need the lock to prevent race conditions.
74 MutexLock mu(self, *size_bracket_locks_[idx]);
75 CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
76 CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
77 }
78 DCHECK(thread_local_run != nullptr);
79 DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
80 return !thread_local_run->IsFull();
81}
82
83inline void* RosAlloc::AllocFromThreadLocalRun(Thread* self, size_t size,
84 size_t* bytes_allocated) {
85 DCHECK(bytes_allocated != nullptr);
86 if (UNLIKELY(!IsSizeForThreadLocal(size))) {
87 return nullptr;
88 }
89 size_t bracket_size;
90 size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
91 Run* thread_local_run = reinterpret_cast<Run*>(self->GetRosAllocRun(idx));
92 if (kIsDebugBuild) {
93 // Need the lock to prevent race conditions.
94 MutexLock mu(self, *size_bracket_locks_[idx]);
95 CHECK(non_full_runs_[idx].find(thread_local_run) == non_full_runs_[idx].end());
96 CHECK(full_runs_[idx].find(thread_local_run) == full_runs_[idx].end());
97 }
98 DCHECK(thread_local_run != nullptr);
99 DCHECK(thread_local_run->IsThreadLocal() || thread_local_run == dedicated_full_run_);
100 void* slot_addr = thread_local_run->AllocSlot();
101 if (LIKELY(slot_addr != nullptr)) {
102 *bytes_allocated = bracket_size;
103 }
104 return slot_addr;
105}
106
107inline size_t RosAlloc::MaxBytesBulkAllocatedFor(size_t size) {
108 if (UNLIKELY(!IsSizeForThreadLocal(size))) {
109 return size;
110 }
111 size_t bracket_size;
112 size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
113 return numOfSlots[idx] * bracket_size;
114}
115
116inline void* RosAlloc::Run::AllocSlot() {
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -0700117 Slot* slot = free_list_.Remove();
118 if (kTraceRosAlloc && slot != nullptr) {
119 const uint8_t idx = size_bracket_idx_;
120 LOG(INFO) << "RosAlloc::Run::AllocSlot() : " << slot
121 << ", bracket_size=" << std::dec << bracketSizes[idx]
122 << ", slot_idx=" << SlotIndex(slot);
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -0700123 }
Hiroshi Yamauchi31bf42c2015-09-24 11:20:29 -0700124 return slot;
Hiroshi Yamauchi4460a842015-03-09 11:57:48 -0700125}
126
}  // namespace allocator
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ALLOCATOR_ROSALLOC_INL_H_