blob: 7e95e71a5be16447febe4cfde9d64a06e113d21a [file] [log] [blame]
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001/*
2 * Copyright 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "jit_code_cache.h"

#include <memory>
#include <sstream>

#include "art_method-inl.h"
#include "mem_map.h"
#include "oat_file-inl.h"
24
25namespace art {
26namespace jit {
27
// Page protection modes for the JIT cache maps.
static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;  // Transient, while writing code.
static constexpr int kProtData = PROT_READ | PROT_WRITE;             // Steady state of the data cache.
static constexpr int kProtCode = PROT_READ | PROT_EXEC;              // Steady state of the code cache.
31
// mprotect() the given range and abort (via PLOG(FATAL)) on failure.
// NOTE: mprotect returns -1 on error and sets errno itself; the previous
// version overwrote errno with that -1 return value, which made PLOG report
// a meaningless "Unknown error" instead of the real failure cause.
#define CHECKED_MPROTECT(memory, size, prot)                \
  do {                                                      \
    int rc = mprotect(memory, size, prot);                  \
    if (UNLIKELY(rc != 0)) {                                \
      PLOG(FATAL) << "Failed to mprotect jit code cache";   \
    }                                                       \
  } while (false)                                           \

Mathieu Chartiere5f13e52015-02-24 09:37:21 -080041JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
42 CHECK_GT(capacity, 0U);
43 CHECK_LT(capacity, kMaxCapacity);
44 std::string error_str;
45 // Map name specific for android_os_Debug.cpp accounting.
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +010046 MemMap* data_map = MemMap::MapAnonymous(
47 "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
48 if (data_map == nullptr) {
Mathieu Chartiere5f13e52015-02-24 09:37:21 -080049 std::ostringstream oss;
50 oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
51 *error_msg = oss.str();
52 return nullptr;
53 }
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +010054
55 // Data cache is 1 / 4 of the map.
56 // TODO: Make this variable?
57 size_t data_size = RoundUp(data_map->Size() / 4, kPageSize);
58 size_t code_size = data_map->Size() - data_size;
59 uint8_t* divider = data_map->Begin() + data_size;
60
61 // We need to have 32 bit offsets from method headers in code cache which point to things
62 // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
63 MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
64 if (code_map == nullptr) {
65 std::ostringstream oss;
66 oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
67 *error_msg = oss.str();
68 return nullptr;
69 }
70 DCHECK_EQ(code_map->Size(), code_size);
71 DCHECK_EQ(code_map->Begin(), divider);
72 return new JitCodeCache(code_map, data_map);
Mathieu Chartiere5f13e52015-02-24 09:37:21 -080073}
74
// Takes ownership of both maps (expected to be adjacent: data first, code
// second — see Create()).  Sets up a dlmalloc mspace inside each map and then
// drops the maps from RWX to their steady-state protections.
JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
    : lock_("Jit code cache", kJitCodeCacheLock),
      code_map_(code_map),
      data_map_(data_map),
      num_methods_(0) {

  VLOG(jit) << "Created jit code cache: data size="
            << PrettySize(data_map_->Size())
            << ", code size="
            << PrettySize(code_map_->Size());

  // Carve a dlmalloc allocation space out of each map.  Must happen while the
  // maps are still writable (they were created with kProtAll).
  code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
  data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);

  if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
    PLOG(FATAL) << "create_mspace_with_base failed";
  }

  // Prevent morecore requests from the mspace.
  mspace_set_footprint_limit(code_mspace_, code_map_->Size());
  mspace_set_footprint_limit(data_mspace_, data_map_->Size());

  // Steady-state protections: code is read+execute only (made writable
  // transiently via ScopedCodeCacheWrite), data is read+write.
  CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
}
100
Mathieu Chartiere401d142015-04-22 13:56:20 -0700101bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800102 return ContainsCodePtr(method->GetEntryPointFromQuickCompiledCode());
103}
104
105bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100106 return code_map_->Begin() <= ptr && ptr < code_map_->End();
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800107}
108
// RAII helper that makes the code cache writable (RWX) for the lifetime of
// the scope and restores the read+execute-only protection on destruction.
class ScopedCodeCacheWrite {
 public:
  explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
  }
  ~ScopedCodeCacheWrite() {
    CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
  }
 private:
  // Not owned; the map must outlive this scope object.
  MemMap* const code_map_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
};
122
// Allocates executable memory, copies the compiled code into it, and places
// an OatQuickMethodHeader immediately before the (aligned) code start so the
// runtime can recover method metadata from a code pointer.
// Returns a pointer to the method header, or nullptr if the code cache is full.
uint8_t* JitCodeCache::CommitCode(Thread* self,
                                  const uint8_t* mapping_table,
                                  const uint8_t* vmap_table,
                                  const uint8_t* gc_map,
                                  size_t frame_size_in_bytes,
                                  size_t core_spill_mask,
                                  size_t fp_spill_mask,
                                  const uint8_t* code,
                                  size_t code_size) {
  // The extra 32 bytes leave slack so the code start can be re-aligned to the
  // instruction-set alignment after the header (presumably 32 >= max
  // alignment — TODO confirm); round up to pointer size for mspace_malloc.
  size_t total_size = RoundUp(sizeof(OatQuickMethodHeader) + code_size + 32, sizeof(void*));
  OatQuickMethodHeader* method_header = nullptr;
  uint8_t* code_ptr;

  MutexLock mu(self, lock_);
  {
    // Make the code pages writable only for the duration of the copy.
    ScopedCodeCacheWrite scc(code_map_.get());
    uint8_t* result = reinterpret_cast<uint8_t*>(mspace_malloc(code_mspace_, total_size));
    if (result == nullptr) {
      return nullptr;
    }
    // Align the code start past the header; the slack in total_size above
    // guarantees this still fits in the allocation.
    code_ptr = reinterpret_cast<uint8_t*>(
        RoundUp(reinterpret_cast<size_t>(result + sizeof(OatQuickMethodHeader)),
                GetInstructionSetAlignment(kRuntimeISA)));

    std::copy(code, code + code_size, code_ptr);
    // The header lives directly before the code; construct it in place.
    // Table fields are stored as offsets from code_ptr back to each table
    // (0 when the table is absent).
    method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
    new (method_header) OatQuickMethodHeader(
        (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
        (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
        (gc_map == nullptr) ? 0 : code_ptr - gc_map,
        frame_size_in_bytes,
        core_spill_mask,
        fp_spill_mask,
        code_size);
  }

  // Flush the instruction cache over the newly written code before it can be
  // executed.
  __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
                          reinterpret_cast<char*>(code_ptr + code_size));

  ++num_methods_;  // TODO: This is hacky but works since each method has exactly one code region.
  return reinterpret_cast<uint8_t*>(method_header);
}
165
166size_t JitCodeCache::CodeCacheSize() {
167 MutexLock mu(Thread::Current(), lock_);
168 size_t bytes_allocated = 0;
169 mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
170 return bytes_allocated;
171}
172
173size_t JitCodeCache::DataCacheSize() {
174 MutexLock mu(Thread::Current(), lock_);
175 size_t bytes_allocated = 0;
176 mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
177 return bytes_allocated;
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800178}
179
Nicolas Geoffray5550ca82015-08-21 18:38:30 +0100180uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
Nicolas Geoffray5550ca82015-08-21 18:38:30 +0100181 size = RoundUp(size, sizeof(void*));
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100182 MutexLock mu(self, lock_);
183 return reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
Nicolas Geoffray5550ca82015-08-21 18:38:30 +0100184}
185
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800186uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100187 uint8_t* result = ReserveData(self, end - begin);
188 if (result == nullptr) {
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800189 return nullptr; // Out of space in the data cache.
190 }
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100191 std::copy(begin, end, result);
192 return result;
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800193}
194
Mathieu Chartiere401d142015-04-22 13:56:20 -0700195const void* JitCodeCache::GetCodeFor(ArtMethod* method) {
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800196 const void* code = method->GetEntryPointFromQuickCompiledCode();
197 if (ContainsCodePtr(code)) {
198 return code;
199 }
200 MutexLock mu(Thread::Current(), lock_);
201 auto it = method_code_map_.find(method);
202 if (it != method_code_map_.end()) {
203 return it->second;
204 }
205 return nullptr;
206}
207
Mathieu Chartiere401d142015-04-22 13:56:20 -0700208void JitCodeCache::SaveCompiledCode(ArtMethod* method, const void* old_code_ptr) {
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800209 DCHECK_EQ(method->GetEntryPointFromQuickCompiledCode(), old_code_ptr);
210 DCHECK(ContainsCodePtr(old_code_ptr)) << PrettyMethod(method) << " old_code_ptr="
211 << old_code_ptr;
212 MutexLock mu(Thread::Current(), lock_);
213 auto it = method_code_map_.find(method);
214 if (it != method_code_map_.end()) {
215 return;
216 }
217 method_code_map_.Put(method, old_code_ptr);
218}
219
220} // namespace jit
221} // namespace art