blob: 4187358bc0155b2fa12eb006e7688ac478348dd4 [file] [log] [blame]
Mathieu Chartiere5f13e52015-02-24 09:37:21 -08001/*
2 * Copyright 2014 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "jit_code_cache.h"
18
19#include <sstream>
20
Mathieu Chartiere401d142015-04-22 13:56:20 -070021#include "art_method-inl.h"
Mathieu Chartiere5f13e52015-02-24 09:37:21 -080022#include "mem_map.h"
Mathieu Chartiere5f13e52015-02-24 09:37:21 -080023#include "oat_file-inl.h"
24
25namespace art {
26namespace jit {
27
static constexpr int kProtAll = PROT_READ | PROT_WRITE | PROT_EXEC;
static constexpr int kProtData = PROT_READ | PROT_WRITE;
static constexpr int kProtCode = PROT_READ | PROT_EXEC;

// Changes the protection of a memory range, aborting the runtime on failure.
// Note: mprotect() returns -1 on failure and sets errno itself. The previous
// version assigned the return value to errno ("errno = rc;"), clobbering the
// real error code with -1 so PLOG reported "Unknown error" instead of the
// actual cause. Leave errno untouched so PLOG can print the real reason.
#define CHECKED_MPROTECT(memory, size, prot)                \
  do {                                                      \
    int rc = mprotect(memory, size, prot);                  \
    if (UNLIKELY(rc != 0)) {                                \
      PLOG(FATAL) << "Failed to mprotect jit code cache";   \
    }                                                       \
  } while (false)
40
Mathieu Chartiere5f13e52015-02-24 09:37:21 -080041JitCodeCache* JitCodeCache::Create(size_t capacity, std::string* error_msg) {
42 CHECK_GT(capacity, 0U);
43 CHECK_LT(capacity, kMaxCapacity);
44 std::string error_str;
45 // Map name specific for android_os_Debug.cpp accounting.
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +010046 MemMap* data_map = MemMap::MapAnonymous(
47 "data-code-cache", nullptr, capacity, kProtAll, false, false, &error_str);
48 if (data_map == nullptr) {
Mathieu Chartiere5f13e52015-02-24 09:37:21 -080049 std::ostringstream oss;
50 oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
51 *error_msg = oss.str();
52 return nullptr;
53 }
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +010054
55 // Data cache is 1 / 4 of the map.
56 // TODO: Make this variable?
57 size_t data_size = RoundUp(data_map->Size() / 4, kPageSize);
58 size_t code_size = data_map->Size() - data_size;
59 uint8_t* divider = data_map->Begin() + data_size;
60
61 // We need to have 32 bit offsets from method headers in code cache which point to things
62 // in the data cache. If the maps are more than 4G apart, having multiple maps wouldn't work.
63 MemMap* code_map = data_map->RemapAtEnd(divider, "jit-code-cache", kProtAll, &error_str);
64 if (code_map == nullptr) {
65 std::ostringstream oss;
66 oss << "Failed to create read write execute cache: " << error_str << " size=" << capacity;
67 *error_msg = oss.str();
68 return nullptr;
69 }
70 DCHECK_EQ(code_map->Size(), code_size);
71 DCHECK_EQ(code_map->Begin(), divider);
72 return new JitCodeCache(code_map, data_map);
Mathieu Chartiere5f13e52015-02-24 09:37:21 -080073}
74
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +010075JitCodeCache::JitCodeCache(MemMap* code_map, MemMap* data_map)
76 : lock_("Jit code cache", kJitCodeCacheLock),
77 code_map_(code_map),
78 data_map_(data_map),
79 num_methods_(0) {
80
81 VLOG(jit) << "Created jit code cache: data size="
82 << PrettySize(data_map_->Size())
83 << ", code size="
84 << PrettySize(code_map_->Size());
85
86 code_mspace_ = create_mspace_with_base(code_map_->Begin(), code_map_->Size(), false /*locked*/);
87 data_mspace_ = create_mspace_with_base(data_map_->Begin(), data_map_->Size(), false /*locked*/);
88
89 if (code_mspace_ == nullptr || data_mspace_ == nullptr) {
90 PLOG(FATAL) << "create_mspace_with_base failed";
91 }
92
93 // Prevent morecore requests from the mspace.
94 mspace_set_footprint_limit(code_mspace_, code_map_->Size());
95 mspace_set_footprint_limit(data_mspace_, data_map_->Size());
96
97 CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
98 CHECKED_MPROTECT(data_map_->Begin(), data_map_->Size(), kProtData);
Mathieu Chartiere5f13e52015-02-24 09:37:21 -080099}
100
Mathieu Chartiere401d142015-04-22 13:56:20 -0700101bool JitCodeCache::ContainsMethod(ArtMethod* method) const {
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800102 return ContainsCodePtr(method->GetEntryPointFromQuickCompiledCode());
103}
104
105bool JitCodeCache::ContainsCodePtr(const void* ptr) const {
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100106 return code_map_->Begin() <= ptr && ptr < code_map_->End();
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800107}
108
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100109class ScopedCodeCacheWrite {
110 public:
111 explicit ScopedCodeCacheWrite(MemMap* code_map) : code_map_(code_map) {
112 CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtAll);
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800113 }
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100114 ~ScopedCodeCacheWrite() {
115 CHECKED_MPROTECT(code_map_->Begin(), code_map_->Size(), kProtCode);
116 }
117 private:
118 MemMap* const code_map_;
119
120 DISALLOW_COPY_AND_ASSIGN(ScopedCodeCacheWrite);
121};
122
123uint8_t* JitCodeCache::CommitCode(Thread* self,
124 const uint8_t* mapping_table,
125 const uint8_t* vmap_table,
126 const uint8_t* gc_map,
127 size_t frame_size_in_bytes,
128 size_t core_spill_mask,
129 size_t fp_spill_mask,
130 const uint8_t* code,
131 size_t code_size) {
Nicolas Geoffray1e7de6c2015-10-21 12:07:31 +0100132 size_t alignment = GetInstructionSetAlignment(kRuntimeISA);
133 // Ensure the header ends up at expected instruction alignment.
134 size_t header_size = RoundUp(sizeof(OatQuickMethodHeader), alignment);
135 size_t total_size = header_size + code_size;
136
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100137 OatQuickMethodHeader* method_header = nullptr;
Nicolas Geoffray1e7de6c2015-10-21 12:07:31 +0100138 uint8_t* code_ptr = nullptr;
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100139
140 MutexLock mu(self, lock_);
141 {
142 ScopedCodeCacheWrite scc(code_map_.get());
Nicolas Geoffray1e7de6c2015-10-21 12:07:31 +0100143 uint8_t* result = reinterpret_cast<uint8_t*>(
144 mspace_memalign(code_mspace_, alignment, total_size));
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100145 if (result == nullptr) {
146 return nullptr;
147 }
Nicolas Geoffray1e7de6c2015-10-21 12:07:31 +0100148 code_ptr = result + header_size;
149 DCHECK_ALIGNED_PARAM(reinterpret_cast<uintptr_t>(code_ptr), alignment);
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100150
151 std::copy(code, code + code_size, code_ptr);
152 method_header = reinterpret_cast<OatQuickMethodHeader*>(code_ptr) - 1;
153 new (method_header) OatQuickMethodHeader(
154 (mapping_table == nullptr) ? 0 : code_ptr - mapping_table,
155 (vmap_table == nullptr) ? 0 : code_ptr - vmap_table,
156 (gc_map == nullptr) ? 0 : code_ptr - gc_map,
157 frame_size_in_bytes,
158 core_spill_mask,
159 fp_spill_mask,
160 code_size);
161 }
162
163 __builtin___clear_cache(reinterpret_cast<char*>(code_ptr),
164 reinterpret_cast<char*>(code_ptr + code_size));
165
Mathieu Chartiera4885cb2015-03-09 15:38:54 -0700166 ++num_methods_; // TODO: This is hacky but works since each method has exactly one code region.
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100167 return reinterpret_cast<uint8_t*>(method_header);
168}
169
170size_t JitCodeCache::CodeCacheSize() {
171 MutexLock mu(Thread::Current(), lock_);
172 size_t bytes_allocated = 0;
173 mspace_inspect_all(code_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
174 return bytes_allocated;
175}
176
177size_t JitCodeCache::DataCacheSize() {
178 MutexLock mu(Thread::Current(), lock_);
179 size_t bytes_allocated = 0;
180 mspace_inspect_all(data_mspace_, DlmallocBytesAllocatedCallback, &bytes_allocated);
181 return bytes_allocated;
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800182}
183
Nicolas Geoffray5550ca82015-08-21 18:38:30 +0100184uint8_t* JitCodeCache::ReserveData(Thread* self, size_t size) {
Nicolas Geoffray5550ca82015-08-21 18:38:30 +0100185 size = RoundUp(size, sizeof(void*));
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100186 MutexLock mu(self, lock_);
187 return reinterpret_cast<uint8_t*>(mspace_malloc(data_mspace_, size));
Nicolas Geoffray5550ca82015-08-21 18:38:30 +0100188}
189
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800190uint8_t* JitCodeCache::AddDataArray(Thread* self, const uint8_t* begin, const uint8_t* end) {
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100191 uint8_t* result = ReserveData(self, end - begin);
192 if (result == nullptr) {
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800193 return nullptr; // Out of space in the data cache.
194 }
Nicolas Geoffray0c3c2662015-10-15 13:53:04 +0100195 std::copy(begin, end, result);
196 return result;
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800197}
198
Mathieu Chartiere401d142015-04-22 13:56:20 -0700199const void* JitCodeCache::GetCodeFor(ArtMethod* method) {
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800200 const void* code = method->GetEntryPointFromQuickCompiledCode();
201 if (ContainsCodePtr(code)) {
202 return code;
203 }
204 MutexLock mu(Thread::Current(), lock_);
205 auto it = method_code_map_.find(method);
206 if (it != method_code_map_.end()) {
207 return it->second;
208 }
209 return nullptr;
210}
211
Mathieu Chartiere401d142015-04-22 13:56:20 -0700212void JitCodeCache::SaveCompiledCode(ArtMethod* method, const void* old_code_ptr) {
Mathieu Chartiere5f13e52015-02-24 09:37:21 -0800213 DCHECK_EQ(method->GetEntryPointFromQuickCompiledCode(), old_code_ptr);
214 DCHECK(ContainsCodePtr(old_code_ptr)) << PrettyMethod(method) << " old_code_ptr="
215 << old_code_ptr;
216 MutexLock mu(Thread::Current(), lock_);
217 auto it = method_code_map_.find(method);
218 if (it != method_code_map_.end()) {
219 return;
220 }
221 method_code_map_.Put(method, old_code_ptr);
222}
223
224} // namespace jit
225} // namespace art