/*
 * Copyright 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_JIT_JIT_CODE_CACHE_H_
#define ART_RUNTIME_JIT_JIT_CODE_CACHE_H_

#include "instrumentation.h"

#include "atomic.h"
#include "base/arena_containers.h"
#include "base/histogram-inl.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "gc/accounting/bitmap.h"
#include "gc_root.h"
#include "jni.h"
#include "method_reference.h"
#include "oat_file.h"
#include "profile_compilation_info.h"
#include "safe_map.h"
#include "thread_pool.h"

namespace art {

class ArtMethod;
class LinearAlloc;
class InlineCache;
class IsMarkedVisitor;
class OatQuickMethodHeader;
class ProfilingInfo;

namespace jit {

class JitInstrumentationCache;

// Alignment in bytes that will suit all architectures.
static constexpr int kJitCodeAlignment = 16;
using CodeCacheBitmap = gc::accounting::MemoryRangeBitmap<kJitCodeAlignment>;
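// As a rough illustration (not from the original source): the live bitmap
// needs one bit per 16-byte allocation slot, so covering a 64 MB cache takes
// 64 MiB / 16 / 8 = 512 KiB of bitmap memory.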

class JitCodeCache {
 public:
  static constexpr size_t kMaxCapacity = 64 * MB;
  // Set the default to a very low amount for debug builds to stress the code cache
  // collection.
  static constexpr size_t kInitialCapacity = kIsDebugBuild ? 8 * KB : 64 * KB;

  // By default, do not GC until reaching 256KB (kInitialCapacity * 4 in release builds).
  static constexpr size_t kReservedCapacity = kInitialCapacity * 4;

  // Create the code cache with a code + data capacity of `initial_capacity`, growable up
  // to `max_capacity`. Any error message is passed in the out arg error_msg.
  static JitCodeCache* Create(size_t initial_capacity,
                              size_t max_capacity,
                              bool generate_debug_info,
                              std::string* error_msg);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSize() REQUIRES(!lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSize() REQUIRES(!lock_);

  bool NotifyCompilationOf(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void NotifyMethodRedefined(ArtMethod* method)
      REQUIRES(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Notify the code cache that the compiler wants to use the
  // profiling info of `method` to drive optimizations,
  // and therefore ensure the returned profiling info object is not
  // collected.
  ProfilingInfo* NotifyCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompiling(ArtMethod* method, Thread* self, bool osr)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  void DoneCompilerUse(ArtMethod* method, Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Allocate and write code and its metadata to the code cache.
  // `cha_single_implementation_list` needs to be registered via CHA (if it's
  // still valid), since the compiled code still needs to be invalidated if the
  // single-implementation assumptions are violated later. This needs to be done
  // even if `has_should_deoptimize_flag` is false, which can happen due to CHA
  // guard elimination.
  uint8_t* CommitCode(Thread* self,
                      ArtMethod* method,
                      uint8_t* stack_map,
                      uint8_t* method_info,
                      uint8_t* roots_data,
                      size_t frame_size_in_bytes,
                      size_t core_spill_mask,
                      size_t fp_spill_mask,
                      const uint8_t* code,
                      size_t code_size,
                      bool osr,
                      Handle<mirror::ObjectArray<mirror::Object>> roots,
                      bool has_should_deoptimize_flag,
                      const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  // Return true if the code cache contains this pc.
  bool ContainsPc(const void* pc) const;

  // Return true if the code cache contains this method.
  bool ContainsMethod(ArtMethod* method) REQUIRES(!lock_);

  // Allocate a region of data that will contain the stack map and method info
  // of the given sizes, plus space for storing `number_of_roots` roots. The out
  // pointers are set to null if there is no more room.
  // Return the number of bytes allocated.
  size_t ReserveData(Thread* self,
                     size_t stack_map_size,
                     size_t method_info_size,
                     size_t number_of_roots,
                     ArtMethod* method,
                     uint8_t** stack_map_data,
                     uint8_t** method_info_data,
                     uint8_t** roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);
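
  // A minimal usage sketch for the ReserveData/CommitCode pair (illustrative
  // only; local names are hypothetical):
  //
  //   uint8_t* stack_map_data = nullptr;
  //   uint8_t* method_info_data = nullptr;
  //   uint8_t* roots_data = nullptr;
  //   code_cache->ReserveData(self, stack_map_size, method_info_size,
  //                           number_of_roots, method,
  //                           &stack_map_data, &method_info_data, &roots_data);
  //   if (stack_map_data != nullptr) {
  //     // ... write stack maps, method info, and GC roots into the region ...
  //     code_cache->CommitCode(self, method, stack_map_data, method_info_data,
  //                            roots_data, frame_size_in_bytes, core_spill_mask,
  //                            fp_spill_mask, code_buffer, code_size, osr, roots,
  //                            has_should_deoptimize_flag, cha_list);
  //   }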

  // Clear data from the data portion of the code cache.
  void ClearData(Thread* self, uint8_t* stack_map_data, uint8_t* roots_data)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!lock_);

  CodeCacheBitmap* GetLiveBitmap() const {
    return live_bitmap_.get();
  }

  // Return whether we should do a full collection given the current state of the cache.
  bool ShouldDoFullCollection()
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Perform a collection on the code cache.
  void GarbageCollectCache(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Given the 'pc', try to find the JIT compiled code associated with it.
  // Return null if 'pc' is not in the code cache. 'method' is passed as a
  // sanity check.
  OatQuickMethodHeader* LookupMethodHeader(uintptr_t pc, ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
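
  // For example (illustrative, not from the original source), a stack walker
  // holding the mutator lock can resolve a JIT frame with:
  //
  //   OatQuickMethodHeader* header =
  //       code_cache->LookupMethodHeader(return_pc, expected_method);
  //   if (header != nullptr) {
  //     // return_pc points into JIT compiled code for expected_method.
  //   }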

  OatQuickMethodHeader* LookupOsrMethodHeader(ArtMethod* method)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Removes a method from the cache for testing purposes. The caller
  // must ensure that all threads are suspended and that the method is
  // not on any thread's stack.
  bool RemoveMethod(ArtMethod* method, bool release_memory)
      REQUIRES(!lock_)
      REQUIRES(Locks::mutator_lock_);

  // Remove all methods in our cache that were allocated by 'alloc'.
  void RemoveMethodsIn(Thread* self, const LinearAlloc& alloc)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void CopyInlineCacheInto(const InlineCache& ic, Handle<mirror::ObjectArray<mirror::Class>> array)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a 'ProfilingInfo' for 'method'. If 'retry_allocation' is true,
  // will collect and retry if the first allocation is unsuccessful.
  ProfilingInfo* AddProfilingInfo(Thread* self,
                                  ArtMethod* method,
                                  const std::vector<uint32_t>& entries,
                                  bool retry_allocation)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool OwnsSpace(const void* mspace) const NO_THREAD_SAFETY_ANALYSIS {
    return mspace == code_mspace_ || mspace == data_mspace_;
  }

  void* MoreCore(const void* mspace, intptr_t increment);
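
  // Sketch of the expected allocator callback wiring (hook name is
  // hypothetical; the real glue lives in the implementation file). A
  // dlmalloc-style mspace asks its owner for more memory, and OwnsSpace()
  // identifies which cache owns the mspace:
  //
  //   void* MoreCoreHook(void* mspace, intptr_t increment) {
  //     JitCodeCache* cache = Runtime::Current()->GetJit()->GetCodeCache();
  //     DCHECK(cache->OwnsSpace(mspace));
  //     return cache->MoreCore(mspace, increment);
  //   }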

  // Adds to `methods` all profiled methods which are part of any of the given dex locations.
  void GetProfiledMethods(const std::set<std::string>& dex_base_locations,
                          std::vector<ProfileMethodInfo>& methods)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  uint64_t GetLastUpdateTimeNs() const;

  size_t GetCurrentCapacity() REQUIRES(!lock_) {
    MutexLock lock(Thread::Current(), lock_);
    return current_capacity_;
  }

  size_t GetMemorySizeOfCodePointer(const void* ptr) REQUIRES(!lock_);

  void InvalidateCompiledCodeFor(ArtMethod* method, const OatQuickMethodHeader* code)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void Dump(std::ostream& os) REQUIRES(!lock_);

  bool IsOsrCompiled(ArtMethod* method) REQUIRES(!lock_);

  void SweepRootTables(IsMarkedVisitor* visitor)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = nullptr);

  // The GC needs to disallow the reading of inline caches when it processes them,
  // to avoid a class being used while it is being deleted.
  void AllowInlineCacheAccess() REQUIRES(!lock_);
  void DisallowInlineCacheAccess() REQUIRES(!lock_);
  void BroadcastForInlineCacheAccess() REQUIRES(!lock_);
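
  // A sketch of the intended GC-side sequence (assumed from the comment above,
  // not taken from the original source):
  //
  //   code_cache->DisallowInlineCacheAccess();
  //   // ... process inline caches; classes may be unloaded here ...
  //   code_cache->AllowInlineCacheAccess();         // Re-enable access.
  //   code_cache->BroadcastForInlineCacheAccess();  // Wake up blocked readers.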

  // Notify the code cache that the method at the pointer 'old_method' is being moved to the pointer
  // 'new_method' since it is being made obsolete.
  void MoveObsoleteMethod(ArtMethod* old_method, ArtMethod* new_method)
      REQUIRES(!lock_) REQUIRES(Locks::mutator_lock_);

  // Dynamically change whether we want to garbage collect code. Should only be used
  // by tests.
  void SetGarbageCollectCode(bool value) {
    garbage_collect_code_ = value;
  }

 private:
  friend class ScopedCodeCacheWrite;

  // Take ownership of maps.
  JitCodeCache(MemMap* code_map,
               MemMap* data_map,
               MemMap* writable_code_map,
               MemMap* code_sync_map,
               size_t initial_code_capacity,
               size_t initial_data_capacity,
               size_t max_capacity,
               bool garbage_collect_code);

  // Internal version of 'CommitCode' that does not retry if the
  // allocation fails. Returns null if the allocation fails.
  uint8_t* CommitCodeInternal(Thread* self,
                              ArtMethod* method,
                              uint8_t* stack_map,
                              uint8_t* method_info,
                              uint8_t* roots_data,
                              size_t frame_size_in_bytes,
                              size_t core_spill_mask,
                              size_t fp_spill_mask,
                              const uint8_t* code,
                              size_t code_size,
                              bool osr,
                              Handle<mirror::ObjectArray<mirror::Object>> roots,
                              bool has_should_deoptimize_flag,
                              const ArenaSet<ArtMethod*>& cha_single_implementation_list)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ProfilingInfo* AddProfilingInfoInternal(Thread* self,
                                          ArtMethod* method,
                                          const std::vector<uint32_t>& entries)
      REQUIRES(lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // If a collection is in progress, wait for it to finish. Return
  // whether the thread actually waited.
  bool WaitForPotentialCollectionToComplete(Thread* self)
      REQUIRES(lock_) REQUIRES(!Locks::mutator_lock_);

  // Remove CHA dependents and underlying allocations for entries in `method_headers`.
  void FreeAllMethodHeaders(const std::unordered_set<OatQuickMethodHeader*>& method_headers)
      REQUIRES(!lock_)
      REQUIRES(!Locks::cha_lock_);

  // Free the mspace allocations for `code_ptr`.
  void FreeCodeAndData(const void* code_ptr) REQUIRES(lock_);

  // Number of bytes allocated in the code cache.
  size_t CodeCacheSizeLocked() REQUIRES(lock_);

  // Number of bytes allocated in the data cache.
  size_t DataCacheSizeLocked() REQUIRES(lock_);

  // Notify all waiting threads that a collection is done.
  void NotifyCollectionDone(Thread* self) REQUIRES(lock_);

  // Try to increase the current capacity of the code cache. Return whether we
  // succeeded at doing so.
  bool IncreaseCodeCacheCapacity() REQUIRES(lock_);

  // Set the footprint limit of the code cache.
  void SetFootprintLimit(size_t new_footprint) REQUIRES(lock_);

  void DoCollection(Thread* self, bool collect_profiling_info)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RemoveUnmarkedCode(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void MarkCompiledCodeOnThreadStacks(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool CheckLiveCompiledCodeHasProfilingInfo()
      REQUIRES(lock_);

  void FreeRawCode(void* code) REQUIRES(lock_);
  uint8_t* AllocateCode(size_t code_size) REQUIRES(lock_);
  void FreeData(uint8_t* data) REQUIRES(lock_);
  uint8_t* AllocateData(size_t data_size) REQUIRES(lock_);

  bool IsWeakAccessEnabled(Thread* self) const;
  void WaitUntilInlineCacheAccessible(Thread* self)
      REQUIRES(!lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  MemMap* GetWritableMemMap() const {
    if (writable_code_map_ == nullptr) {
      // The system required us to map the JIT Code Cache RWX (see
      // JitCodeCache::Create()).
      return executable_code_map_.get();
    } else {
      // Executable code is mapped RX, and writable code is mapped RW
      // to the same underlying memory, but at a different address.
      return writable_code_map_.get();
    }
  }

  bool IsDataAddress(const void* raw_addr) const;

  bool IsExecutableAddress(const void* raw_addr) const;

  bool IsWritableAddress(const void* raw_addr) const;

  template <typename T>
  T* ToExecutableAddress(T* writable_address) const;

  void* ToWritableAddress(const void* executable_address) const;
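
  // The translation between the writable and executable views is plain pointer
  // arithmetic; a sketch of what ToWritableAddress() presumably computes
  // (illustrative only):
  //
  //   uintptr_t offset = reinterpret_cast<uintptr_t>(executable_address) -
  //       reinterpret_cast<uintptr_t>(executable_code_map_->Begin());
  //   return GetWritableMemMap()->Begin() + offset;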

  // Lock for guarding allocations, collections, and the method_code_map_.
  Mutex lock_;
  // Condition to wait on during collection.
  ConditionVariable lock_cond_ GUARDED_BY(lock_);
  // Whether there is a code cache collection in progress.
  bool collection_in_progress_ GUARDED_BY(lock_);
  // JITting methods obviously requires both write and execute permissions on a region of memory.
  // In the typical (non-debugging) case, we separate the memory-mapped view that can write the code
  // from a view that the runtime uses to execute the code. Having these two views eliminates any
  // single address region having rwx permissions. An attacker could still write to the writable
  // address and then execute from the executable address. We allocate the mappings with a random
  // address relationship to each other, which means an attacker needs two addresses rather than
  // just one. In the debugging case there is no file descriptor to back the
  // shared memory, and hence we have to use a single mapping.
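  //
  // A minimal sketch of how such a dual view can be constructed on Linux
  // (illustrative only; the real setup is in JitCodeCache::Create()):
  //
  //   int fd = ...;  // File descriptor backing the code region (e.g. ashmem).
  //   void* writable = mmap(nullptr, capacity, PROT_READ | PROT_WRITE,
  //                         MAP_SHARED, fd, 0);
  //   void* executable = mmap(nullptr, capacity, PROT_READ | PROT_EXEC,
  //                           MAP_SHARED, fd, 0);
  //
  // Both mappings alias the same physical pages; ASLR places them at unrelated
  // addresses, so discovering one does not reveal the other.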
  // Mem map which holds data (stack maps and profiling info).
  std::unique_ptr<MemMap> data_map_;
  // Mem map which holds a non-writable view of code for JIT.
  std::unique_ptr<MemMap> executable_code_map_;
  // Mem map which holds a non-executable view of code for JIT.
  std::unique_ptr<MemMap> writable_code_map_;
  // Mem map which holds one executable page that we use for flushing instruction
  // fetch buffers. The code on this page is never executed.
  std::unique_ptr<MemMap> code_sync_map_;
  // The opaque mspace for allocating code.
  void* code_mspace_ GUARDED_BY(lock_);
  // The opaque mspace for allocating data.
  void* data_mspace_ GUARDED_BY(lock_);
  // Bitmap for collecting code and data.
  std::unique_ptr<CodeCacheBitmap> live_bitmap_;
  // Holds non-writable compiled code associated with the ArtMethod.
  SafeMap<const void*, ArtMethod*> method_code_map_ GUARDED_BY(lock_);
  // Holds non-writable OSR compiled code associated with the ArtMethod.
  SafeMap<ArtMethod*, const void*> osr_code_map_ GUARDED_BY(lock_);
  // ProfilingInfo objects we have allocated.
  std::vector<ProfilingInfo*> profiling_infos_ GUARDED_BY(lock_);

  // The maximum capacity in bytes this code cache can go to.
  size_t max_capacity_ GUARDED_BY(lock_);

  // The current capacity in bytes of the code cache.
  size_t current_capacity_ GUARDED_BY(lock_);

  // The current footprint in bytes of the code portion of the code cache.
  size_t code_end_ GUARDED_BY(lock_);

  // The current footprint in bytes of the data portion of the code cache.
  size_t data_end_ GUARDED_BY(lock_);

  // Whether the last collection round increased the code cache.
  bool last_collection_increased_code_cache_ GUARDED_BY(lock_);

  // Last time the code cache was updated.
  // It is atomic to avoid locking when reading it.
  Atomic<uint64_t> last_update_time_ns_;

  // Whether we can do garbage collection. Not 'const' as tests may override this.
  bool garbage_collect_code_;

  // The size in bytes of used memory for the data portion of the code cache.
  size_t used_memory_for_data_ GUARDED_BY(lock_);

  // The size in bytes of used memory for the code portion of the code cache.
  size_t used_memory_for_code_ GUARDED_BY(lock_);

  // Number of compilations done throughout the lifetime of the JIT.
  size_t number_of_compilations_ GUARDED_BY(lock_);

  // Number of compilations for on-stack-replacement done throughout the lifetime of the JIT.
  size_t number_of_osr_compilations_ GUARDED_BY(lock_);

  // Number of code cache collections done throughout the lifetime of the JIT.
  size_t number_of_collections_ GUARDED_BY(lock_);

  // Histogram for keeping track of stack map size statistics.
  Histogram<uint64_t> histogram_stack_map_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of code size statistics.
  Histogram<uint64_t> histogram_code_memory_use_ GUARDED_BY(lock_);

  // Histogram for keeping track of profiling info statistics.
  Histogram<uint64_t> histogram_profiling_info_memory_use_ GUARDED_BY(lock_);

  // Whether the GC allows accessing weaks in inline caches. Note that this
  // is not used by the concurrent collector, which uses
  // Thread::SetWeakRefAccessEnabled instead.
  Atomic<bool> is_weak_access_enabled_;

  // Condition to wait on for accessing inline caches.
  ConditionVariable inline_cache_cond_ GUARDED_BY(lock_);

  DISALLOW_IMPLICIT_CONSTRUCTORS(JitCodeCache);
};

}  // namespace jit
}  // namespace art

#endif  // ART_RUNTIME_JIT_JIT_CODE_CACHE_H_