/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_RUNTIME_H_
#define ART_RUNTIME_RUNTIME_H_

#include <jni.h>
#include <stdio.h>

#include <iosfwd>
#include <list>
#include <memory>
#include <set>
#include <string>
#include <utility>
#include <vector>

#include "arch/instruction_set.h"
#include "base/locks.h"
#include "base/macros.h"
#include "base/mem_map.h"
#include "deoptimization_kind.h"
#include "dex/dex_file_types.h"
#include "experimental_flags.h"
#include "gc_root.h"
#include "instrumentation.h"
#include "jdwp_provider.h"
#include "obj_ptr.h"
#include "offsets.h"
#include "process_state.h"
#include "quick/quick_method_frame_info.h"
#include "runtime_stats.h"

namespace art {

namespace gc {
class AbstractSystemWeakHolder;
class Heap;
}  // namespace gc

namespace hiddenapi {
enum class EnforcementPolicy;
}  // namespace hiddenapi

namespace jit {
class Jit;
class JitCodeCache;
class JitOptions;
}  // namespace jit

namespace mirror {
class Array;
class ClassLoader;
class DexCache;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int8_t> ByteArray;
class String;
class Throwable;
}  // namespace mirror

namespace ti {
class Agent;
class AgentSpec;
}  // namespace ti

namespace verifier {
class MethodVerifier;
enum class VerifyMode : int8_t;
}  // namespace verifier

class ArenaPool;
class ArtMethod;
enum class CalleeSaveType : uint32_t;
class ClassLinker;
class CompilerCallbacks;
class DexFile;
class InternTable;
class IsMarkedVisitor;
class JavaVMExt;
class LinearAlloc;
class MonitorList;
class MonitorPool;
class NullPointerHandler;
class OatFileManager;
class Plugin;
struct RuntimeArgumentMap;
class RuntimeCallbacks;
class SignalCatcher;
class StackOverflowHandler;
class SuspensionHandler;
class ThreadList;
class ThreadPool;
class Trace;
struct TraceConfig;
class Transaction;

typedef std::vector<std::pair<std::string, const void*>> RuntimeOptions;

class Runtime {
 public:
  // Parse raw runtime options.
  static bool ParseOptions(const RuntimeOptions& raw_options,
                           bool ignore_unrecognized,
                           RuntimeArgumentMap* runtime_options);

  // Creates and initializes a new runtime.
  static bool Create(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

  // Creates and initializes a new runtime.
  static bool Create(const RuntimeOptions& raw_options, bool ignore_unrecognized)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);

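  // Illustrative embedder start-up sequence (a sketch only, not part of the original header;
  // raw_options is a RuntimeOptions you have filled in, and real callers normally reach this
  // path through JNI_CreateJavaVM rather than calling these directly):
  //
  //   RuntimeArgumentMap runtime_options;
  //   if (Runtime::ParseOptions(raw_options, /*ignore_unrecognized=*/ false, &runtime_options) &&
  //       Runtime::Create(std::move(runtime_options))) {
  //     Runtime::Current()->Start();
  //   }
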
  // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently.
  bool IsAotCompiler() const {
    return !UseJitCompilation() && IsCompiler();
  }

  // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
  bool IsCompiler() const {
    return compiler_callbacks_ != nullptr;
  }

  // If a compiler, are we compiling a boot image?
  bool IsCompilingBootImage() const;

  bool CanRelocate() const;

  bool ShouldRelocate() const {
    return must_relocate_ && CanRelocate();
  }

  bool MustRelocateIfPossible() const {
    return must_relocate_;
  }

  bool IsImageDex2OatEnabled() const {
    return image_dex2oat_enabled_;
  }

  CompilerCallbacks* GetCompilerCallbacks() {
    return compiler_callbacks_;
  }

  void SetCompilerCallbacks(CompilerCallbacks* callbacks) {
    CHECK(callbacks != nullptr);
    compiler_callbacks_ = callbacks;
  }

  bool IsZygote() const {
    return is_zygote_;
  }

  bool IsExplicitGcDisabled() const {
    return is_explicit_gc_disabled_;
  }

  std::string GetCompilerExecutable() const;

  const std::vector<std::string>& GetCompilerOptions() const {
    return compiler_options_;
  }

  void AddCompilerOption(const std::string& option) {
    compiler_options_.push_back(option);
  }

  const std::vector<std::string>& GetImageCompilerOptions() const {
    return image_compiler_options_;
  }

  const std::string& GetImageLocation() const {
    return image_location_;
  }

  // Starts a runtime, which may cause threads to be started and code to run.
  bool Start() UNLOCK_FUNCTION(Locks::mutator_lock_);

  bool IsShuttingDown(Thread* self);
  bool IsShuttingDownLocked() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return shutting_down_;
  }

  size_t NumberOfThreadsBeingBorn() const REQUIRES(Locks::runtime_shutdown_lock_) {
    return threads_being_born_;
  }

  void StartThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_) {
    threads_being_born_++;
  }

  void EndThreadBirth() REQUIRES(Locks::runtime_shutdown_lock_);

  bool IsStarted() const {
    return started_;
  }

  bool IsFinishedStarting() const {
    return finished_starting_;
  }

  void RunRootClinits(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);

  static Runtime* Current() {
    return instance_;
  }

  // Aborts semi-cleanly. Used in the implementation of LOG(FATAL), which most
  // callers should prefer.
  NO_RETURN static void Abort(const char* msg) REQUIRES(!Locks::abort_lock_);

  // Returns the "main" ThreadGroup, used when attaching user threads.
  jobject GetMainThreadGroup() const;

  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  jobject GetSystemThreadGroup() const;

  // Returns the system ClassLoader which represents the CLASSPATH.
  jobject GetSystemClassLoader() const;

  // Attaches the calling native thread to the runtime.
  bool AttachCurrentThread(const char* thread_name, bool as_daemon, jobject thread_group,
                           bool create_peer);

  void CallExitHook(jint status);

  // Detaches the current native thread from the runtime.
  void DetachCurrentThread() REQUIRES(!Locks::mutator_lock_);

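  // Illustrative attach/detach pairing for a native thread (a sketch, not part of the original
  // header; passing a null thread_group usually selects the main thread group):
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->AttachCurrentThread("MyWorker", /*as_daemon=*/ false, /*thread_group=*/ nullptr,
  //                                /*create_peer=*/ true);
  //   ... do work that may touch managed objects ...
  //   runtime->DetachCurrentThread();
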
  void DumpDeoptimizations(std::ostream& os);
  void DumpForSigQuit(std::ostream& os);
  void DumpLockHolders(std::ostream& os);

  ~Runtime();

  const std::vector<std::string>& GetBootClassPath() const {
    return boot_class_path_;
  }

  const std::vector<std::string>& GetBootClassPathLocations() const {
    DCHECK(boot_class_path_locations_.empty() ||
           boot_class_path_locations_.size() == boot_class_path_.size());
    return boot_class_path_locations_.empty() ? boot_class_path_ : boot_class_path_locations_;
  }

  const std::string& GetClassPathString() const {
    return class_path_string_;
  }

  ClassLinker* GetClassLinker() const {
    return class_linker_;
  }

  size_t GetDefaultStackSize() const {
    return default_stack_size_;
  }

  gc::Heap* GetHeap() const {
    return heap_;
  }

  InternTable* GetInternTable() const {
    DCHECK(intern_table_ != nullptr);
    return intern_table_;
  }

  JavaVMExt* GetJavaVM() const {
    return java_vm_.get();
  }

  size_t GetMaxSpinsBeforeThinLockInflation() const {
    return max_spins_before_thin_lock_inflation_;
  }

  MonitorList* GetMonitorList() const {
    return monitor_list_;
  }

  MonitorPool* GetMonitorPool() const {
    return monitor_pool_;
  }

  // Is the given object the special object used to mark a cleared JNI weak global?
  bool IsClearedJniWeakGlobal(ObjPtr<mirror::Object> obj) REQUIRES_SHARED(Locks::mutator_lock_);

  // Get the special object used to mark a cleared JNI weak global.
  mirror::Object* GetClearedJniWeakGlobal() REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingException()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenThrowingOOME()
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Throwable* GetPreAllocatedOutOfMemoryErrorWhenHandlingStackOverflow()
      REQUIRES_SHARED(Locks::mutator_lock_);

  mirror::Throwable* GetPreAllocatedNoClassDefFoundError()
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::vector<std::string>& GetProperties() const {
    return properties_;
  }

  ThreadList* GetThreadList() const {
    return thread_list_;
  }

  static const char* GetVersion() {
    return "2.1.0";
  }

  bool IsMethodHandlesEnabled() const {
    return true;
  }

  void DisallowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  void AllowNewSystemWeaks() REQUIRES_SHARED(Locks::mutator_lock_);
  // broadcast_for_checkpoint is true when we broadcast to make blocking threads respond to
  // checkpoint requests. It's false when we broadcast to unblock blocking threads after system
  // weak access is re-enabled.
  void BroadcastForNewSystemWeaks(bool broadcast_for_checkpoint = false);

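  // Rough sketch of how a collector interacts with system weaks via the calls above and
  // SweepSystemWeaks() below (illustrative only, not part of the original header; the exact
  // sequence depends on the collector):
  //
  //   runtime->DisallowNewSystemWeaks();   // block threads from reading/creating system weaks
  //   runtime->SweepSystemWeaks(visitor);  // clear entries whose referents are not marked
  //   runtime->AllowNewSystemWeaks();      // re-enable access and wake blocked threads
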
  // Visit all the roots. If only_dirty is true then non-dirty roots won't be visited. If
  // clean_dirty is true then dirty roots will be marked as non-dirty after visiting.
  void VisitRoots(RootVisitor* visitor, VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit image roots, only used for hprof since the GC uses the image space mod union table
  // instead.
  void VisitImageRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the roots we can safely visit concurrently.
  void VisitConcurrentRoots(RootVisitor* visitor,
                            VisitRootFlags flags = kVisitRootFlagAllRoots)
      REQUIRES(!Locks::classlinker_classes_lock_, !Locks::trace_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all of the non thread roots, we can do this with mutators unpaused.
  void VisitNonThreadRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitTransactionRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Sweep system weaks: a system weak is deleted if the visitor returns null. Otherwise, the
  // system weak is updated to be the visitor's returned value.
  void SweepSystemWeaks(IsMarkedVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime method resolution.
  ArtMethod* GetResolutionMethod();

  bool HasResolutionMethod() const {
    return resolution_method_ != nullptr;
  }

  void SetResolutionMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void ClearResolutionMethod() {
    resolution_method_ = nullptr;
  }

  ArtMethod* CreateResolutionMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a special method that calls into a trampoline for runtime imt conflicts.
  ArtMethod* GetImtConflictMethod();
  ArtMethod* GetImtUnimplementedMethod();

  bool HasImtConflictMethod() const {
    return imt_conflict_method_ != nullptr;
  }

  void ClearImtConflictMethod() {
    imt_conflict_method_ = nullptr;
  }

  void FixupConflictTables();
  void SetImtConflictMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);
  void SetImtUnimplementedMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* CreateImtConflictMethod(LinearAlloc* linear_alloc)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void ClearImtUnimplementedMethod() {
    imt_unimplemented_method_ = nullptr;
  }

  bool HasCalleeSaveMethod(CalleeSaveType type) const {
    return callee_save_methods_[static_cast<size_t>(type)] != 0u;
  }

  ArtMethod* GetCalleeSaveMethod(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ArtMethod* GetCalleeSaveMethodUnchecked(CalleeSaveType type)
      REQUIRES_SHARED(Locks::mutator_lock_);

  QuickMethodFrameInfo GetRuntimeMethodFrameInfo(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static constexpr size_t GetCalleeSaveMethodOffset(CalleeSaveType type) {
    return OFFSETOF_MEMBER(Runtime, callee_save_methods_[static_cast<size_t>(type)]);
  }

  InstructionSet GetInstructionSet() const {
    return instruction_set_;
  }

  void SetInstructionSet(InstructionSet instruction_set);
  void ClearInstructionSet();

  void SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type);
  void ClearCalleeSaveMethods();

  ArtMethod* CreateCalleeSaveMethod() REQUIRES_SHARED(Locks::mutator_lock_);

  int32_t GetStat(int kind);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool HasStatsEnabled() const {
    return stats_enabled_;
  }

  void ResetStats(int kinds);

  void SetStatsEnabled(bool new_state)
      REQUIRES(!Locks::instrument_entrypoints_lock_, !Locks::mutator_lock_);

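  // Illustrative use of the statistics accessors above (a sketch, not part of the original
  // header; KIND_ALLOCATED_OBJECTS is assumed to be one of the kind constants from
  // runtime_stats.h):
  //
  //   Runtime* runtime = Runtime::Current();
  //   runtime->SetStatsEnabled(true);
  //   ... run the workload of interest ...
  //   int32_t allocated = runtime->GetStat(KIND_ALLOCATED_OBJECTS);
  //   runtime->ResetStats(KIND_ALLOCATED_OBJECTS);
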
  enum class NativeBridgeAction {  // private
    kUnload,
    kInitialize
  };

  jit::Jit* GetJit() const {
    return jit_.get();
  }

  // Returns true if JIT compilations are enabled. GetJit() will not be null in this case.
  bool UseJitCompilation() const;

  void PreZygoteFork();
  void PostZygoteFork();
  void InitNonZygoteOrPostFork(
      JNIEnv* env,
      bool is_system_server,
      NativeBridgeAction action,
      const char* isa,
      bool profile_system_server = false);

  const instrumentation::Instrumentation* GetInstrumentation() const {
    return &instrumentation_;
  }

  instrumentation::Instrumentation* GetInstrumentation() {
    return &instrumentation_;
  }

  void RegisterAppInfo(const std::vector<std::string>& code_paths,
                       const std::string& profile_output_filename);

  // Transaction support.
  bool IsActiveTransaction() const;
  void EnterTransactionMode();
  void EnterTransactionMode(bool strict, mirror::Class* root);
  void ExitTransactionMode();
  void RollbackAllTransactions() REQUIRES_SHARED(Locks::mutator_lock_);
  // Transaction rollback and exit transaction are always done together; it's convenient to
  // do them in one function.
  void RollbackAndExitTransactionMode() REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsTransactionAborted() const;
  const std::unique_ptr<Transaction>& GetTransaction() const;
  bool IsActiveStrictTransactionMode() const;

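  // Illustrative transaction flow, as used when pre-initializing classes at compile time (a
  // sketch only, not part of the original header):
  //
  //   runtime->EnterTransactionMode();
  //   ... attempt the class initializer ...
  //   if (runtime->IsTransactionAborted()) {
  //     runtime->RollbackAndExitTransactionMode();
  //   } else {
  //     runtime->ExitTransactionMode();
  //   }
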
  void AbortTransactionAndThrowAbortError(Thread* self, const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowTransactionAbortError(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_);

  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
                               bool is_volatile) const;
  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
                            bool is_volatile) const;
  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
                            bool is_volatile) const;
  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
                             bool is_volatile) const;
  void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                          bool is_volatile) const;
  void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
                          bool is_volatile) const;
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 ObjPtr<mirror::Object> value,
                                 bool is_volatile) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s) const
      REQUIRES(Locks::intern_table_lock_);
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  void SetFaultMessage(const std::string& message);

  void AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::string>* arg_vector) const;

  bool ExplicitStackOverflowChecks() const {
    return !implicit_so_checks_;
  }

  void DisableVerifier();
  bool IsVerificationEnabled() const;
  bool IsVerificationSoftFail() const;

  void SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) {
    hidden_api_policy_ = policy;
  }

  hiddenapi::EnforcementPolicy GetHiddenApiEnforcementPolicy() const {
    return hidden_api_policy_;
  }

  void SetHiddenApiExemptions(const std::vector<std::string>& exemptions) {
    hidden_api_exemptions_ = exemptions;
  }

  const std::vector<std::string>& GetHiddenApiExemptions() {
    return hidden_api_exemptions_;
  }

  void SetDedupeHiddenApiWarnings(bool value) {
    dedupe_hidden_api_warnings_ = value;
  }

  bool ShouldDedupeHiddenApiWarnings() {
    return dedupe_hidden_api_warnings_;
  }

  void SetHiddenApiEventLogSampleRate(uint32_t rate) {
    hidden_api_access_event_log_rate_ = rate;
  }

  uint32_t GetHiddenApiEventLogSampleRate() const {
    return hidden_api_access_event_log_rate_;
  }

  const std::string& GetProcessPackageName() const {
    return process_package_name_;
  }

  void SetProcessPackageName(const char* package_name) {
    if (package_name == nullptr) {
      process_package_name_.clear();
    } else {
      process_package_name_ = package_name;
    }
  }

  bool IsDexFileFallbackEnabled() const {
    return allow_dex_file_fallback_;
  }

  const std::vector<std::string>& GetCpuAbilist() const {
    return cpu_abilist_;
  }

  bool IsRunningOnMemoryTool() const {
    return is_running_on_memory_tool_;
  }

  void SetTargetSdkVersion(uint32_t version) {
    target_sdk_version_ = version;
  }

  uint32_t GetTargetSdkVersion() const {
    return target_sdk_version_;
  }

  uint32_t GetZygoteMaxFailedBoots() const {
    return zygote_max_failed_boots_;
  }

  bool AreExperimentalFlagsEnabled(ExperimentalFlags flags) {
    return (experimental_flags_ & flags) != ExperimentalFlags::kNone;
  }

  void CreateJitCodeCache(bool rwx_memory_allowed);

  // Create the JIT and instrumentation and code cache.
  void CreateJit();

  ArenaPool* GetArenaPool() {
    return arena_pool_.get();
  }
  ArenaPool* GetJitArenaPool() {
    return jit_arena_pool_.get();
  }
  const ArenaPool* GetArenaPool() const {
    return arena_pool_.get();
  }

  void ReclaimArenaPoolMemory();

  LinearAlloc* GetLinearAlloc() {
    return linear_alloc_.get();
  }

  jit::JitOptions* GetJITOptions() {
    return jit_options_.get();
  }

  bool IsJavaDebuggable() const {
    return is_java_debuggable_;
  }

  void SetJavaDebuggable(bool value);

  // Deoptimize the boot image, called for Java debuggable apps.
  void DeoptimizeBootImage();

  bool IsNativeDebuggable() const {
    return is_native_debuggable_;
  }

  void SetNativeDebuggable(bool value) {
    is_native_debuggable_ = value;
  }

  bool AreNonStandardExitsEnabled() const {
    return non_standard_exits_enabled_;
  }

  void SetNonStandardExitsEnabled() {
    DoAndMaybeSwitchInterpreter([=](){ non_standard_exits_enabled_ = true; });
  }

  bool AreAsyncExceptionsThrown() const {
    return async_exceptions_thrown_;
  }

  void SetAsyncExceptionsThrown() {
    DoAndMaybeSwitchInterpreter([=](){ async_exceptions_thrown_ = true; });
  }

  // Change state and re-check which interpreter should be used.
  //
  // This must be called whenever there is an event that forces
  // us to use a different interpreter (e.g. debugger is attached).
  //
  // Changing the state using the lambda gives us some multithreading safety.
  // It ensures that two calls do not interfere with each other and
  // it makes it possible to DCHECK that the thread-local flag is correct.
  template<typename Action>
  static void DoAndMaybeSwitchInterpreter(Action lambda);

  // Returns the build fingerprint, if set. Otherwise an empty string is returned.
  std::string GetFingerprint() {
    return fingerprint_;
  }

  // Called from class linker.
  void SetSentinel(mirror::Object* sentinel) REQUIRES_SHARED(Locks::mutator_lock_);
  // For testing purposes only.
  // TODO: Remove this when this is no longer needed (b/116087961).
  GcRoot<mirror::Object> GetSentinel() REQUIRES_SHARED(Locks::mutator_lock_);

  // Create a normal LinearAlloc, or a low 4gb version if we are a 64 bit AOT compiler.
  LinearAlloc* CreateLinearAlloc();

  OatFileManager& GetOatFileManager() const {
    DCHECK(oat_file_manager_ != nullptr);
    return *oat_file_manager_;
  }

  double GetHashTableMinLoadFactor() const;
  double GetHashTableMaxLoadFactor() const;

  bool IsSafeMode() const {
    return safe_mode_;
  }

  void SetSafeMode(bool mode) {
    safe_mode_ = mode;
  }

  bool GetDumpNativeStackOnSigQuit() const {
    return dump_native_stack_on_sig_quit_;
  }

  bool GetPrunedDalvikCache() const {
    return pruned_dalvik_cache_;
  }

  void SetPrunedDalvikCache(bool pruned) {
    pruned_dalvik_cache_ = pruned;
  }

  void UpdateProcessState(ProcessState process_state);

  // Returns true if we currently care about long mutator pauses.
  bool InJankPerceptibleProcessState() const {
    return process_state_ == kProcessStateJankPerceptible;
  }

  void RegisterSensitiveThread() const;

  void SetZygoteNoThreadSection(bool val) {
    zygote_no_threads_ = val;
  }

  bool IsZygoteNoThreadSection() const {
    return zygote_no_threads_;
  }

  // Returns whether the given code can be deoptimized asynchronously. Code may be compiled with
  // some optimization that makes it impossible to deoptimize.
  bool IsAsyncDeoptimizeable(uintptr_t code) const REQUIRES_SHARED(Locks::mutator_lock_);

  // Returns a saved copy of the environment (getenv/setenv values).
  // Used by Fork to protect against overwriting LD_LIBRARY_PATH, etc.
  char** GetEnvSnapshot() const {
    return env_snapshot_.GetSnapshot();
  }

  void AddSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);
  void RemoveSystemWeakHolder(gc::AbstractSystemWeakHolder* holder);

  void AttachAgent(JNIEnv* env, const std::string& agent_arg, jobject class_loader);

  const std::list<std::unique_ptr<ti::Agent>>& GetAgents() const {
    return agents_;
  }

  RuntimeCallbacks* GetRuntimeCallbacks();

  bool HasLoadedPlugins() const {
    return !plugins_.empty();
  }

  void InitThreadGroups(Thread* self);

  void SetDumpGCPerformanceOnShutdown(bool value) {
    dump_gc_performance_on_shutdown_ = value;
  }

  void IncrementDeoptimizationCount(DeoptimizationKind kind) {
    DCHECK_LE(kind, DeoptimizationKind::kLast);
    deoptimization_counts_[static_cast<size_t>(kind)]++;
  }

  uint32_t GetNumberOfDeoptimizations() const {
    uint32_t result = 0;
    for (size_t i = 0; i <= static_cast<size_t>(DeoptimizationKind::kLast); ++i) {
      result += deoptimization_counts_[i];
    }
    return result;
  }

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool MAdviseRandomAccess() const {
    return madvise_random_access_;
  }

  const std::string& GetJdwpOptions() {
    return jdwp_options_;
  }

  JdwpProvider GetJdwpProvider() const {
    return jdwp_provider_;
  }

  uint32_t GetVerifierLoggingThresholdMs() const {
    return verifier_logging_threshold_ms_;
  }

  ThreadPool* GetThreadPool() {
    return thread_pool_.get();
  }

 private:
  static void InitPlatformSignalHandlers();

  Runtime();

  void BlockSignals();

  bool Init(RuntimeArgumentMap&& runtime_options)
      SHARED_TRYLOCK_FUNCTION(true, Locks::mutator_lock_);
  void InitNativeMethods() REQUIRES(!Locks::mutator_lock_);
  void RegisterRuntimeNativeMethods(JNIEnv* env);

  void StartDaemonThreads();
  void StartSignalCatcher();

  void MaybeSaveJitProfilingInfo();

  // Visit all of the thread roots.
  void VisitThreadRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Visit all other roots which must be done with mutators suspended.
  void VisitNonConcurrentRoots(RootVisitor* visitor, VisitRootFlags flags)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Constant roots are the roots which never change after the runtime is initialized, they only
  // need to be visited once per GC cycle.
  void VisitConstantRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Note: To be lock-free, GetFaultMessage temporarily replaces the lock message with null.
  //       As such, there is a window where a call will return an empty string. In general,
  //       only aborting code should retrieve this data (via GetFaultMessageForAbortLogging
  //       friend).
  std::string GetFaultMessage();

  // A pointer to the active runtime or null.
  static Runtime* instance_;

  // NOTE: these must match the gc::ProcessState values as they come directly from the framework.
  static constexpr int kProfileForground = 0;
  static constexpr int kProfileBackground = 1;

  static constexpr uint32_t kCalleeSaveSize = 6u;

  // 64 bit so that we can share the same asm offsets for both 32 and 64 bits.
  uint64_t callee_save_methods_[kCalleeSaveSize];
  // Pre-allocated exceptions (see Runtime::Init).
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_exception_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_throwing_oome_;
  GcRoot<mirror::Throwable> pre_allocated_OutOfMemoryError_when_handling_stack_overflow_;
  GcRoot<mirror::Throwable> pre_allocated_NoClassDefFoundError_;
  ArtMethod* resolution_method_;
  ArtMethod* imt_conflict_method_;
  // Unresolved method has the same behavior as the conflict method, it is used by the class linker
  // for differentiating between unfilled imt slots vs conflict slots in superclasses.
  ArtMethod* imt_unimplemented_method_;

  // Special sentinel object used to mark invalid conditions in JNI (cleared weak references) and
  // JDWP (invalid references).
  GcRoot<mirror::Object> sentinel_;

  InstructionSet instruction_set_;

  CompilerCallbacks* compiler_callbacks_;
  bool is_zygote_;
  bool must_relocate_;
  bool is_concurrent_gc_enabled_;
  bool is_explicit_gc_disabled_;
  bool image_dex2oat_enabled_;

  std::string compiler_executable_;
  std::vector<std::string> compiler_options_;
  std::vector<std::string> image_compiler_options_;
  std::string image_location_;

  std::vector<std::string> boot_class_path_;
  std::vector<std::string> boot_class_path_locations_;
  std::string class_path_string_;
  std::vector<std::string> properties_;

  std::list<ti::AgentSpec> agent_specs_;
  std::list<std::unique_ptr<ti::Agent>> agents_;
  std::vector<Plugin> plugins_;

  // The default stack size for managed threads created by the runtime.
  size_t default_stack_size_;

  gc::Heap* heap_;

  std::unique_ptr<ArenaPool> jit_arena_pool_;
  std::unique_ptr<ArenaPool> arena_pool_;
  // Special low 4gb pool for compiler linear alloc. We need ArtFields to be in low 4gb if we are
  // compiling using a 32 bit image on a 64 bit compiler in case we resolve things in the image
  // since the field arrays are int arrays in this case.
  std::unique_ptr<ArenaPool> low_4gb_arena_pool_;

  // Shared linear alloc for now.
  std::unique_ptr<LinearAlloc> linear_alloc_;

  // Thread pool.
  std::unique_ptr<ThreadPool> thread_pool_;

  // The number of spins that are done before thread suspension is used to forcibly inflate.
  size_t max_spins_before_thin_lock_inflation_;
  MonitorList* monitor_list_;
  MonitorPool* monitor_pool_;

  ThreadList* thread_list_;

  InternTable* intern_table_;

  ClassLinker* class_linker_;

  SignalCatcher* signal_catcher_;

  std::unique_ptr<JavaVMExt> java_vm_;

  std::unique_ptr<jit::Jit> jit_;
  std::unique_ptr<jit::JitCodeCache> jit_code_cache_;
  std::unique_ptr<jit::JitOptions> jit_options_;

  // Fault message, printed when we get a SIGSEGV. Stored as a native-heap object and accessed
  // lock-free, so needs to be atomic.
  std::atomic<std::string*> fault_message_;

  // A non-zero value indicates that a thread has been created but not yet initialized. Guarded by
  // the shutdown lock so that threads aren't born while we're shutting down.
  size_t threads_being_born_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Waited upon until no threads are being born.
  std::unique_ptr<ConditionVariable> shutdown_cond_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // Set when runtime shutdown is past the point that new threads may attach.
  bool shutting_down_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  // The runtime is starting to shutdown but is blocked waiting on shutdown_cond_.
  bool shutting_down_started_ GUARDED_BY(Locks::runtime_shutdown_lock_);

  bool started_;

  // New flag added which tells us if the runtime has finished starting. If
  // this flag is set then the Daemon threads are created and the class loader
  // is created. This flag is needed for knowing if it's safe to request CMS.
  bool finished_starting_;

  // Hooks supported by JNI_CreateJavaVM.
  jint (*vfprintf_)(FILE* stream, const char* format, va_list ap);
  void (*exit_)(jint status);
  void (*abort_)();

  bool stats_enabled_;
  RuntimeStats stats_;

  const bool is_running_on_memory_tool_;

  std::unique_ptr<TraceConfig> trace_config_;

  instrumentation::Instrumentation instrumentation_;

  jobject main_thread_group_;
  jobject system_thread_group_;

  // As returned by ClassLoader.getSystemClassLoader().
  jobject system_class_loader_;

  // If true, then we dump the GC cumulative timings on shutdown.
  bool dump_gc_performance_on_shutdown_;

  // Transactions used for pre-initializing classes at compilation time.
  // Support nested transactions, maintain a list containing all transactions. Transactions are
  // handled under a stack discipline. Because GC needs to go over all transactions, we choose list
  // as substantial data structure instead of stack.
  std::list<std::unique_ptr<Transaction>> preinitialization_transactions_;

  // If kNone, verification is disabled. kEnable by default.
  verifier::VerifyMode verify_;

  // If true, the runtime may use dex files directly with the interpreter if an oat file is not
  // available/usable.
  bool allow_dex_file_fallback_;

  // List of supported cpu abis.
  std::vector<std::string> cpu_abilist_;

  // Specifies target SDK version to allow workarounds for certain API levels.
  uint32_t target_sdk_version_;

  // Implicit checks flags.
  bool implicit_null_checks_;       // NullPointer checks are implicit.
  bool implicit_so_checks_;         // StackOverflow checks are implicit.
  bool implicit_suspend_checks_;    // Thread suspension checks are implicit.

  // Whether or not the sig chain (and implicitly the fault handler) should be
  // disabled. Tools like dex2oat don't need them. This enables
  // building a statically linked version of dex2oat.
  bool no_sig_chain_;

  // Force the use of native bridge even if the app ISA matches the runtime ISA.
  bool force_native_bridge_;

  // Whether or not a native bridge has been loaded.
  //
  // The native bridge allows running native code compiled for a foreign ISA. The way it works is,
  // if standard dlopen fails to load the native library associated with a native activity, it
  // calls to the native bridge to load it and then gets the trampoline for the entry to the
  // native activity.
  //
  // The option 'native_bridge_library_filename' specifies the name of the native bridge.
  // When non-empty the native bridge will be loaded from the given file. An empty value means
  // that there's no native bridge.
  bool is_native_bridge_loaded_;

  // Whether we are running under a native debugger.
  bool is_native_debuggable_;

  // Whether or not any async exceptions have ever been thrown. This is used to speed up the
  // MterpShouldSwitchInterpreters function.
  bool async_exceptions_thrown_;

  // Whether anything is going to be using the shadow-frame APIs to force a function to return
  // early. Doing this requires that (1) we be debuggable and (2) that mterp is exited.
  bool non_standard_exits_enabled_;

  // Whether Java code needs to be debuggable.
  bool is_java_debuggable_;

  // The maximum number of failed boots we allow before pruning the dalvik cache
  // and trying again. This option is only inspected when we're running as a
  // zygote.
  uint32_t zygote_max_failed_boots_;

  // Enable experimental opcodes that aren't fully specified yet. The intent is to
  // eventually publish them as public-usable opcodes, but they aren't ready yet.
  //
  // Experimental opcodes should not be used by other production code.
  ExperimentalFlags experimental_flags_;

  // Contains the build fingerprint, if given as a parameter.
  std::string fingerprint_;

  // Oat file manager, keeps track of what oat files are open.
  OatFileManager* oat_file_manager_;

  // Whether or not we are on a low RAM device.
  bool is_low_memory_mode_;

  // Whether or not we use MADV_RANDOM on files that are thought to have random access patterns.
  // This is beneficial for low RAM devices since it reduces page cache thrashing.
  bool madvise_random_access_;

  // Whether the application should run in safe mode, that is, interpreter only.
  bool safe_mode_;

  // Whether access checks on hidden API should be performed.
  hiddenapi::EnforcementPolicy hidden_api_policy_;

  // List of signature prefixes of methods that have been removed from the blacklist, and treated
  // as if whitelisted.
  std::vector<std::string> hidden_api_exemptions_;

  // Whether the application has used an API which is not restricted but we
  // should issue a warning about it.
  bool pending_hidden_api_warning_;

  // Do not warn about the same hidden API access violation twice.
  // This is only used for testing.
  bool dedupe_hidden_api_warnings_;

  // Hidden API can print warnings into the log and/or set a flag read by the
  // framework to show a UI warning. If this flag is set, always set the flag
  // when there is a warning. This is only used for testing.
  bool always_set_hidden_api_warning_flag_;

  // How often to log hidden API access to the event log. An integer between 0
  // (never) and 0x10000 (always).
  uint32_t hidden_api_access_event_log_rate_;

  // The package of the app running in this process.
  std::string process_package_name_;

  // Whether threads should dump their native stack on SIGQUIT.
  bool dump_native_stack_on_sig_quit_;

  // Whether the dalvik cache was pruned when initializing the runtime.
  bool pruned_dalvik_cache_;

  // Whether or not we currently care about pause times.
  ProcessState process_state_;

  // Whether zygote code is in a section that should not start threads.
  bool zygote_no_threads_;

  // The string containing requested jdwp options.
  std::string jdwp_options_;

  // The jdwp provider we were configured with.
  JdwpProvider jdwp_provider_;

  // Saved environment.
  class EnvSnapshot {
   public:
    EnvSnapshot() = default;
    void TakeSnapshot();
    char** GetSnapshot() const;

   private:
    std::unique_ptr<char*[]> c_env_vector_;
    std::vector<std::unique_ptr<std::string>> name_value_pairs_;

    DISALLOW_COPY_AND_ASSIGN(EnvSnapshot);
  } env_snapshot_;

  // Generic system-weak holders.
  std::vector<gc::AbstractSystemWeakHolder*> system_weak_holders_;

  std::unique_ptr<RuntimeCallbacks> callbacks_;

  std::atomic<uint32_t> deoptimization_counts_[
      static_cast<uint32_t>(DeoptimizationKind::kLast) + 1];

  MemMap protected_fault_page_;

  uint32_t verifier_logging_threshold_ms_;

  // Note: See comments on GetFaultMessage.
  friend std::string GetFaultMessageForAbortLogging();

  DISALLOW_COPY_AND_ASSIGN(Runtime);
};

}  // namespace art

#endif  // ART_RUNTIME_RUNTIME_H_