Add compiler type to CompilerOptions.

Let CompilerOptions record whether the compilation is AOT, JIT, or
Zygote JIT producing code for the shared region.
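
As an illustration, a sketch of a typical call-site change, where
codegen and klass stand in for whatever the surrounding pass already
has in scope:

    // Before: query the runtime and the graph separately.
    if (Runtime::Current()->UseJitCompilation() &&
        Runtime::Current()->GetJit()->CanEncodeClass(
            klass, codegen->GetGraph()->IsCompilingForSharedJitCode())) {
      // Use a JIT table address for the class literal.
    }

    // After: both facts come from CompilerOptions.
    const CompilerOptions& options = codegen->GetCompilerOptions();
    if (options.IsJitCompiler() &&
        Runtime::Current()->GetJit()->CanEncodeClass(
            klass, options.IsJitCompilerForSharedCode())) {
      // Use a JIT table address for the class literal.
    }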

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing --jit
Test: aosp_taimen-userdebug boots.
Change-Id: Id9200572406f8e43d99b8b61ef0e3edf43b52fff
diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc
index 0550075..a5805f9 100644
--- a/compiler/driver/compiler_options.cc
+++ b/compiler/driver/compiler_options.cc
@@ -50,6 +50,7 @@
       dex_files_for_oat_file_(),
       image_classes_(),
       verification_results_(nullptr),
+      compiler_type_(CompilerType::kAotCompiler),
       image_type_(ImageType::kNone),
       compile_art_test_(false),
       baseline_(false),
diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h
index 6cdb821..7723707 100644
--- a/compiler/driver/compiler_options.h
+++ b/compiler/driver/compiler_options.h
@@ -69,6 +69,13 @@
   static const size_t kDefaultInlineMaxCodeUnits = 32;
   static constexpr size_t kUnsetInlineMaxCodeUnits = -1;
 
+  enum class CompilerType : uint8_t {
+    kAotCompiler,             // AOT compiler.
+    kJitCompiler,             // Normal JIT compiler.
+    kSharedCodeJitCompiler,   // Zygote JIT producing code in the shared region, which puts
+                              // restrictions on, for example, how literals are generated.
+  };
+
   enum class ImageType : uint8_t {
     kNone,                    // JIT or AOT app compilation producing only an oat file but no image.
     kBootImage,               // Creating boot image.
@@ -191,6 +198,19 @@
     return implicit_so_checks_;
   }
 
+  bool IsAotCompiler() const {
+    return compiler_type_ == CompilerType::kAotCompiler;
+  }
+
+  bool IsJitCompiler() const {
+    return compiler_type_ == CompilerType::kJitCompiler ||
+           compiler_type_ == CompilerType::kSharedCodeJitCompiler;
+  }
+
+  bool IsJitCompilerForSharedCode() const {
+    return compiler_type_ == CompilerType::kSharedCodeJitCompiler;
+  }
+
   bool GetImplicitSuspendChecks() const {
     return implicit_suspend_checks_;
   }
@@ -394,6 +414,7 @@
   // Results of AOT verification.
   const VerificationResults* verification_results_;
 
+  CompilerType compiler_type_;
   ImageType image_type_;
   bool compile_art_test_;
   bool baseline_;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 31dbec4..632a4fb 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -55,6 +55,10 @@
       UNREACHABLE();
     }
   }
+  // Set the appropriate JIT compiler type.
+  compiler_options_->compiler_type_ = runtime->IsZygote()
+      ? CompilerOptions::CompilerType::kSharedCodeJitCompiler
+      : CompilerOptions::CompilerType::kJitCompiler;
   // JIT is never PIC, no matter what the runtime compiler options specify.
   compiler_options_->SetNonPic();
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 6cd51cc..fd5f689 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -92,12 +92,6 @@
 // the offset explicitly.
 constexpr uint32_t kReferenceLoadMinFarOffset = 16 * KB;
 
-ALWAYS_INLINE static inline bool UseJitCompilation() {
-  Runtime* runtime = Runtime::Current();
-  // Note: There may be no Runtime for gtests; gtests use debug builds.
-  return (!kIsDebugBuild || runtime != nullptr) && runtime->UseJitCompilation();
-}
-
 inline Condition ARM64Condition(IfCondition cond) {
   switch (cond) {
     case kCondEQ: return eq;
@@ -944,7 +938,7 @@
   EmitJumpTables();
 
   // Emit JIT baker read barrier slow paths.
-  DCHECK(UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+  DCHECK(GetCompilerOptions().IsJitCompiler() || jit_baker_read_barrier_slow_paths_.empty());
   for (auto& entry : jit_baker_read_barrier_slow_paths_) {
     uint32_t encoded_data = entry.first;
     vixl::aarch64::Label* slow_path_entry = &entry.second.label;
@@ -1788,7 +1782,7 @@
   // Reduce code size for AOT by using shared trampolines for slow path runtime calls across the
   // entire oat file. This adds an extra branch and we do not want to slow down the main path.
   // For JIT, thunk sharing is per-method, so the gains would be smaller or even negative.
-  if (slow_path == nullptr || UseJitCompilation()) {
+  if (slow_path == nullptr || GetCompilerOptions().IsJitCompiler()) {
     __ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value()));
     // Ensure the pc position is recorded immediately after the `blr` instruction.
     ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
@@ -4535,7 +4529,7 @@
 
 void CodeGeneratorARM64::EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offset) {
   DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
-  DCHECK(!UseJitCompilation());
+  DCHECK(!GetCompilerOptions().IsJitCompiler());
   call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value());
   vixl::aarch64::Label* bl_label = &call_entrypoint_patches_.back().label;
   __ bind(bl_label);
@@ -4544,7 +4538,7 @@
 
 void CodeGeneratorARM64::EmitBakerReadBarrierCbnz(uint32_t custom_data) {
   DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
-  if (UseJitCompilation()) {
+  if (GetCompilerOptions().IsJitCompiler()) {
     auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
     vixl::aarch64::Label* slow_path_entry = &it->second.label;
     __ cbnz(mr, slow_path_entry);
@@ -4635,7 +4629,7 @@
     vixl::aarch64::Label* ldr_label = NewBootImageRelRoPatch(boot_image_reference, adrp_label);
     EmitLdrOffsetPlaceholder(ldr_label, reg.W(), reg.X());
   } else {
-    DCHECK(Runtime::Current()->UseJitCompilation());
+    DCHECK(GetCompilerOptions().IsJitCompiler());
     gc::Heap* heap = Runtime::Current()->GetHeap();
     DCHECK(!heap->GetBootImageSpaces().empty());
     const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
@@ -4847,11 +4841,11 @@
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
     case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
+      DCHECK(!GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadClass::LoadKind::kJitBootImageAddress:
     case HLoadClass::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
+      DCHECK(GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadClass::LoadKind::kRuntimeCall:
       break;
@@ -5066,11 +5060,11 @@
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
     case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
+      DCHECK(!GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadString::LoadKind::kJitBootImageAddress:
     case HLoadString::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
+      DCHECK(GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadString::LoadKind::kRuntimeCall:
       break;
@@ -6684,7 +6678,7 @@
 
   // For JIT, the slow path is considered part of the compiled method,
   // so JIT should pass null as `debug_name`.
-  DCHECK(!UseJitCompilation() || debug_name == nullptr);
+  DCHECK(!GetCompilerOptions().IsJitCompiler() || debug_name == nullptr);
   if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
     std::ostringstream oss;
     oss << "BakerReadBarrierThunk";
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 44d2f4d..1d8fd6c 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -93,12 +93,6 @@
 // Using a base helps identify when we hit Marking Register check breakpoints.
 constexpr int kMarkingRegisterCheckBreakCodeBaseCode = 0x10;
 
-ALWAYS_INLINE static inline bool UseJitCompilation() {
-  Runtime* runtime = Runtime::Current();
-  // Note: There may be no Runtime for gtests which use debug builds.
-  return (!kIsDebugBuild || runtime != nullptr) && runtime->UseJitCompilation();
-}
-
 #ifdef __
 #error "ARM Codegen VIXL macro-assembler macro already defined."
 #endif
@@ -1937,7 +1931,7 @@
   FixJumpTables();
 
   // Emit JIT baker read barrier slow paths.
-  DCHECK(UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+  DCHECK(GetCompilerOptions().IsJitCompiler() || jit_baker_read_barrier_slow_paths_.empty());
   for (auto& entry : jit_baker_read_barrier_slow_paths_) {
     uint32_t encoded_data = entry.first;
     vixl::aarch32::Label* slow_path_entry = &entry.second.label;
@@ -2517,7 +2511,7 @@
   // Reduce code size for AOT by using shared trampolines for slow path runtime calls across the
   // entire oat file. This adds an extra branch and we do not want to slow down the main path.
   // For JIT, thunk sharing is per-method, so the gains would be smaller or even negative.
-  if (slow_path == nullptr || UseJitCompilation()) {
+  if (slow_path == nullptr || GetCompilerOptions().IsJitCompiler()) {
     __ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value()));
     // Ensure the pc position is recorded immediately after the `blx` instruction.
     // blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
@@ -7129,11 +7123,11 @@
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
     case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
+      DCHECK(!GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadClass::LoadKind::kJitBootImageAddress:
     case HLoadClass::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
+      DCHECK(GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadClass::LoadKind::kRuntimeCall:
       break;
@@ -7394,11 +7388,11 @@
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
     case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
+      DCHECK(!GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadString::LoadKind::kJitBootImageAddress:
     case HLoadString::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
+      DCHECK(GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadString::LoadKind::kRuntimeCall:
       break;
@@ -9076,7 +9070,7 @@
 
 void CodeGeneratorARMVIXL::EmitEntrypointThunkCall(ThreadOffset32 entrypoint_offset) {
   DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
-  DCHECK(!UseJitCompilation());
+  DCHECK(!GetCompilerOptions().IsJitCompiler());
   call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value());
   vixl::aarch32::Label* bl_label = &call_entrypoint_patches_.back().label;
   __ bind(bl_label);
@@ -9087,7 +9081,7 @@
 
 void CodeGeneratorARMVIXL::EmitBakerReadBarrierBne(uint32_t custom_data) {
   DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
-  if (UseJitCompilation()) {
+  if (GetCompilerOptions().IsJitCompiler()) {
     auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
     vixl::aarch32::Label* slow_path_entry = &it->second.label;
     __ b(ne, EncodingSize(Wide), slow_path_entry);
@@ -9140,7 +9134,7 @@
     EmitMovwMovtPlaceholder(labels, reg);
     __ Ldr(reg, MemOperand(reg, /* offset= */ 0));
   } else {
-    DCHECK(Runtime::Current()->UseJitCompilation());
+    DCHECK(GetCompilerOptions().IsJitCompiler());
     gc::Heap* heap = Runtime::Current()->GetHeap();
     DCHECK(!heap->GetBootImageSpaces().empty());
     uintptr_t address =
@@ -9717,7 +9711,7 @@
 
   // For JIT, the slow path is considered part of the compiled method,
   // so JIT should pass null as `debug_name`.
-  DCHECK(!UseJitCompilation() || debug_name == nullptr);
+  DCHECK(!GetCompilerOptions().IsJitCompiler() || debug_name == nullptr);
   if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
     std::ostringstream oss;
     oss << "BakerReadBarrierThunk";
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ed1a536..e9ef21a 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -5144,7 +5144,7 @@
     __ movl(reg, Address(method_address_reg, CodeGeneratorX86::kDummy32BitOffset));
     RecordBootImageRelRoPatch(method_address, boot_image_reference);
   } else {
-    DCHECK(Runtime::Current()->UseJitCompilation());
+    DCHECK(GetCompilerOptions().IsJitCompiler());
     gc::Heap* heap = Runtime::Current()->GetHeap();
     DCHECK(!heap->GetBootImageSpaces().empty());
     const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
@@ -6630,11 +6630,11 @@
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
     case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
+      DCHECK(!GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadClass::LoadKind::kJitBootImageAddress:
     case HLoadClass::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
+      DCHECK(GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadClass::LoadKind::kRuntimeCall:
       break;
@@ -6867,11 +6867,11 @@
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
     case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
+      DCHECK(!GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadString::LoadKind::kJitBootImageAddress:
     case HLoadString::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
+      DCHECK(GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadString::LoadKind::kRuntimeCall:
       break;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 8518b6d..ec54376 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1134,7 +1134,7 @@
     __ movl(reg, Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip= */ false));
     RecordBootImageRelRoPatch(boot_image_reference);
   } else {
-    DCHECK(Runtime::Current()->UseJitCompilation());
+    DCHECK(GetCompilerOptions().IsJitCompiler());
     gc::Heap* heap = Runtime::Current()->GetHeap();
     DCHECK(!heap->GetBootImageSpaces().empty());
     const uint8_t* address = heap->GetBootImageSpaces()[0]->Begin() + boot_image_reference;
@@ -5966,11 +5966,11 @@
     case HLoadClass::LoadKind::kBootImageLinkTimePcRelative:
     case HLoadClass::LoadKind::kBootImageRelRo:
     case HLoadClass::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
+      DCHECK(!GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadClass::LoadKind::kJitBootImageAddress:
     case HLoadClass::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
+      DCHECK(GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadClass::LoadKind::kRuntimeCall:
       break;
@@ -6162,11 +6162,11 @@
     case HLoadString::LoadKind::kBootImageLinkTimePcRelative:
     case HLoadString::LoadKind::kBootImageRelRo:
     case HLoadString::LoadKind::kBssEntry:
-      DCHECK(!Runtime::Current()->UseJitCompilation());
+      DCHECK(!GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadString::LoadKind::kJitBootImageAddress:
     case HLoadString::LoadKind::kJitTableAddress:
-      DCHECK(Runtime::Current()->UseJitCompilation());
+      DCHECK(GetCompilerOptions().IsJitCompiler());
       break;
     case HLoadString::LoadKind::kRuntimeCall:
       break;
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3646cc7..ef5669a 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -414,7 +414,7 @@
 static bool IsMethodUnverified(const CompilerOptions& compiler_options, ArtMethod* method)
     REQUIRES_SHARED(Locks::mutator_lock_) {
   if (!method->GetDeclaringClass()->IsVerified()) {
-    if (Runtime::Current()->UseJitCompilation()) {
+    if (compiler_options.IsJitCompiler()) {
       // We're at runtime, we know this is cold code if the class
       // is not verified, so don't bother analyzing.
       return true;
@@ -673,7 +673,7 @@
     StackHandleScope<1>* hs,
     /*out*/Handle<mirror::ObjectArray<mirror::Class>>* inline_cache)
     REQUIRES_SHARED(Locks::mutator_lock_) {
-  DCHECK(Runtime::Current()->UseJitCompilation());
+  DCHECK(codegen_->GetCompilerOptions().IsJitCompiler());
 
   ArtMethod* caller = graph_->GetArtMethod();
   // Under JIT, we should always know the caller.
@@ -1185,7 +1185,7 @@
     ArtMethod* resolved_method,
     Handle<mirror::ObjectArray<mirror::Class>> classes) {
   // This optimization only works under JIT for now.
-  if (!Runtime::Current()->UseJitCompilation()) {
+  if (!codegen_->GetCompilerOptions().IsJitCompiler()) {
     return false;
   }
 
@@ -2046,7 +2046,6 @@
       callee_dead_reference_safe,
       graph_->IsDebuggable(),
       /* osr= */ false,
-      /* is_shared_jit_code= */ graph_->IsCompilingForSharedJitCode(),
       /* baseline= */ graph_->IsCompilingBaseline(),
       /* start_instruction_id= */ caller_instruction_counter);
   callee_graph->SetArtMethod(resolved_method);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 69f6778..df236c1 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -1352,18 +1352,18 @@
 
   // Check if the class will be initialized at runtime.
   if (cls->IsInitialized()) {
-    Runtime* runtime = Runtime::Current();
-    if (runtime->IsAotCompiler()) {
+    const CompilerOptions& compiler_options = code_generator_->GetCompilerOptions();
+    if (compiler_options.IsAotCompiler()) {
       // Assume loaded only if klass is in the boot image. App classes cannot be assumed
       // loaded because we don't even know what class loader will be used to load them.
-      if (IsInBootImage(cls, code_generator_->GetCompilerOptions())) {
+      if (IsInBootImage(cls, compiler_options)) {
         return true;
       }
     } else {
-      DCHECK(runtime->UseJitCompilation());
+      DCHECK(compiler_options.IsJitCompiler());
       if (Runtime::Current()->GetJit()->CanAssumeInitialized(
               cls,
-              graph_->IsCompilingForSharedJitCode())) {
+              compiler_options.IsJitCompilerForSharedCode())) {
         // For JIT, the class cannot revert to an uninitialized state.
         return true;
       }
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 1563a7e..653d92a 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -215,12 +215,12 @@
     if (cache == nullptr) {
       return;  // No cache in the boot image.
     }
-    if (runtime->UseJitCompilation()) {
+    if (compiler_options.IsJitCompiler()) {
       if (!CheckIntegerCache(self, runtime->GetClassLinker(), boot_image_live_objects, cache)) {
         return;  // The cache was somehow messed up, probably by using reflection.
       }
     } else {
-      DCHECK(runtime->IsAotCompiler());
+      DCHECK(compiler_options.IsAotCompiler());
       DCHECK(CheckIntegerCache(self, runtime->GetClassLinker(), boot_image_live_objects, cache));
       if (invoke->InputAt(0)->IsIntConstant()) {
         int32_t value = invoke->InputAt(0)->AsIntConstant()->GetValue();
@@ -287,8 +287,7 @@
   // we need to provide data that shall not lead to a crash even if the fields were
   // modified through reflection since ComputeIntegerValueOfLocations() when JITting.
 
-  Runtime* runtime = Runtime::Current();
-  ClassLinker* class_linker = runtime->GetClassLinker();
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   Thread* self = Thread::Current();
   ScopedObjectAccess soa(self);
 
@@ -328,7 +327,7 @@
     ArtField* value_field = integer_class->FindDeclaredInstanceField(kValueFieldName, "I");
     DCHECK(value_field != nullptr);
     info.value_offset = value_field->GetOffset().Uint32Value();
-    if (runtime->UseJitCompilation()) {
+    if (compiler_options.IsJitCompiler()) {
       // Use the current `IntegerCache.low` for JIT to avoid truly surprising behavior if the
       // code messes up the `value` field in the lowest cached Integer using reflection.
       info.low = GetIntegerCacheLowFromIntegerCache(self, class_linker);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 67e02f2..7c0e973 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -379,7 +379,6 @@
          bool dead_reference_safe = false,
          bool debuggable = false,
          bool osr = false,
-         bool is_shared_jit_code = false,
          bool baseline = false,
          int start_instruction_id = 0)
       : allocator_(allocator),
@@ -418,8 +417,7 @@
         art_method_(nullptr),
         osr_(osr),
         baseline_(baseline),
-        cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)),
-        is_shared_jit_code_(is_shared_jit_code) {
+        cha_single_implementation_list_(allocator->Adapter(kArenaAllocCHA)) {
     blocks_.reserve(kDefaultNumberOfBlocks);
   }
 
@@ -651,10 +649,6 @@
 
   bool IsCompilingBaseline() const { return baseline_; }
 
-  bool IsCompilingForSharedJitCode() const {
-    return is_shared_jit_code_;
-  }
-
   ArenaSet<ArtMethod*>& GetCHASingleImplementationList() {
     return cha_single_implementation_list_;
   }
@@ -855,10 +849,6 @@
   // List of methods that are assumed to have single implementation.
   ArenaSet<ArtMethod*> cha_single_implementation_list_;
 
-  // Whether we are JIT compiling in the shared region area, putting
-  // restrictions on, for example, how literals are being generated.
-  bool is_shared_jit_code_;
-
   friend class SsaBuilder;           // For caching constants.
   friend class SsaLivenessAnalysis;  // For the linear order.
   friend class HInliner;             // For the reverse post order.
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 1a2d40b..2d5216a 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -379,7 +379,6 @@
                             ArtMethod* method,
                             bool baseline,
                             bool osr,
-                            bool is_shared_jit_code,
                             VariableSizedHandleScope* handles) const;
 
   CodeGenerator* TryCompileIntrinsic(ArenaAllocator* allocator,
@@ -719,7 +718,6 @@
                                               ArtMethod* method,
                                               bool baseline,
                                               bool osr,
-                                              bool is_shared_jit_code,
                                               VariableSizedHandleScope* handles) const {
   MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
   const CompilerOptions& compiler_options = GetCompilerOptions();
@@ -789,7 +787,6 @@
       dead_reference_safe,
       compiler_options.GetDebuggable(),
       /* osr= */ osr,
-      /* is_shared_jit_code= */ is_shared_jit_code,
       /* baseline= */ baseline);
 
   if (method != nullptr) {
@@ -992,6 +989,7 @@
                                             const DexFile& dex_file,
                                             Handle<mirror::DexCache> dex_cache) const {
   const CompilerOptions& compiler_options = GetCompilerOptions();
+  DCHECK(compiler_options.IsAotCompiler());
   CompiledMethod* compiled_method = nullptr;
   Runtime* runtime = Runtime::Current();
   DCHECK(runtime->IsAotCompiler());
@@ -1050,7 +1048,6 @@
                        method,
                        compiler_options.IsBaseline(),
                        /* osr= */ false,
-                       /* is_shared_jit_code= */ false,
                        &handles));
       }
     }
@@ -1199,6 +1196,9 @@
                                     bool baseline,
                                     bool osr,
                                     jit::JitLogger* jit_logger) {
+  const CompilerOptions& compiler_options = GetCompilerOptions();
+  DCHECK(compiler_options.IsJitCompiler());
+  DCHECK_EQ(compiler_options.IsJitCompilerForSharedCode(), code_cache->IsSharedRegion(*region));
   StackHandleScope<3> hs(self);
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(
       method->GetDeclaringClass()->GetClassLoader()));
@@ -1215,7 +1215,6 @@
   ArenaAllocator allocator(runtime->GetJitArenaPool());
 
   if (UNLIKELY(method->IsNative())) {
-    const CompilerOptions& compiler_options = GetCompilerOptions();
     JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
         compiler_options, access_flags, method_idx, *dex_file);
     std::vector<Handle<mirror::Object>> roots;
@@ -1316,9 +1315,8 @@
                    &code_allocator,
                    dex_compilation_unit,
                    method,
-                   baseline || GetCompilerOptions().IsBaseline(),
+                   baseline || compiler_options.IsBaseline(),
                    osr,
-                   /* is_shared_jit_code= */ code_cache->IsSharedRegion(*region),
                    &handles));
     if (codegen.get() == nullptr) {
       return false;
@@ -1353,7 +1351,6 @@
                      }));
 
   // Add debug info after we know the code location but before we update entry-point.
-  const CompilerOptions& compiler_options = GetCompilerOptions();
   std::vector<uint8_t> debug_info;
   if (compiler_options.GenerateAnyDebugInfo()) {
     debug::MethodDebugInfo info = {};
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index b8471e3..1539421 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -101,11 +101,11 @@
       method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kBssEntry;
     }
     code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
-  } else if (Runtime::Current()->UseJitCompilation()) {
+  } else if (compiler_options.IsJitCompiler()) {
     ScopedObjectAccess soa(Thread::Current());
     if (Runtime::Current()->GetJit()->CanEncodeMethod(
             callee,
-            codegen->GetGraph()->IsCompilingForSharedJitCode())) {
+            compiler_options.IsJitCompilerForSharedCode())) {
       method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kJitDirectAddress;
       method_load_data = reinterpret_cast<uintptr_t>(callee);
       code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallArtMethod;
@@ -165,7 +165,7 @@
     const CompilerOptions& compiler_options = codegen->GetCompilerOptions();
     if (compiler_options.IsBootImage() || compiler_options.IsBootImageExtension()) {
       // Compiling boot image or boot image extension. Check if the class is a boot image class.
-      DCHECK(!runtime->UseJitCompilation());
+      DCHECK(!compiler_options.IsJitCompiler());
       if (!compiler_options.GetCompilePic()) {
         // Test configuration, do not sharpen.
         desired_load_kind = HLoadClass::LoadKind::kRuntimeCall;
@@ -184,14 +184,14 @@
     } else {
       is_in_boot_image = (klass != nullptr) &&
           runtime->GetHeap()->ObjectIsInBootImageSpace(klass.Get());
-      if (runtime->UseJitCompilation()) {
+      if (compiler_options.IsJitCompiler()) {
         DCHECK(!compiler_options.GetCompilePic());
         if (is_in_boot_image) {
           desired_load_kind = HLoadClass::LoadKind::kJitBootImageAddress;
         } else if (klass != nullptr) {
           if (runtime->GetJit()->CanEncodeClass(
                   klass.Get(),
-                  codegen->GetGraph()->IsCompilingForSharedJitCode())) {
+                  compiler_options.IsJitCompilerForSharedCode())) {
             desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
           } else {
             // Shared JIT code cannot encode a literal that the GC can move.
@@ -239,7 +239,8 @@
   DCHECK(!klass->IsProxyClass());
   DCHECK(!klass->IsArrayClass());
 
-  if (Runtime::Current()->UseJitCompilation()) {
+  const CompilerOptions& compiler_options = codegen->GetCompilerOptions();
+  if (compiler_options.IsJitCompiler()) {
     // If we're JITting, try to assign a type check bitstring (fall through).
   } else if (codegen->GetCompilerOptions().IsBootImage()) {
     const char* descriptor = klass->GetDexFile().StringByTypeIdx(klass->GetDexTypeIndex());
@@ -259,8 +260,8 @@
   if ((false) &&  // FIXME: Inliner does not respect CompilerDriver::ShouldCompileMethod()
                   // and we're hitting an unassigned bitstring in dex2oat_image_test. b/26687569
       kIsDebugBuild &&
-      codegen->GetCompilerOptions().IsBootImage() &&
-      codegen->GetCompilerOptions().IsForceDeterminism()) {
+      compiler_options.IsBootImage() &&
+      compiler_options.IsForceDeterminism()) {
     SubtypeCheckInfo::State old_state = SubtypeCheck<ObjPtr<mirror::Class>>::GetState(klass);
     CHECK(old_state == SubtypeCheckInfo::kAssigned || old_state == SubtypeCheckInfo::kOverflowed)
         << klass->PrettyDescriptor() << "/" << old_state
@@ -325,7 +326,7 @@
     if (compiler_options.IsBootImage() || compiler_options.IsBootImageExtension()) {
       // Compiling boot image or boot image extension. Resolve the string and allocate it
       // if needed, to ensure the string will be added to the boot image.
-      DCHECK(!runtime->UseJitCompilation());
+      DCHECK(!compiler_options.IsJitCompiler());
       if (compiler_options.GetCompilePic()) {
         if (compiler_options.IsForceDeterminism()) {
           // Strings for methods we're compiling should be pre-resolved but Strings in inlined
@@ -354,7 +355,7 @@
         // Test configuration, do not sharpen.
         desired_load_kind = HLoadString::LoadKind::kRuntimeCall;
       }
-    } else if (runtime->UseJitCompilation()) {
+    } else if (compiler_options.IsJitCompiler()) {
       DCHECK(!codegen->GetCompilerOptions().GetCompilePic());
       string = class_linker->LookupString(string_index, dex_cache.Get());
       if (string != nullptr) {
@@ -363,7 +364,7 @@
           desired_load_kind = HLoadString::LoadKind::kJitBootImageAddress;
         } else if (runtime->GetJit()->CanEncodeString(
                   string,
-                  codegen->GetGraph()->IsCompilingForSharedJitCode())) {
+                  compiler_options.IsJitCompilerForSharedCode())) {
           desired_load_kind = HLoadString::LoadKind::kJitTableAddress;
         } else {
           // Shared JIT code cannot encode a literal that the GC can move.
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 6fd1143..025828a 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -904,6 +904,7 @@
   }
 
   void ProcessOptions(ParserOptions* parser_options) {
+    compiler_options_->compiler_type_ = CompilerOptions::CompilerType::kAotCompiler;
     compiler_options_->compile_pic_ = true;  // All AOT compilation is PIC.
 
     if (android_root_.empty()) {