Merge "Use optimizing for apps."
diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc
index f354a49..3103f96 100644
--- a/compiler/dex/mir_graph.cc
+++ b/compiler/dex/mir_graph.cc
@@ -2459,11 +2459,9 @@
return res;
}
-void MIRGraph::CalculateBasicBlockInformation() {
- auto* quick_compiler = down_cast<QuickCompiler*>(cu_->compiler_driver->GetCompiler());
- DCHECK(quick_compiler != nullptr);
+void MIRGraph::CalculateBasicBlockInformation(const PassManager* const post_opt_pass_manager) {
/* Create the pass driver and launch it */
- PassDriverMEPostOpt driver(quick_compiler->GetPostOptPassManager(), cu_);
+ PassDriverMEPostOpt driver(post_opt_pass_manager, cu_);
driver.Launch();
}
diff --git a/compiler/dex/mir_graph.h b/compiler/dex/mir_graph.h
index 3dae5b4..9da39d1 100644
--- a/compiler/dex/mir_graph.h
+++ b/compiler/dex/mir_graph.h
@@ -38,6 +38,7 @@
class DexFileMethodInliner;
class GlobalValueNumbering;
class GvnDeadCodeElimination;
+class PassManager;
// Forward declaration.
class MIRGraph;
@@ -1201,7 +1202,7 @@
void AllocateSSAUseData(MIR *mir, int num_uses);
void AllocateSSADefData(MIR *mir, int num_defs);
- void CalculateBasicBlockInformation();
+ void CalculateBasicBlockInformation(const PassManager* const post_opt);
void ComputeDFSOrders();
void ComputeDefBlockMatrix();
void ComputeDominators();
diff --git a/compiler/dex/pass_driver_me_opts.cc b/compiler/dex/pass_driver_me_opts.cc
index 320d06a..2e871da 100644
--- a/compiler/dex/pass_driver_me_opts.cc
+++ b/compiler/dex/pass_driver_me_opts.cc
@@ -66,7 +66,7 @@
// Is it dirty at least?
if (pass_me_data_holder->dirty == true) {
CompilationUnit* c_unit = pass_me_data_holder->c_unit;
- c_unit->mir_graph.get()->CalculateBasicBlockInformation();
+ c_unit->mir_graph.get()->CalculateBasicBlockInformation(post_opt_pass_manager_);
}
}
}
diff --git a/compiler/dex/pass_driver_me_opts.h b/compiler/dex/pass_driver_me_opts.h
index b930d02..e94c189 100644
--- a/compiler/dex/pass_driver_me_opts.h
+++ b/compiler/dex/pass_driver_me_opts.h
@@ -29,8 +29,10 @@
class PassDriverMEOpts : public PassDriverME {
public:
- explicit PassDriverMEOpts(const PassManager* const manager, CompilationUnit* cu)
- : PassDriverME(manager, cu) {
+ explicit PassDriverMEOpts(const PassManager* const manager,
+ const PassManager* const post_opt_pass_manager,
+ CompilationUnit* cu)
+ : PassDriverME(manager, cu), post_opt_pass_manager_(post_opt_pass_manager) {
}
~PassDriverMEOpts() {
@@ -45,6 +47,8 @@
* @brief Apply a patch: perform start/work/end functions.
*/
virtual void ApplyPass(PassDataHolder* data, const Pass* pass) OVERRIDE;
+
+ const PassManager* const post_opt_pass_manager_;
};
} // namespace art
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 02d74a0..922f2f7 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -708,7 +708,7 @@
}
/* Create the pass driver and launch it */
- PassDriverMEOpts pass_driver(GetPreOptPassManager(), &cu);
+ PassDriverMEOpts pass_driver(GetPreOptPassManager(), GetPostOptPassManager(), &cu);
pass_driver.Launch();
/* For non-leaf methods check if we should skip compilation when the profiler is enabled. */
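
Taken together, the mir_graph, pass_driver_me_opts and quick_compiler hunks above replace a down_cast-based lookup with plain constructor injection: QuickCompiler hands its post-opt PassManager to PassDriverMEOpts, which forwards it to CalculateBasicBlockInformation(). A minimal, self-contained sketch of that shape (stand-in types, not the real ART classes; the pre-opt manager and CompilationUnit arguments are elided):

```cpp
#include <iostream>

// Stand-ins for the ART types involved; only the plumbing is modelled.
struct PassManager { const char* name; };

struct MIRGraph {
  // After the change the post-opt pass manager is an explicit parameter,
  // so no down_cast to a particular compiler is needed to fetch it.
  void CalculateBasicBlockInformation(const PassManager* post_opt) {
    std::cout << "launching post-opt passes: " << post_opt->name << "\n";
  }
};

class PassDriverMEOpts {
 public:
  explicit PassDriverMEOpts(const PassManager* post_opt_pass_manager)
      : post_opt_pass_manager_(post_opt_pass_manager) {}

  void ApplyPass(MIRGraph* mir_graph) const {
    // The driver simply forwards the manager it was constructed with.
    mir_graph->CalculateBasicBlockInformation(post_opt_pass_manager_);
  }

 private:
  const PassManager* const post_opt_pass_manager_;
};

int main() {
  PassManager post_opt{"post-opt"};
  MIRGraph graph;
  PassDriverMEOpts driver(&post_opt);  // as QuickCompiler does above
  driver.ApplyPass(&graph);
}
```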
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 123f690..0a069a7 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -903,10 +903,6 @@
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
UNUSED(exit);
- if (kIsDebugBuild) {
- __ Comment("Unreachable");
- __ bkpt(0);
- }
}
void LocationsBuilderARM::VisitIf(HIf* if_instr) {
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index c48cab4..aeec5dd 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1596,10 +1596,6 @@
void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
UNUSED(exit);
- if (kIsDebugBuild) {
- down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
- __ Brk(__LINE__); // TODO: Introduce special markers for such code locations.
- }
}
void LocationsBuilderARM64::VisitFloatConstant(HFloatConstant* constant) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1db1600..754dd10 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -812,10 +812,6 @@
void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
UNUSED(exit);
- if (kIsDebugBuild) {
- __ Comment("Unreachable");
- __ int3();
- }
}
void LocationsBuilderX86::VisitIf(HIf* if_instr) {
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 90d87d4..dbd7c9e 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -749,10 +749,6 @@
void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) {
UNUSED(exit);
- if (kIsDebugBuild) {
- __ Comment("Unreachable");
- __ int3();
- }
}
void LocationsBuilderX86_64::VisitIf(HIf* if_instr) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index e47b4f6..b70f925 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -173,24 +173,40 @@
jobject class_loader,
const DexFile& dex_file) const OVERRIDE;
+ CompiledMethod* TryCompile(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const;
+
CompiledMethod* JniCompile(uint32_t access_flags,
uint32_t method_idx,
- const DexFile& dex_file) const OVERRIDE;
+ const DexFile& dex_file) const OVERRIDE {
+ return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
+ }
uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
+ InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
+ }
bool WriteElf(art::File* file,
OatWriter* oat_writer,
const std::vector<const art::DexFile*>& dex_files,
const std::string& android_root,
- bool is_host) const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+ bool is_host) const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return art::ElfWriterQuick32::Create(file, oat_writer, dex_files, android_root, is_host,
+ *GetCompilerDriver());
+ }
- void InitCompilationUnit(CompilationUnit& cu ATTRIBUTE_UNUSED) const OVERRIDE {}
+ void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;
void Init() OVERRIDE;
- void UnInit() const OVERRIDE {}
+ void UnInit() const OVERRIDE;
private:
// Whether we should run any optimization or register allocation. If false, will
@@ -214,6 +230,9 @@
std::unique_ptr<std::ostream> visualizer_output_;
+ // Delegate to Quick in case the optimizing compiler cannot compile a method.
+ std::unique_ptr<Compiler> delegate_;
+
DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
};
@@ -224,9 +243,11 @@
run_optimizations_(
(driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime)
&& !driver->GetCompilerOptions().GetDebuggable()),
- compilation_stats_() {}
+ compilation_stats_(),
+ delegate_(Create(driver, Compiler::Kind::kQuick)) {}
void OptimizingCompiler::Init() {
+ delegate_->Init();
// Enable C1visualizer output. Must be done in Init() because the compiler
// driver is not fully initialized when passed to the compiler's constructor.
CompilerDriver* driver = GetCompilerDriver();
@@ -239,34 +260,24 @@
}
}
+void OptimizingCompiler::UnInit() const {
+ delegate_->UnInit();
+}
+
OptimizingCompiler::~OptimizingCompiler() {
compilation_stats_.Log();
}
+void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu) const {
+ delegate_->InitCompilationUnit(cu);
+}
+
bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx ATTRIBUTE_UNUSED,
const DexFile& dex_file ATTRIBUTE_UNUSED,
CompilationUnit* cu ATTRIBUTE_UNUSED) const {
return true;
}
-CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file) const {
- return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
-}
-
-uintptr_t OptimizingCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
- return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
- InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet())));
-}
-
-bool OptimizingCompiler::WriteElf(art::File* file, OatWriter* oat_writer,
- const std::vector<const art::DexFile*>& dex_files,
- const std::string& android_root, bool is_host) const {
- return art::ElfWriterQuick32::Create(file, oat_writer, dex_files, android_root, is_host,
- *GetCompilerDriver());
-}
-
static bool IsInstructionSetSupported(InstructionSet instruction_set) {
return instruction_set == kArm64
|| (instruction_set == kThumb2 && !kArm32QuickCodeUseSoftFloat)
@@ -422,13 +433,13 @@
ArrayRef<const uint8_t>());
}
-CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file) const {
+CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
UNUSED(invoke_type);
std::string method_name = PrettyMethod(method_idx, dex_file);
compilation_stats_.RecordStat(MethodCompilationStat::kAttemptCompilation);
@@ -502,6 +513,11 @@
bool can_optimize = CanOptimize(*code_item);
bool can_allocate_registers = RegisterAllocator::CanAllocateRegistersFor(*graph, instruction_set);
+
+  // `run_optimizations_` is turned off explicitly (either through the kTime compiler
+  // filter or the debuggable flag); only in that case do we compile with baseline.
+  // Otherwise, when a method cannot be optimized, we fall back to Quick.
+ bool can_use_baseline = !run_optimizations_;
if (run_optimizations_ && can_optimize && can_allocate_registers) {
VLOG(compiler) << "Optimizing " << method_name;
@@ -524,7 +540,7 @@
} else if (shouldOptimize && can_allocate_registers) {
LOG(FATAL) << "Could not allocate registers in optimizing compiler";
UNREACHABLE();
- } else {
+ } else if (can_use_baseline) {
VLOG(compiler) << "Compile baseline " << method_name;
if (!run_optimizations_) {
@@ -536,9 +552,27 @@
}
return CompileBaseline(codegen.get(), compiler_driver, dex_compilation_unit);
+ } else {
+ return nullptr;
}
}
+CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ CompiledMethod* method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+ method_idx, class_loader, dex_file);
+ if (method != nullptr) {
+ return method;
+ }
+ return delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ class_loader, dex_file);
+}
+
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
return new OptimizingCompiler(driver);
}
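
The optimizing_compiler.cc hunks above reduce to one pattern: Compile() first calls TryCompile(), and only when that returns nullptr does it hand the method to the Quick `delegate_` created in the constructor. A minimal, self-contained sketch of that fallback, assuming stand-in Backend/CompiledMethod types rather than the real ART interfaces:

```cpp
#include <iostream>
#include <memory>

struct CompiledMethod {};  // stand-in for the real compiled-code container

struct Backend {
  virtual ~Backend() = default;
  virtual std::unique_ptr<CompiledMethod> Compile(int method_idx) const = 0;
};

struct QuickBackend : Backend {
  std::unique_ptr<CompiledMethod> Compile(int) const override {
    return std::make_unique<CompiledMethod>();  // Quick handles everything here.
  }
};

class OptimizingBackend : public Backend {
 public:
  // Mirrors OptimizingCompiler::delegate_: an owned fallback compiler.
  explicit OptimizingBackend(std::unique_ptr<Backend> delegate)
      : delegate_(std::move(delegate)) {}

  std::unique_ptr<CompiledMethod> Compile(int method_idx) const override {
    if (auto method = TryCompile(method_idx)) {
      return method;
    }
    // Optimizing could not handle the method: fall back to the delegate.
    return delegate_->Compile(method_idx);
  }

 private:
  std::unique_ptr<CompiledMethod> TryCompile(int method_idx) const {
    // Placeholder for the real can_optimize / can_allocate_registers checks:
    // pretend odd method indices are unsupported.
    return (method_idx % 2 == 0) ? std::make_unique<CompiledMethod>() : nullptr;
  }

  const std::unique_ptr<Backend> delegate_;
};

int main() {
  OptimizingBackend compiler(std::make_unique<QuickBackend>());
  std::cout << "compiled via fallback: " << (compiler.Compile(3) != nullptr) << "\n";
}
```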
diff --git a/runtime/base/arena_allocator.cc b/runtime/base/arena_allocator.cc
index e37aca1..dd29404 100644
--- a/runtime/base/arena_allocator.cc
+++ b/runtime/base/arena_allocator.cc
@@ -33,7 +33,7 @@
const char* const ArenaAllocatorStatsImpl<kCount>::kAllocNames[] = {
"Misc ",
"BasicBlock ",
- "BBList "
+ "BBList ",
"BBPreds ",
"DfsPreOrd ",
"DfsPostOrd ",
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 0cad11f..dff8f4d 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -291,10 +291,18 @@
// Try to reserve virtual memory at a lower address if we have a separate non moving space.
request_begin = reinterpret_cast<uint8_t*>(300 * MB);
}
+ // Attempt to create 2 mem maps at or after the requested begin.
if (foreground_collector_type_ != kCollectorTypeCC) {
- // Attempt to create 2 mem maps at or after the requested begin.
- main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin, capacity_,
- &error_str));
+ if (separate_non_moving_space) {
+ main_mem_map_1.reset(MapAnonymousPreferredAddress(kMemMapSpaceName[0], request_begin,
+ capacity_, &error_str));
+ } else {
+ // If no separate non-moving space, the main space must come
+ // right after the image space to avoid a gap.
+ main_mem_map_1.reset(MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
+ PROT_READ | PROT_WRITE, true, false,
+ &error_str));
+ }
CHECK(main_mem_map_1.get() != nullptr) << error_str;
}
if (support_homogeneous_space_compaction ||
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index c056adc..8395150 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -59,10 +59,11 @@
DEBUG_ENABLE_ASSERT = 1 << 2,
DEBUG_ENABLE_SAFEMODE = 1 << 3,
DEBUG_ENABLE_JNI_LOGGING = 1 << 4,
+ DEBUG_ENABLE_JIT = 1 << 5,
};
+ Runtime* const runtime = Runtime::Current();
if ((debug_flags & DEBUG_ENABLE_CHECKJNI) != 0) {
- Runtime* runtime = Runtime::Current();
JavaVMExt* vm = runtime->GetJavaVM();
if (!vm->IsCheckJniEnabled()) {
LOG(INFO) << "Late-enabling -Xcheck:jni";
@@ -86,13 +87,26 @@
}
debug_flags &= ~DEBUG_ENABLE_DEBUGGER;
- if ((debug_flags & DEBUG_ENABLE_SAFEMODE) != 0) {
+ const bool safe_mode = (debug_flags & DEBUG_ENABLE_SAFEMODE) != 0;
+ if (safe_mode) {
// Ensure that any (secondary) oat files will be interpreted.
- Runtime* runtime = Runtime::Current();
runtime->AddCompilerOption("--compiler-filter=interpret-only");
debug_flags &= ~DEBUG_ENABLE_SAFEMODE;
}
+ if ((debug_flags & DEBUG_ENABLE_JIT) != 0) {
+ if (safe_mode) {
+ LOG(INFO) << "Not enabling JIT due to VM safe mode";
+ } else {
+ if (runtime->GetJit() == nullptr) {
+ runtime->CreateJit();
+ } else {
+ LOG(INFO) << "Not late-enabling JIT (already on)";
+ }
+ }
+ debug_flags &= ~DEBUG_ENABLE_JIT;
+ }
+
// This is for backwards compatibility with Dalvik.
debug_flags &= ~DEBUG_ENABLE_ASSERT;
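
The new zygote handling follows the existing flag protocol: DEBUG_ENABLE_JIT (1 << 5) is honoured only outside VM safe mode and, like the other bits, is cleared once consumed. A small self-contained sketch of that consume-and-clear pattern, with a stand-in Runtime (the flag values come from the enum above; everything else is illustrative):

```cpp
#include <cstdint>
#include <iostream>

// Flag values taken from the DEBUG_ENABLE_* enum in the patch above.
enum {
  DEBUG_ENABLE_SAFEMODE = 1 << 3,
  DEBUG_ENABLE_JIT      = 1 << 5,
};

struct Runtime {
  bool jit_created = false;
  void CreateJit() { jit_created = true; }
};

// Returns the flags that remain un-handled, mirroring how the real hook
// clears each bit it consumes.
uint32_t HandleDebugFlags(Runtime* runtime, uint32_t debug_flags) {
  const bool safe_mode = (debug_flags & DEBUG_ENABLE_SAFEMODE) != 0;
  if (safe_mode) {
    // The real hook also forces the interpret-only compiler filter here.
    debug_flags &= ~DEBUG_ENABLE_SAFEMODE;
  }

  if ((debug_flags & DEBUG_ENABLE_JIT) != 0) {
    if (safe_mode) {
      std::cout << "Not enabling JIT due to VM safe mode\n";
    } else if (!runtime->jit_created) {
      runtime->CreateJit();
    }
    debug_flags &= ~DEBUG_ENABLE_JIT;
  }
  return debug_flags;
}

int main() {
  Runtime runtime;
  uint32_t leftover = HandleDebugFlags(&runtime, DEBUG_ENABLE_JIT);
  std::cout << "jit created: " << runtime.jit_created
            << ", leftover flags: " << leftover << "\n";
}
```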
diff --git a/runtime/oat_file_assistant_test.cc b/runtime/oat_file_assistant_test.cc
index 71679ae..be8652c 100644
--- a/runtime/oat_file_assistant_test.cc
+++ b/runtime/oat_file_assistant_test.cc
@@ -203,7 +203,9 @@
// Ensure a chunk of memory is reserved for the image space.
uintptr_t reservation_start = ART_BASE_ADDRESS + ART_BASE_ADDRESS_MIN_DELTA;
uintptr_t reservation_end = ART_BASE_ADDRESS + ART_BASE_ADDRESS_MAX_DELTA
- + 100 * 1024 * 1024;
+ // Include the main space that has to come right after the
+ // image in case of the GSS collector.
+ + 384 * MB;
std::string error_msg;
std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid(), true));
diff --git a/test/run-test b/test/run-test
index 2f7a5ac..df0fce4 100755
--- a/test/run-test
+++ b/test/run-test
@@ -525,7 +525,7 @@
# if Checker is not invoked and the test only runs the program.
build_args="${build_args} --dx-option --no-optimize"
- if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$target_mode" = "no" -a "$debuggable" = "no"]; then
+ if [ "$runtime" = "art" -a "$image_suffix" = "-optimizing" -a "$target_mode" = "no" -a "$debuggable" = "no" ]; then
run_checker="yes"
run_args="${run_args} -Xcompiler-option --dump-cfg=$tmp_dir/$cfg_output \
-Xcompiler-option -j1"