Merge "Move inline method detection to runtime."
diff --git a/build/Android.common.mk b/build/Android.common.mk
index f22eb37..219f1e2 100644
--- a/build/Android.common.mk
+++ b/build/Android.common.mk
@@ -37,6 +37,12 @@
ART_BUILD_HOST_NDEBUG ?= $(WITH_HOST_DALVIK)
ART_BUILD_HOST_DEBUG ?= $(WITH_HOST_DALVIK)
+ifeq ($(BUILD_HOST_64bit),)
+ART_HOST_ARCH := x86
+else
+ART_HOST_ARCH := x86_64
+endif
+
ifeq ($(ART_BUILD_TARGET_NDEBUG),false)
$(info Disabling ART_BUILD_TARGET_NDEBUG)
endif
@@ -87,6 +93,23 @@
ART_USE_PORTABLE_COMPILER := true
endif
+#
+# Used to enable optimizing compiler
+#
+ART_USE_OPTIMIZING_COMPILER := false
+ifneq ($(wildcard art/USE_OPTIMIZING_COMPILER),)
+$(info Enabling ART_USE_OPTIMIZING_COMPILER because of existence of art/USE_OPTIMIZING_COMPILER)
+ART_USE_OPTIMIZING_COMPILER := true
+endif
+ifeq ($(WITH_ART_USE_OPTIMIZING_COMPILER), true)
+ART_USE_OPTIMIZING_COMPILER := true
+endif
+
+ifeq ($(ART_USE_OPTIMIZING_COMPILER),true)
+DEX2OAT_FLAGS := --compiler-backend=Optimizing
+DALVIKVM_FLAGS := -Xcompiler-option --compiler-backend=Optimizing
+endif
+
LLVM_ROOT_PATH := external/llvm
# Don't fail a dalvik minimal host build.
-include $(LLVM_ROOT_PATH)/llvm.mk
diff --git a/build/Android.oat.mk b/build/Android.oat.mk
index 6012421..def585b 100644
--- a/build/Android.oat.mk
+++ b/build/Android.oat.mk
@@ -40,19 +40,13 @@
TARGET_INSTRUCTION_SET_FEATURES := $(DEX2OAT_TARGET_INSTRUCTION_SET_FEATURES)
-ifeq ($(BUILD_HOST_64bit),)
-host_arch := x86
-else
-host_arch := x86_64
-endif
-
$(HOST_CORE_IMG_OUT): $(HOST_CORE_DEX_FILES) $(DEX2OAT_DEPENDENCY)
@echo "host dex2oat: $@ ($?)"
@mkdir -p $(dir $@)
$(hide) $(DEX2OAT) --runtime-arg -Xms16m --runtime-arg -Xmx16m --image-classes=$(PRELOADED_CLASSES) $(addprefix \
--dex-file=,$(HOST_CORE_DEX_FILES)) $(addprefix --dex-location=,$(HOST_CORE_DEX_LOCATIONS)) --oat-file=$(HOST_CORE_OAT_OUT) \
--oat-location=$(HOST_CORE_OAT) --image=$(HOST_CORE_IMG_OUT) --base=$(LIBART_IMG_HOST_BASE_ADDRESS) \
- --instruction-set=$(host_arch) --host --android-root=$(HOST_OUT)
+ --instruction-set=$(ART_HOST_ARCH) --host --android-root=$(HOST_OUT)
$(TARGET_CORE_IMG_OUT): $(TARGET_CORE_DEX_FILES) $(DEX2OAT_DEPENDENCY)
@echo "target dex2oat: $@ ($?)"
diff --git a/compiler/Android.mk b/compiler/Android.mk
index 2127b40..bcd120b 100644
--- a/compiler/Android.mk
+++ b/compiler/Android.mk
@@ -75,6 +75,7 @@
optimizing/code_generator_arm.cc \
optimizing/code_generator_x86.cc \
optimizing/nodes.cc \
+ optimizing/optimizing_compiler.cc \
trampolines/trampoline_compiler.cc \
utils/arena_allocator.cc \
utils/arena_bit_vector.cc \
@@ -89,7 +90,8 @@
utils/x86/managed_register_x86.cc \
utils/scoped_arena_allocator.cc \
buffered_output_stream.cc \
- compiler_backend.cc \
+ compilers.cc \
+ compiler.cc \
elf_fixup.cc \
elf_stripper.cc \
elf_writer.cc \
@@ -209,7 +211,7 @@
LOCAL_SHARED_LIBRARIES += libart
endif
ifeq ($(ART_USE_PORTABLE_COMPILER),true)
- LOCAL_SHARED_LIBRARIES += libbcc libbcinfo libLLVM
+ LOCAL_SHARED_LIBRARIES += libLLVM
LOCAL_CFLAGS += -DART_USE_PORTABLE_COMPILER=1
ifeq ($$(art_target_or_host),target)
LOCAL_STATIC_LIBRARIES_arm += libmcldARMInfo libmcldARMTarget
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index def7b68..49c1283 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -17,7 +17,7 @@
#ifndef ART_COMPILER_COMMON_COMPILER_TEST_H_
#define ART_COMPILER_COMMON_COMPILER_TEST_H_
-#include "compiler_backend.h"
+#include "compiler.h"
#include "compiler_callbacks.h"
#include "common_runtime_test.h"
#include "dex/quick/dex_file_to_method_inliner_map.h"
@@ -219,8 +219,15 @@
} else {
const void* method_code = GetQuickGenericJniTrampoline();
mirror::ArtMethod* callee_save_method = runtime_->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+
+ // Compute Sirt size, as Sirt goes into frame
+ MethodHelper mh(method);
+ uint32_t sirt_refs = mh.GetNumberOfReferenceArgsWithoutReceiver() + 1;
+ uint32_t sirt_size = StackIndirectReferenceTable::SizeOf(sirt_refs);
+
OatFile::OatMethod oat_method = CreateOatMethod(method_code,
- callee_save_method->GetFrameSizeInBytes(),
+ callee_save_method->GetFrameSizeInBytes() +
+ sirt_size,
callee_save_method->GetCoreSpillMask(),
callee_save_method->GetFpSpillMask(),
nullptr,
@@ -312,13 +319,13 @@
}
// TODO: make selectable
- CompilerBackend::Kind compiler_backend
- = (kUsePortableCompiler) ? CompilerBackend::kPortable : CompilerBackend::kQuick;
+ Compiler::Kind compiler_kind
+ = (kUsePortableCompiler) ? Compiler::kPortable : Compiler::kQuick;
timer_.reset(new CumulativeLogger("Compilation times"));
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
method_inliner_map_.get(),
- compiler_backend, instruction_set,
+ compiler_kind, instruction_set,
instruction_set_features,
true, new CompilerDriver::DescriptorSet,
2, true, true, timer_.get()));
diff --git a/compiler/compiler.cc b/compiler/compiler.cc
new file mode 100644
index 0000000..c88c38e
--- /dev/null
+++ b/compiler/compiler.cc
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler.h"
+#include "compilers.h"
+#include "driver/compiler_driver.h"
+#include "mirror/art_method-inl.h"
+
+#ifdef ART_USE_PORTABLE_COMPILER
+#include "dex/portable/mir_to_gbc.h"
+#include "elf_writer_mclinker.h"
+#endif
+
+namespace art {
+
+#ifdef ART_SEA_IR_MODE
+extern "C" art::CompiledMethod* SeaIrCompileMethod(art::CompilerDriver& driver,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file);
+#endif
+
+
+CompiledMethod* Compiler::TryCompileWithSeaIR(art::CompilerDriver& driver,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file) {
+#ifdef ART_SEA_IR_MODE
+ bool use_sea = Runtime::Current()->IsSeaIRMode();
+ use_sea = use_sea &&
+ (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
+ if (use_sea) {
+ LOG(INFO) << "Using SEA IR to compile..." << std::endl;
+ return SeaIrCompileMethod(driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
+ }
+#endif
+ return nullptr;
+}
+
+
+#ifdef ART_USE_PORTABLE_COMPILER
+
+extern "C" void ArtInitCompilerContext(art::CompilerDriver& driver);
+
+extern "C" void ArtUnInitCompilerContext(art::CompilerDriver& driver);
+
+extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver& driver,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file);
+
+extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver& driver,
+ uint32_t access_flags, uint32_t method_idx,
+ const art::DexFile& dex_file);
+
+extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver,
+ std::string const& filename);
+
+
+class LLVMCompiler : public Compiler {
+ public:
+ LLVMCompiler() : Compiler(1000) {}
+
+ void Init(CompilerDriver& driver) const {
+ ArtInitCompilerContext(driver);
+ }
+
+ void UnInit(CompilerDriver& driver) const {
+ ArtUnInitCompilerContext(driver);
+ }
+
+ CompiledMethod* Compile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ CompiledMethod* method = TryCompileWithSeaIR(driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
+ if (method != nullptr) return method;
+
+ return ArtCompileMethod(driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
+ }
+
+ CompiledMethod* JniCompile(CompilerDriver& driver,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) const {
+ return ArtLLVMJniCompileMethod(driver, access_flags, method_idx, dex_file);
+ }
+
+ uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const {
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromPortableCompiledCode());
+ }
+
+ bool WriteElf(art::File* file,
+ OatWriter* oat_writer,
+ const std::vector<const art::DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host, const CompilerDriver& driver) const
+ OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return art::ElfWriterMclinker::Create(
+ file, oat_writer, dex_files, android_root, is_host, driver);
+ }
+
+ Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+ return PortableCodeGenerator(
+ cu, cu->mir_graph.get(), &cu->arena,
+ reinterpret_cast<art::llvm::LlvmCompilationUnit*>(compilation_unit));
+ }
+
+ void InitCompilationUnit(CompilationUnit& cu) const {
+ // Fused long branches not currently useful in bitcode.
+ cu.disable_opt |=
+ (1 << kBranchFusing) |
+ (1 << kSuppressExceptionEdges);
+ }
+
+ bool IsPortable() const OVERRIDE {
+ return true;
+ }
+
+ void SetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
+ typedef void (*SetBitcodeFileNameFn)(const CompilerDriver&, const std::string&);
+
+ SetBitcodeFileNameFn set_bitcode_file_name =
+ reinterpret_cast<SetBitcodeFileNameFn>(compilerLLVMSetBitcodeFileName);
+
+ set_bitcode_file_name(driver, filename);
+ }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LLVMCompiler);
+};
+#endif
+
+Compiler* Compiler::Create(Compiler::Kind kind) {
+ switch (kind) {
+ case kQuick:
+ return new QuickCompiler();
+ break;
+ case kOptimizing:
+ return new OptimizingCompiler();
+ break;
+ case kPortable:
+#ifdef ART_USE_PORTABLE_COMPILER
+ return new LLVMCompiler();
+#else
+ LOG(FATAL) << "Portable compiler not compiled";
+#endif
+ break;
+ default:
+ LOG(FATAL) << "UNREACHABLE";
+ }
+ return nullptr;
+}
+
+} // namespace art
diff --git a/compiler/compiler_backend.h b/compiler/compiler.h
similarity index 79%
rename from compiler/compiler_backend.h
rename to compiler/compiler.h
index b473806..1d5fc24 100644
--- a/compiler/compiler_backend.h
+++ b/compiler/compiler.h
@@ -14,8 +14,8 @@
* limitations under the License.
*/
-#ifndef ART_COMPILER_COMPILER_BACKEND_H_
-#define ART_COMPILER_COMPILER_BACKEND_H_
+#ifndef ART_COMPILER_COMPILER_H_
+#define ART_COMPILER_COMPILER_H_
#include "dex_file.h"
#include "os.h"
@@ -33,18 +33,19 @@
class ArtMethod;
}
-class CompilerBackend {
+class Compiler {
public:
enum Kind {
kQuick,
+ kOptimizing,
kPortable
};
- explicit CompilerBackend(uint64_t warning)
+ explicit Compiler(uint64_t warning)
: maximum_compilation_time_before_warning_(warning) {
}
- static CompilerBackend* Create(Kind kind);
+ static Compiler* Create(Kind kind);
virtual void Init(CompilerDriver& driver) const = 0;
@@ -59,6 +60,15 @@
jobject class_loader,
const DexFile& dex_file) const = 0;
+ static CompiledMethod* TryCompileWithSeaIR(art::CompilerDriver& driver,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file);
+
virtual CompiledMethod* JniCompile(CompilerDriver& driver,
uint32_t access_flags,
uint32_t method_idx,
@@ -91,7 +101,7 @@
virtual void InitCompilationUnit(CompilationUnit& cu) const = 0;
- virtual ~CompilerBackend() {}
+ virtual ~Compiler() {}
/*
* @brief Generate and return Dwarf CFI initialization, if supported by the
@@ -109,9 +119,9 @@
private:
const uint64_t maximum_compilation_time_before_warning_;
- DISALLOW_COPY_AND_ASSIGN(CompilerBackend);
+ DISALLOW_COPY_AND_ASSIGN(Compiler);
};
} // namespace art
-#endif // ART_COMPILER_COMPILER_BACKEND_H_
+#endif // ART_COMPILER_COMPILER_H_
diff --git a/compiler/compiler_backend.cc b/compiler/compiler_backend.cc
deleted file mode 100644
index 0afa665..0000000
--- a/compiler/compiler_backend.cc
+++ /dev/null
@@ -1,331 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compiler_backend.h"
-#include "elf_writer_quick.h"
-#include "dex/quick/mir_to_lir.h"
-#include "dex/mir_graph.h"
-#include "driver/compiler_driver.h"
-#include "mirror/art_method-inl.h"
-
-#ifdef ART_USE_PORTABLE_COMPILER
-#include "dex/portable/mir_to_gbc.h"
-#include "elf_writer_mclinker.h"
-#endif
-
-namespace art {
-
-#ifdef ART_SEA_IR_MODE
-extern "C" art::CompiledMethod* SeaIrCompileMethod(art::CompilerDriver& compiler,
- const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file);
-#endif
-
-extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& driver);
-extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& driver);
-extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver& compiler,
- const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file);
-
-extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver& compiler,
- uint32_t access_flags, uint32_t method_idx,
- const art::DexFile& dex_file);
-
-
-static CompiledMethod* TryCompileWithSeaIR(art::CompilerDriver& compiler,
- const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file) {
-#ifdef ART_SEA_IR_MODE
- bool use_sea = Runtime::Current()->IsSeaIRMode();
- use_sea = use_sea &&
- (std::string::npos != PrettyMethod(method_idx, dex_file).find("fibonacci"));
- if (use_sea) {
- LOG(INFO) << "Using SEA IR to compile..." << std::endl;
- return SeaIrCompileMethod(compiler,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- }
-#endif
- return nullptr;
-}
-
-
-// Hack for CFI CIE initialization
-extern std::vector<uint8_t>* X86CFIInitialization();
-
-class QuickBackend : public CompilerBackend {
- public:
- QuickBackend() : CompilerBackend(100) {}
-
- void Init(CompilerDriver& driver) const {
- ArtInitQuickCompilerContext(driver);
- }
-
- void UnInit(CompilerDriver& driver) const {
- ArtUnInitQuickCompilerContext(driver);
- }
-
- CompiledMethod* Compile(CompilerDriver& compiler,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file) const {
- CompiledMethod* method = TryCompileWithSeaIR(compiler,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- if (method != nullptr) return method;
-
- return ArtQuickCompileMethod(compiler,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- }
-
- CompiledMethod* JniCompile(CompilerDriver& driver,
- uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file) const {
- return ArtQuickJniCompileMethod(driver, access_flags, method_idx, dex_file);
- }
-
- uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const {
- return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
- }
-
- bool WriteElf(art::File* file,
- OatWriter* oat_writer,
- const std::vector<const art::DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host, const CompilerDriver& driver) const
- OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host, driver);
- }
-
- Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
- Mir2Lir* mir_to_lir = nullptr;
- switch (cu->instruction_set) {
- case kThumb2:
- mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
- case kMips:
- mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
- case kX86:
- mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
- break;
- default:
- LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
- }
-
- /* The number of compiler temporaries depends on backend so set it up now if possible */
- if (mir_to_lir) {
- size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
- bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
- CHECK(set_max);
- }
- return mir_to_lir;
- }
-
- void InitCompilationUnit(CompilationUnit& cu) const {}
-
- /*
- * @brief Generate and return Dwarf CFI initialization, if supported by the
- * backend.
- * @param driver CompilerDriver for this compile.
- * @returns nullptr if not supported by backend or a vector of bytes for CFI DWARF
- * information.
- * @note This is used for backtrace information in generated code.
- */
- std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver) const
- OVERRIDE {
- if (driver.GetInstructionSet() == kX86) {
- return X86CFIInitialization();
- }
- return nullptr;
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(QuickBackend);
-};
-
-#ifdef ART_USE_PORTABLE_COMPILER
-
-extern "C" void ArtInitCompilerContext(art::CompilerDriver& driver);
-
-extern "C" void ArtUnInitCompilerContext(art::CompilerDriver& driver);
-
-extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver& driver,
- const art::DexFile::CodeItem* code_item,
- uint32_t access_flags,
- art::InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const art::DexFile& dex_file);
-
-extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver& driver,
- uint32_t access_flags, uint32_t method_idx,
- const art::DexFile& dex_file);
-
-extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver,
- std::string const& filename);
-
-
-class LLVMBackend : public CompilerBackend {
- public:
- LLVMBackend() : CompilerBackend(1000) {}
-
- void Init(CompilerDriver& driver) const {
- ArtInitCompilerContext(driver);
- }
-
- void UnInit(CompilerDriver& driver) const {
- ArtUnInitCompilerContext(driver);
- }
-
- CompiledMethod* Compile(CompilerDriver& compiler,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- jobject class_loader,
- const DexFile& dex_file) const {
- CompiledMethod* method = TryCompileWithSeaIR(compiler,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- if (method != nullptr) return method;
-
- return ArtCompileMethod(compiler,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file);
- }
-
- CompiledMethod* JniCompile(CompilerDriver& driver,
- uint32_t access_flags,
- uint32_t method_idx,
- const DexFile& dex_file) const {
- return ArtLLVMJniCompileMethod(driver, access_flags, method_idx, dex_file);
- }
-
- uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const {
- return reinterpret_cast<uintptr_t>(method->GetEntryPointFromPortableCompiledCode());
- }
-
- bool WriteElf(art::File* file,
- OatWriter* oat_writer,
- const std::vector<const art::DexFile*>& dex_files,
- const std::string& android_root,
- bool is_host, const CompilerDriver& driver) const
- OVERRIDE
- SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return art::ElfWriterMclinker::Create(
- file, oat_writer, dex_files, android_root, is_host, driver);
- }
-
- Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
- return PortableCodeGenerator(
- cu, cu->mir_graph.get(), &cu->arena,
- reinterpret_cast<art::llvm::LlvmCompilationUnit*>(compilation_unit));
- }
-
- void InitCompilationUnit(CompilationUnit& cu) const {
- // Fused long branches not currently useful in bitcode.
- cu.disable_opt |=
- (1 << kBranchFusing) |
- (1 << kSuppressExceptionEdges);
- }
-
- bool IsPortable() const OVERRIDE {
- return true;
- }
-
- void SetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
- typedef void (*SetBitcodeFileNameFn)(const CompilerDriver&, const std::string&);
-
- SetBitcodeFileNameFn set_bitcode_file_name =
- reinterpret_cast<SetBitcodeFileNameFn>(compilerLLVMSetBitcodeFileName);
-
- set_bitcode_file_name(driver, filename);
- }
-
- private:
- DISALLOW_COPY_AND_ASSIGN(LLVMBackend);
-};
-#endif
-
-CompilerBackend* CompilerBackend::Create(CompilerBackend::Kind kind) {
- switch (kind) {
- case kQuick:
- return new QuickBackend();
- break;
- case kPortable:
-#ifdef ART_USE_PORTABLE_COMPILER
- return new LLVMBackend();
-#else
- LOG(FATAL) << "Portable compiler not compiled";
-#endif
- break;
- default:
- LOG(FATAL) << "UNREACHABLE";
- }
- return nullptr;
-}
-
-} // namespace art
diff --git a/compiler/compilers.cc b/compiler/compilers.cc
new file mode 100644
index 0000000..9bb1719
--- /dev/null
+++ b/compiler/compilers.cc
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compilers.h"
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir.h"
+#include "elf_writer_quick.h"
+#include "mirror/art_method-inl.h"
+
+namespace art {
+
+extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver& driver);
+extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver& driver);
+extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver& driver,
+ const art::DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ art::InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const art::DexFile& dex_file);
+
+extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver& driver,
+ uint32_t access_flags, uint32_t method_idx,
+ const art::DexFile& dex_file);
+
+// Hack for CFI CIE initialization
+extern std::vector<uint8_t>* X86CFIInitialization();
+
+void QuickCompiler::Init(CompilerDriver& driver) const {
+ ArtInitQuickCompilerContext(driver);
+}
+
+void QuickCompiler::UnInit(CompilerDriver& driver) const {
+ ArtUnInitQuickCompilerContext(driver);
+}
+
+CompiledMethod* QuickCompiler::Compile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ CompiledMethod* method = TryCompileWithSeaIR(driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
+ if (method != nullptr) return method;
+
+ return ArtQuickCompileMethod(driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file);
+}
+
+CompiledMethod* QuickCompiler::JniCompile(CompilerDriver& driver,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) const {
+ return ArtQuickJniCompileMethod(driver, access_flags, method_idx, dex_file);
+}
+
+uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
+ return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
+}
+
+bool QuickCompiler::WriteElf(art::File* file,
+ OatWriter* oat_writer,
+ const std::vector<const art::DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host, const CompilerDriver& driver) const
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host, driver);
+}
+
+Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+ Mir2Lir* mir_to_lir = nullptr;
+ switch (cu->instruction_set) {
+ case kThumb2:
+ mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ case kMips:
+ mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ case kX86:
+ mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+ break;
+ default:
+ LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
+ }
+
+ /* The number of compiler temporaries depends on backend so set it up now if possible */
+ if (mir_to_lir) {
+ size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
+ bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
+ CHECK(set_max);
+ }
+ return mir_to_lir;
+}
+
+std::vector<uint8_t>* QuickCompiler::GetCallFrameInformationInitialization(
+ const CompilerDriver& driver) const {
+ if (driver.GetInstructionSet() == kX86) {
+ return X86CFIInitialization();
+ }
+ return nullptr;
+}
+
+CompiledMethod* OptimizingCompiler::Compile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ CompiledMethod* method = TryCompile(
+ driver, code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ class_loader, dex_file);
+ if (method != nullptr) return method;
+
+ return QuickCompiler::Compile(
+ driver, code_item, access_flags, invoke_type, class_def_idx, method_idx,
+ class_loader, dex_file);
+}
+
+} // namespace art
diff --git a/compiler/compilers.h b/compiler/compilers.h
new file mode 100644
index 0000000..892a6bd
--- /dev/null
+++ b/compiler/compilers.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_COMPILERS_H_
+#define ART_COMPILER_COMPILERS_H_
+
+#include "compiler.h"
+
+namespace art {
+
+class QuickCompiler : public Compiler {
+ public:
+ QuickCompiler() : Compiler(100) {}
+
+ void Init(CompilerDriver& driver) const OVERRIDE;
+
+ void UnInit(CompilerDriver& driver) const OVERRIDE;
+
+ CompiledMethod* Compile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const OVERRIDE;
+
+ CompiledMethod* JniCompile(CompilerDriver& driver,
+ uint32_t access_flags,
+ uint32_t method_idx,
+ const DexFile& dex_file) const OVERRIDE;
+
+ uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE;
+
+ bool WriteElf(art::File* file,
+ OatWriter* oat_writer,
+ const std::vector<const art::DexFile*>& dex_files,
+ const std::string& android_root,
+ bool is_host, const CompilerDriver& driver) const
+ OVERRIDE
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+ Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const OVERRIDE;
+
+ void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE {}
+
+ /*
+ * @brief Generate and return Dwarf CFI initialization, if supported by the
+ * backend.
+ * @param driver CompilerDriver for this compile.
+ * @returns nullptr if not supported by backend or a vector of bytes for CFI DWARF
+ * information.
+ * @note This is used for backtrace information in generated code.
+ */
+ std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver) const
+ OVERRIDE;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(QuickCompiler);
+};
+
+class OptimizingCompiler : public QuickCompiler {
+ public:
+ OptimizingCompiler() { }
+
+ CompiledMethod* Compile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const OVERRIDE;
+
+ CompiledMethod* TryCompile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
+};
+
+} // namespace art
+
+#endif // ART_COMPILER_COMPILERS_H_
diff --git a/compiler/dex/compiler_ir.h b/compiler/dex/compiler_ir.h
index c71f047..70159ca 100644
--- a/compiler/dex/compiler_ir.h
+++ b/compiler/dex/compiler_ir.h
@@ -62,7 +62,7 @@
uint32_t disable_opt; // opt_control_vector flags.
uint32_t enable_debug; // debugControlVector flags.
bool verbose;
- const CompilerBackend* compiler_backend;
+ const Compiler* compiler;
InstructionSet instruction_set;
bool target64;
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index 3bd71d1..83fbca5 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -14,7 +14,7 @@
* limitations under the License.
*/
-#include "compiler_backend.h"
+#include "compiler.h"
#include "compiler_internals.h"
#include "driver/compiler_driver.h"
#include "driver/compiler_options.h"
@@ -23,7 +23,6 @@
#include "mirror/object.h"
#include "pass_driver.h"
#include "runtime.h"
-#include "backend.h"
#include "base/logging.h"
#include "base/timing_logger.h"
#include "driver/compiler_options.h"
@@ -90,7 +89,7 @@
disable_opt(0),
enable_debug(0),
verbose(false),
- compiler_backend(NULL),
+ compiler(NULL),
instruction_set(kNone),
num_dalvik_registers(0),
insns(NULL),
@@ -131,7 +130,7 @@
}
static CompiledMethod* CompileMethod(CompilerDriver& driver,
- CompilerBackend* compiler_backend,
+ Compiler* compiler,
const DexFile::CodeItem* code_item,
uint32_t access_flags, InvokeType invoke_type,
uint16_t class_def_idx, uint32_t method_idx,
@@ -157,7 +156,7 @@
cu.class_linker = class_linker;
cu.instruction_set = driver.GetInstructionSet();
cu.target64 = cu.instruction_set == kX86_64;
- cu.compiler_backend = compiler_backend;
+ cu.compiler = compiler;
// TODO: x86_64 is not yet implemented.
DCHECK((cu.instruction_set == kThumb2) ||
(cu.instruction_set == kX86) ||
@@ -184,7 +183,7 @@
* MIR and backend flags? Need command-line setting as well.
*/
- compiler_backend->InitCompilationUnit(cu);
+ compiler->InitCompilationUnit(cu);
if (cu.instruction_set == kMips) {
// Disable some optimizations for mips for now
@@ -209,7 +208,7 @@
* The reason we do this is that optimizations on the MIR graph may need to get information
* that is only available if a CG exists.
*/
- cu.cg.reset(compiler_backend->GetCodeGenerator(&cu, llvm_compilation_unit));
+ cu.cg.reset(compiler->GetCodeGenerator(&cu, llvm_compilation_unit));
/* Gathering opcode stats? */
if (kCompilerDebugFlags & (1 << kDebugCountOpcodes)) {
@@ -286,8 +285,8 @@
return result;
}
-CompiledMethod* CompileOneMethod(CompilerDriver& compiler,
- CompilerBackend* backend,
+CompiledMethod* CompileOneMethod(CompilerDriver& driver,
+ Compiler* compiler,
const DexFile::CodeItem* code_item,
uint32_t access_flags,
InvokeType invoke_type,
@@ -296,21 +295,21 @@
jobject class_loader,
const DexFile& dex_file,
void* compilation_unit) {
- return CompileMethod(compiler, backend, code_item, access_flags, invoke_type, class_def_idx,
+ return CompileMethod(driver, compiler, code_item, access_flags, invoke_type, class_def_idx,
method_idx, class_loader, dex_file, compilation_unit);
}
} // namespace art
extern "C" art::CompiledMethod*
- ArtQuickCompileMethod(art::CompilerDriver& compiler,
+ ArtQuickCompileMethod(art::CompilerDriver& driver,
const art::DexFile::CodeItem* code_item,
uint32_t access_flags, art::InvokeType invoke_type,
uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
const art::DexFile& dex_file) {
// TODO: check method fingerprint here to determine appropriate backend type. Until then, use build default
- art::CompilerBackend* backend = compiler.GetCompilerBackend();
- return art::CompileOneMethod(compiler, backend, code_item, access_flags, invoke_type,
+ art::Compiler* compiler = driver.GetCompiler();
+ return art::CompileOneMethod(driver, compiler, code_item, access_flags, invoke_type,
class_def_idx, method_idx, class_loader, dex_file,
NULL /* use thread llvm_info */);
}
diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 03fc091..cb737ab 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -405,7 +405,7 @@
// Is this the select pattern?
// TODO: flesh out support for Mips. NOTE: llvm's select op doesn't quite work here.
// TUNING: expand to support IF_xx compare & branches
- if (!cu_->compiler_backend->IsPortable() &&
+ if (!cu_->compiler->IsPortable() &&
(cu_->instruction_set == kThumb2 || cu_->instruction_set == kX86) &&
IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
BasicBlock* ft = GetBasicBlock(bb->fall_through);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index d545c06..5ee31f7 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -26,7 +26,7 @@
#include "base/stl_util.h"
#include "base/timing_logger.h"
#include "class_linker.h"
-#include "compiler_backend.h"
+#include "compiler.h"
#include "compiler_driver-inl.h"
#include "dex_compilation_unit.h"
#include "dex_file-inl.h"
@@ -324,7 +324,7 @@
CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
VerificationResults* verification_results,
DexFileToMethodInlinerMap* method_inliner_map,
- CompilerBackend::Kind compiler_backend_kind,
+ Compiler::Kind compiler_kind,
InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
bool image, DescriptorSet* image_classes, size_t thread_count,
@@ -333,7 +333,7 @@
: profile_ok_(false), compiler_options_(compiler_options),
verification_results_(verification_results),
method_inliner_map_(method_inliner_map),
- compiler_backend_(CompilerBackend::Create(compiler_backend_kind)),
+ compiler_(Compiler::Create(compiler_kind)),
instruction_set_(instruction_set),
instruction_set_features_(instruction_set_features),
freezing_constructor_lock_("freezing constructor lock"),
@@ -371,7 +371,7 @@
dex_to_dex_compiler_ = reinterpret_cast<DexToDexCompilerFn>(ArtCompileDEX);
- compiler_backend_->Init(*this);
+ compiler_->Init(*this);
CHECK(!Runtime::Current()->IsStarted());
if (!image_) {
@@ -380,7 +380,7 @@
// Are we generating CFI information?
if (compiler_options->GetGenerateGDBInformation()) {
- cfi_info_.reset(compiler_backend_->GetCallFrameInformationInitialization(*this));
+ cfi_info_.reset(compiler_->GetCallFrameInformationInitialization(*this));
}
}
@@ -430,7 +430,7 @@
STLDeleteElements(&classes_to_patch_);
}
CHECK_PTHREAD_CALL(pthread_key_delete, (tls_key_), "delete tls key");
- compiler_backend_->UnInit(*this);
+ compiler_->UnInit(*this);
}
CompilerTls* CompilerDriver::GetTls() {
@@ -1054,7 +1054,7 @@
*direct_method = 0;
bool use_dex_cache = false;
const bool compiling_boot = Runtime::Current()->GetHeap()->IsCompilingBoot();
- if (compiler_backend_->IsPortable()) {
+ if (compiler_->IsPortable()) {
if (sharp_type != kStatic && sharp_type != kDirect) {
return;
}
@@ -1130,13 +1130,13 @@
CHECK(!method->IsAbstract());
*type = sharp_type;
*direct_method = reinterpret_cast<uintptr_t>(method);
- *direct_code = compiler_backend_->GetEntryPointOf(method);
+ *direct_code = compiler_->GetEntryPointOf(method);
target_method->dex_file = method->GetDeclaringClass()->GetDexCache()->GetDexFile();
target_method->dex_method_index = method->GetDexMethodIndex();
} else if (!must_use_direct_pointers) {
// Set the code and rely on the dex cache for the method.
*type = sharp_type;
- *direct_code = compiler_backend_->GetEntryPointOf(method);
+ *direct_code = compiler_->GetEntryPointOf(method);
} else {
// Direct pointers were required but none were available.
VLOG(compiler) << "Dex cache devirtualization failed for: " << PrettyMethod(method);
@@ -1864,7 +1864,7 @@
#if defined(__x86_64__)
// leaving this empty will trigger the generic JNI version
#else
- compiled_method = compiler_backend_->JniCompile(*this, access_flags, method_idx, dex_file);
+ compiled_method = compiler_->JniCompile(*this, access_flags, method_idx, dex_file);
CHECK(compiled_method != NULL);
#endif
} else if ((access_flags & kAccAbstract) != 0) {
@@ -1873,7 +1873,7 @@
bool compile = verification_results_->IsCandidateForCompilation(method_ref, access_flags);
if (compile) {
// NOTE: if compiler declines to compile this method, it will return NULL.
- compiled_method = compiler_backend_->Compile(
+ compiled_method = compiler_->Compile(
*this, code_item, access_flags, invoke_type, class_def_idx,
method_idx, class_loader, dex_file);
} else if (dex_to_dex_compilation_level != kDontDexToDexCompile) {
@@ -1885,7 +1885,7 @@
}
}
uint64_t duration_ns = NanoTime() - start_ns;
- if (duration_ns > MsToNs(compiler_backend_->GetMaximumCompilationTimeBeforeWarning())) {
+ if (duration_ns > MsToNs(compiler_->GetMaximumCompilationTimeBeforeWarning())) {
LOG(WARNING) << "Compilation of " << PrettyMethod(method_idx, dex_file)
<< " took " << PrettyDuration(duration_ns);
}
@@ -1972,7 +1972,7 @@
OatWriter* oat_writer,
art::File* file)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- return compiler_backend_->WriteElf(file, oat_writer, dex_files, android_root, is_host, *this);
+ return compiler_->WriteElf(file, oat_writer, dex_files, android_root, is_host, *this);
}
void CompilerDriver::InstructionSetToLLVMTarget(InstructionSet instruction_set,
std::string* target_triple,
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index 171be7d..71c431d 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -26,7 +26,7 @@
#include "class_reference.h"
#include "compiled_class.h"
#include "compiled_method.h"
-#include "compiler_backend.h"
+#include "compiler.h"
#include "dex_file.h"
#include "instruction_set.h"
#include "invoke_type.h"
@@ -99,7 +99,7 @@
explicit CompilerDriver(const CompilerOptions* compiler_options,
VerificationResults* verification_results,
DexFileToMethodInlinerMap* method_inliner_map,
- CompilerBackend::Kind compiler_backend_kind,
+ Compiler::Kind compiler_kind,
InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
bool image, DescriptorSet* image_classes,
@@ -137,8 +137,8 @@
return *compiler_options_;
}
- CompilerBackend* GetCompilerBackend() const {
- return compiler_backend_.get();
+ Compiler* GetCompiler() const {
+ return compiler_.get();
}
bool ProfilePresent() const {
@@ -701,7 +701,7 @@
VerificationResults* const verification_results_;
DexFileToMethodInlinerMap* const method_inliner_map_;
- UniquePtr<CompilerBackend> compiler_backend_;
+ UniquePtr<Compiler> compiler_;
const InstructionSet instruction_set_;
const InstructionSetFeatures instruction_set_features_;
diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc
index 4ce714a..2812700 100644
--- a/compiler/llvm/compiler_llvm.cc
+++ b/compiler/llvm/compiler_llvm.cc
@@ -39,7 +39,7 @@
namespace art {
void CompileOneMethod(CompilerDriver& driver,
- CompilerBackend* compilerBackend,
+ Compiler* compiler,
const DexFile::CodeItem* code_item,
uint32_t access_flags, InvokeType invoke_type,
uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
@@ -142,7 +142,7 @@
cunit->SetCompilerDriver(compiler_driver_);
// TODO: consolidate ArtCompileMethods
CompileOneMethod(*compiler_driver_,
- compiler_driver_->GetCompilerBackend(),
+ compiler_driver_->GetCompiler(),
dex_compilation_unit->GetCodeItem(),
dex_compilation_unit->GetAccessFlags(),
invoke_type,
diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc
index 93c3502..9cfef12 100644
--- a/compiler/oat_test.cc
+++ b/compiler/oat_test.cc
@@ -15,7 +15,7 @@
*/
#include "common_compiler_test.h"
-#include "compiler/compiler_backend.h"
+#include "compiler/compiler.h"
#include "compiler/oat_writer.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
@@ -84,9 +84,9 @@
ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
// TODO: make selectable.
- CompilerBackend::Kind compiler_backend = kUsePortableCompiler
- ? CompilerBackend::kPortable
- : CompilerBackend::kQuick;
+ Compiler::Kind compiler_kind = kUsePortableCompiler
+ ? Compiler::kPortable
+ : Compiler::kQuick;
InstructionSet insn_set = kIsTargetBuild ? kThumb2 : kX86;
InstructionSetFeatures insn_features;
@@ -99,7 +99,7 @@
compiler_driver_.reset(new CompilerDriver(compiler_options_.get(),
verification_results_.get(),
method_inliner_map_.get(),
- compiler_backend, insn_set,
+ compiler_kind, insn_set,
insn_features, false, NULL, 2, true, true,
timer_.get()));
jobject class_loader = NULL;
diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc
index ffd7b41..c5219a6 100644
--- a/compiler/oat_writer.cc
+++ b/compiler/oat_writer.cc
@@ -364,7 +364,7 @@
OatClass* oat_class = oat_classes_[oat_class_index];
CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index);
- if (compiled_method != NULL) {
+ if (compiled_method != nullptr) {
const std::vector<uint8_t>* portable_code = compiled_method->GetPortableCode();
const std::vector<uint8_t>* quick_code = compiled_method->GetQuickCode();
if (portable_code != nullptr) {
@@ -495,6 +495,33 @@
if (compiler_driver_->IsImage()) {
+ // Derive frame size and spill masks for native methods without code:
+ // These are generic JNI methods...
+ if (is_native && compiled_method == nullptr) {
+ // Compute Sirt size as putting _every_ reference into it, even null ones.
+ uint32_t s_len;
+ const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx), &s_len);
+ DCHECK(shorty != nullptr);
+ uint32_t refs = 1; // Native method always has "this" or class.
+ for (uint32_t i = 1; i < s_len; ++i) {
+ if (shorty[i] == 'L') {
+ refs++;
+ }
+ }
+ size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(refs);
+
+ // Get the generic spill masks and base frame size.
+ mirror::ArtMethod* callee_save_method =
+ Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
+
+ frame_size_in_bytes = callee_save_method->GetFrameSizeInBytes() + sirt_size;
+ core_spill_mask = callee_save_method->GetCoreSpillMask();
+ fp_spill_mask = callee_save_method->GetFpSpillMask();
+ mapping_table_offset = 0;
+ vmap_table_offset = 0;
+ gc_map_offset = 0;
+ }
+
ClassLinker* linker = Runtime::Current()->GetClassLinker();
// Unchecked as we hold mutator_lock_ on entry.
ScopedObjectAccessUnchecked soa(Thread::Current());
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 190c925..8c6a8cb 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -28,19 +28,25 @@
for (int i = 0; i < count; i++) {
HLocal* local = new (arena_) HLocal(i);
entry_block_->AddInstruction(local);
- locals_.Put(0, local);
+ locals_.Put(i, local);
}
}
static bool CanHandleCodeItem(const DexFile::CodeItem& code_item) {
- if (code_item.tries_size_ > 0) return false;
- if (code_item.outs_size_ > 0) return false;
- if (code_item.ins_size_ > 0) return false;
+ if (code_item.tries_size_ > 0) {
+ return false;
+ } else if (code_item.outs_size_ > 0) {
+ return false;
+ } else if (code_item.ins_size_ > 0) {
+ return false;
+ }
return true;
}
HGraph* HGraphBuilder::BuildGraph(const DexFile::CodeItem& code_item) {
- if (!CanHandleCodeItem(code_item)) return nullptr;
+ if (!CanHandleCodeItem(code_item)) {
+ return nullptr;
+ }
const uint16_t* code_ptr = code_item.insns_;
const uint16_t* code_end = code_item.insns_ + code_item.insns_size_in_code_units_;
@@ -78,7 +84,9 @@
void HGraphBuilder::MaybeUpdateCurrentBlock(size_t index) {
HBasicBlock* block = FindBlockStartingAt(index);
- if (block == nullptr) return;
+ if (block == nullptr) {
+ return;
+ }
if (current_block_ != nullptr) {
// Branching instructions clear current_block, so we know
@@ -131,7 +139,9 @@
}
bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, int32_t dex_offset) {
- if (current_block_ == nullptr) return true; // Dead code
+ if (current_block_ == nullptr) {
+ return true; // Dead code
+ }
switch (instruction.Opcode()) {
case Instruction::CONST_4: {
@@ -140,11 +150,14 @@
UpdateLocal(register_index, constant);
break;
}
- case Instruction::RETURN_VOID:
+
+ case Instruction::RETURN_VOID: {
current_block_->AddInstruction(new (arena_) HReturnVoid());
current_block_->AddSuccessor(exit_block_);
current_block_ = nullptr;
break;
+ }
+
case Instruction::IF_EQ: {
HInstruction* first = LoadLocal(instruction.VRegA());
HInstruction* second = LoadLocal(instruction.VRegB());
@@ -159,6 +172,7 @@
current_block_ = nullptr;
break;
}
+
case Instruction::GOTO:
case Instruction::GOTO_16:
case Instruction::GOTO_32: {
@@ -169,8 +183,18 @@
current_block_ = nullptr;
break;
}
+
+ case Instruction::RETURN: {
+ HInstruction* value = LoadLocal(instruction.VRegA());
+ current_block_->AddInstruction(new (arena_) HReturn(value));
+ current_block_->AddSuccessor(exit_block_);
+ current_block_ = nullptr;
+ break;
+ }
+
case Instruction::NOP:
break;
+
default:
return false;
}
@@ -178,15 +202,27 @@
}
HIntConstant* HGraphBuilder::GetConstant0() {
- if (constant0_ != nullptr) return constant0_;
- HIntConstant* constant = new(arena_) HIntConstant(0);
- entry_block_->AddInstruction(constant);
- return constant;
+ if (constant0_ != nullptr) {
+ return constant0_;
+ }
+ constant0_ = new(arena_) HIntConstant(0);
+ entry_block_->AddInstruction(constant0_);
+ return constant0_;
+}
+
+HIntConstant* HGraphBuilder::GetConstant1() {
+ if (constant1_ != nullptr) {
+ return constant1_;
+ }
+ constant1_ = new(arena_) HIntConstant(1);
+ entry_block_->AddInstruction(constant1_);
+ return constant1_;
}
HIntConstant* HGraphBuilder::GetConstant(int constant) {
switch (constant) {
case 0: return GetConstant0();
+ case 1: return GetConstant1();
default: {
HIntConstant* instruction = new (arena_) HIntConstant(constant);
entry_block_->AddInstruction(instruction);
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 399dd63..fff83a1 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -41,7 +41,8 @@
exit_block_(nullptr),
current_block_(nullptr),
graph_(nullptr),
- constant0_(nullptr) { }
+ constant0_(nullptr),
+ constant1_(nullptr) { }
HGraph* BuildGraph(const DexFile::CodeItem& code);
@@ -58,6 +59,7 @@
HBasicBlock* FindBlockStartingAt(int32_t index) const;
HIntConstant* GetConstant0();
+ HIntConstant* GetConstant1();
HIntConstant* GetConstant(int constant);
void InitializeLocals(int count);
HLocal* GetLocalAt(int register_index) const;
@@ -79,6 +81,7 @@
HGraph* graph_;
HIntConstant* constant0_;
+ HIntConstant* constant1_;
DISALLOW_COPY_AND_ASSIGN(HGraphBuilder);
};
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 01fc23b..56342aa 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -26,9 +26,11 @@
namespace art {
void CodeGenerator::Compile(CodeAllocator* allocator) {
- GenerateFrameEntry();
const GrowableArray<HBasicBlock*>* blocks = graph()->blocks();
- for (size_t i = 0; i < blocks->Size(); i++) {
+ DCHECK(blocks->Get(0) == graph()->entry_block());
+ DCHECK(GoesToNextBlock(graph()->entry_block(), blocks->Get(1)));
+ CompileEntryBlock();
+ for (size_t i = 1; i < blocks->Size(); i++) {
CompileBlock(blocks->Get(i));
}
size_t code_size = assembler_->CodeSize();
@@ -37,17 +39,54 @@
assembler_->FinalizeInstructions(code);
}
+void CodeGenerator::CompileEntryBlock() {
+ HGraphVisitor* location_builder = GetLocationBuilder();
+ // The entry block contains all locals for this method. By visiting the entry block,
+ // we're computing the required frame size.
+ for (HInstructionIterator it(graph()->entry_block()); !it.Done(); it.Advance()) {
+ HInstruction* current = it.Current();
+ // Instructions in the entry block should not generate code.
+ if (kIsDebugBuild) {
+ current->Accept(location_builder);
+ DCHECK(current->locations() == nullptr);
+ }
+ current->Accept(this);
+ }
+ GenerateFrameEntry();
+}
+
void CodeGenerator::CompileBlock(HBasicBlock* block) {
Bind(GetLabelOf(block));
+ HGraphVisitor* location_builder = GetLocationBuilder();
for (HInstructionIterator it(block); !it.Done(); it.Advance()) {
- it.Current()->Accept(this);
+ // For each instruction, we emulate a stack-based machine, where the inputs are popped from
+ // the runtime stack, and the result is pushed on the stack. We currently can do this because
+ // we do not perform any code motion, and the Dex format does not reference individual
+ // instructions but uses registers instead (our equivalent of HLocal).
+ HInstruction* current = it.Current();
+ current->Accept(location_builder);
+ InitLocations(current);
+ current->Accept(this);
+ if (current->locations() != nullptr && current->locations()->Out().IsValid()) {
+ Push(current, current->locations()->Out());
+ }
}
}
-bool CodeGenerator::GoesToNextBlock(HGoto* goto_instruction) const {
- HBasicBlock* successor = goto_instruction->GetSuccessor();
+void CodeGenerator::InitLocations(HInstruction* instruction) {
+ if (instruction->locations() == nullptr) return;
+ for (int i = 0; i < instruction->InputCount(); i++) {
+ Location location = instruction->locations()->InAt(i);
+ if (location.IsValid()) {
+ // Move the input to the desired location.
+ Move(instruction->InputAt(i), location);
+ }
+ }
+}
+
+bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
// We currently iterate over the block in insertion order.
- return goto_instruction->block()->block_id() + 1 == successor->block_id();
+ return current->block_id() + 1 == next->block_id();
}
Label* CodeGenerator::GetLabelOf(HBasicBlock* block) const {
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2a5ae7d..c406378 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
#define ART_COMPILER_OPTIMIZING_CODE_GENERATOR_H_
+#include "globals.h"
#include "instruction_set.h"
#include "memory_region.h"
#include "nodes.h"
@@ -35,12 +36,82 @@
DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
};
+/**
+ * A Location is an abstraction over the potential location
+ * of an instruction. It could be in register or stack.
+ */
+class Location : public ValueObject {
+ public:
+ template<typename T>
+ T reg() const { return static_cast<T>(reg_); }
+
+ Location() : reg_(kInvalid) { }
+ explicit Location(uword reg) : reg_(reg) { }
+
+ static Location RegisterLocation(uword reg) {
+ return Location(reg);
+ }
+
+ bool IsValid() const { return reg_ != kInvalid; }
+
+ Location(const Location& other) : reg_(other.reg_) { }
+
+ Location& operator=(const Location& other) {
+ reg_ = other.reg_;
+ return *this;
+ }
+
+ private:
+ // The target register for that location.
+ // TODO: Support stack location.
+ uword reg_;
+ static const uword kInvalid = -1;
+};
+
+/**
+ * The code generator computes LocationSummary for each instruction so that
+ * the instruction itself knows what code to generate: where to find the inputs
+ * and where to place the result.
+ *
+ * The intent is to have the code for generating the instruction independent of
+ * register allocation. A register allocator just has to provide a LocationSummary.
+ */
+class LocationSummary : public ArenaObject {
+ public:
+ explicit LocationSummary(HInstruction* instruction)
+ : inputs(instruction->block()->graph()->arena(), instruction->InputCount()) {
+ inputs.SetSize(instruction->InputCount());
+ for (int i = 0; i < instruction->InputCount(); i++) {
+ inputs.Put(i, Location());
+ }
+ }
+
+ void SetInAt(uint32_t at, Location location) {
+ inputs.Put(at, location);
+ }
+
+ Location InAt(uint32_t at) const {
+ return inputs.Get(at);
+ }
+
+ void SetOut(Location location) {
+ output = Location(location);
+ }
+
+ Location Out() const { return output; }
+
+ private:
+ GrowableArray<Location> inputs;
+ Location output;
+
+ DISALLOW_COPY_AND_ASSIGN(LocationSummary);
+};
+
class CodeGenerator : public HGraphVisitor {
public:
// Compiles the graph to executable instructions. Returns whether the compilation
// succeeded.
- static bool CompileGraph(
- HGraph* graph, InstructionSet instruction_set, CodeAllocator* allocator);
+ static bool CompileGraph(HGraph* graph, InstructionSet instruction_set, CodeAllocator* allocator);
Assembler* assembler() const { return assembler_; }
@@ -54,20 +125,31 @@
protected:
CodeGenerator(Assembler* assembler, HGraph* graph)
- : HGraphVisitor(graph), assembler_(assembler), block_labels_(graph->arena(), 0) {
+ : HGraphVisitor(graph),
+ frame_size_(0),
+ assembler_(assembler),
+ block_labels_(graph->arena(), 0) {
block_labels_.SetSize(graph->blocks()->Size());
}
Label* GetLabelOf(HBasicBlock* block) const;
- bool GoesToNextBlock(HGoto* got) const;
+ bool GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const;
- private:
+ // Frame size required for this method.
+ uint32_t frame_size_;
+
virtual void GenerateFrameEntry() = 0;
virtual void GenerateFrameExit() = 0;
virtual void Bind(Label* label) = 0;
+ virtual void Move(HInstruction* instruction, Location location) = 0;
+ virtual void Push(HInstruction* instruction, Location location) = 0;
+ virtual HGraphVisitor* GetLocationBuilder() = 0;
+ private:
+ void InitLocations(HInstruction* instruction);
void Compile(CodeAllocator* allocator);
void CompileBlock(HBasicBlock* block);
+ void CompileEntryBlock();
Assembler* const assembler_;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 356e909..62bf7ba 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -24,28 +24,52 @@
namespace arm {
void CodeGeneratorARM::GenerateFrameEntry() {
- RegList registers = (1 << LR) | (1 << FP);
- __ PushList(registers);
+ __ PushList((1 << FP) | (1 << LR));
+ __ mov(FP, ShifterOperand(SP));
+ if (frame_size_ != 0) {
+ __ AddConstant(SP, -frame_size_);
+ }
}
void CodeGeneratorARM::GenerateFrameExit() {
- RegList registers = (1 << PC) | (1 << FP);
- __ PopList(registers);
+ __ mov(SP, ShifterOperand(FP));
+ __ PopList((1 << FP) | (1 << PC));
}
void CodeGeneratorARM::Bind(Label* label) {
__ Bind(label);
}
+void CodeGeneratorARM::Push(HInstruction* instruction, Location location) {
+ __ Push(location.reg<Register>());
+}
+
+void CodeGeneratorARM::Move(HInstruction* instruction, Location location) {
+ HIntConstant* constant = instruction->AsIntConstant();
+ if (constant != nullptr) {
+ __ LoadImmediate(location.reg<Register>(), constant->value());
+ } else {
+ __ Pop(location.reg<Register>());
+ }
+}
+
+void LocationsBuilderARM::VisitGoto(HGoto* got) {
+ got->set_locations(nullptr);
+}
+
void CodeGeneratorARM::VisitGoto(HGoto* got) {
HBasicBlock* successor = got->GetSuccessor();
if (graph()->exit_block() == successor) {
GenerateFrameExit();
- } else if (!GoesToNextBlock(got)) {
+ } else if (!GoesToNextBlock(got->block(), successor)) {
__ b(GetLabelOf(successor));
}
}
+void LocationsBuilderARM::VisitExit(HExit* exit) {
+ exit->set_locations(nullptr);
+}
+
void CodeGeneratorARM::VisitExit(HExit* exit) {
if (kIsDebugBuild) {
__ Comment("Unreachable");
@@ -53,33 +77,101 @@
}
}
+void LocationsBuilderARM::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(if_instr);
+ locations->SetInAt(0, Location(R0));
+ if_instr->set_locations(locations);
+}
+
void CodeGeneratorARM::VisitIf(HIf* if_instr) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ // TODO: Generate the input as a condition, instead of materializing in a register.
+ __ cmp(if_instr->locations()->InAt(0).reg<Register>(), ShifterOperand(0));
+ __ b(GetLabelOf(if_instr->IfFalseSuccessor()), EQ);
+ if (!GoesToNextBlock(if_instr->block(), if_instr->IfTrueSuccessor())) {
+ __ b(GetLabelOf(if_instr->IfTrueSuccessor()));
+ }
+}
+
+void LocationsBuilderARM::VisitEqual(HEqual* equal) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(equal);
+ locations->SetInAt(0, Location(R0));
+ locations->SetInAt(1, Location(R1));
+ locations->SetOut(Location(R0));
+ equal->set_locations(locations);
}
void CodeGeneratorARM::VisitEqual(HEqual* equal) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ LocationSummary* locations = equal->locations();
+ __ teq(locations->InAt(0).reg<Register>(),
+ ShifterOperand(locations->InAt(1).reg<Register>()));
+ __ mov(locations->Out().reg<Register>(), ShifterOperand(1), EQ);
+ __ mov(locations->Out().reg<Register>(), ShifterOperand(0), NE);
+}
+
+void LocationsBuilderARM::VisitLocal(HLocal* local) {
+ local->set_locations(nullptr);
}
void CodeGeneratorARM::VisitLocal(HLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ DCHECK_EQ(local->block(), graph()->entry_block());
+ frame_size_ += kWordSize;
}
-void CodeGeneratorARM::VisitLoadLocal(HLoadLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(load);
+ locations->SetOut(Location(R0));
+ load->set_locations(locations);
}
-void CodeGeneratorARM::VisitStoreLocal(HStoreLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+static int32_t GetStackSlot(HLocal* local) {
+ // We are currently using FP to access locals, so the offset must be negative.
+ return (local->reg_number() + 1) * -kWordSize;
+}
+
+void CodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
+ LocationSummary* locations = load->locations();
+ __ LoadFromOffset(kLoadWord, locations->Out().reg<Register>(),
+ FP, GetStackSlot(load->GetLocal()));
+}
+
+void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(store);
+ locations->SetInAt(1, Location(R0));
+ store->set_locations(locations);
+}
+
+void CodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
+ LocationSummary* locations = store->locations();
+ __ StoreToOffset(kStoreWord, locations->InAt(1).reg<Register>(),
+ FP, GetStackSlot(store->GetLocal()));
+}
+
+void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
+ constant->set_locations(nullptr);
}
void CodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ // Will be generated at use site.
+}
+
+void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
+ ret->set_locations(nullptr);
}
void CodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
GenerateFrameExit();
}
+void LocationsBuilderARM::VisitReturn(HReturn* ret) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(ret);
+ locations->SetInAt(0, Location(R0));
+ ret->set_locations(locations);
+}
+
+void CodeGeneratorARM::VisitReturn(HReturn* ret) {
+ DCHECK_EQ(ret->locations()->InAt(0).reg<Register>(), R0);
+ GenerateFrameExit();
+}
+
} // namespace arm
} // namespace art
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 27a83b8..33d8e62 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -27,10 +27,25 @@
namespace arm {
+class LocationsBuilderARM : public HGraphVisitor {
+ public:
+ explicit LocationsBuilderARM(HGraph* graph) : HGraphVisitor(graph) { }
+
+#define DECLARE_VISIT_INSTRUCTION(name) \
+ virtual void Visit##name(H##name* instr);
+
+ FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderARM);
+};
+
class CodeGeneratorARM : public CodeGenerator {
public:
CodeGeneratorARM(Assembler* assembler, HGraph* graph)
- : CodeGenerator(assembler, graph) { }
+ : CodeGenerator(assembler, graph), location_builder_(graph) { }
// Visit functions for instruction classes.
#define DECLARE_VISIT_INSTRUCTION(name) \
@@ -40,10 +55,19 @@
#undef DECLARE_VISIT_INSTRUCTION
+ protected:
+ virtual void GenerateFrameEntry() OVERRIDE;
+ virtual void GenerateFrameExit() OVERRIDE;
+ virtual void Bind(Label* label) OVERRIDE;
+ virtual void Move(HInstruction* instruction, Location location) OVERRIDE;
+ virtual void Push(HInstruction* instruction, Location location) OVERRIDE;
+
+ virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ return &location_builder_;
+ }
+
private:
- virtual void GenerateFrameEntry();
- virtual void GenerateFrameExit();
- virtual void Bind(Label* label);
+ LocationsBuilderARM location_builder_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM);
};
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ab34599..81ada4d 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -26,6 +26,10 @@
void CodeGeneratorX86::GenerateFrameEntry() {
__ pushl(EBP);
__ movl(EBP, ESP);
+
+ if (frame_size_ != 0) {
+ __ subl(ESP, Immediate(frame_size_));
+ }
}
void CodeGeneratorX86::GenerateFrameExit() {
@@ -37,15 +41,36 @@
__ Bind(label);
}
+void CodeGeneratorX86::Push(HInstruction* instruction, Location location) {
+ __ pushl(location.reg<Register>());
+}
+
+void CodeGeneratorX86::Move(HInstruction* instruction, Location location) {
+ HIntConstant* constant = instruction->AsIntConstant();
+ if (constant != nullptr) {
+ __ movl(location.reg<Register>(), Immediate(constant->value()));
+ } else {
+ __ popl(location.reg<Register>());
+ }
+}
+
+void LocationsBuilderX86::VisitGoto(HGoto* got) {
+ got->set_locations(nullptr);
+}
+
void CodeGeneratorX86::VisitGoto(HGoto* got) {
HBasicBlock* successor = got->GetSuccessor();
if (graph()->exit_block() == successor) {
GenerateFrameExit();
- } else if (!GoesToNextBlock(got)) {
+ } else if (!GoesToNextBlock(got->block(), successor)) {
__ jmp(GetLabelOf(successor));
}
}
+void LocationsBuilderX86::VisitExit(HExit* exit) {
+ exit->set_locations(nullptr);
+}
+
void CodeGeneratorX86::VisitExit(HExit* exit) {
if (kIsDebugBuild) {
__ Comment("Unreachable");
@@ -53,28 +78,81 @@
}
}
+void LocationsBuilderX86::VisitIf(HIf* if_instr) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(if_instr);
+ locations->SetInAt(0, Location(EAX));
+ if_instr->set_locations(locations);
+}
+
void CodeGeneratorX86::VisitIf(HIf* if_instr) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ // TODO: Generate the input as a condition, instead of materializing in a register.
+ __ cmpl(if_instr->locations()->InAt(0).reg<Register>(), Immediate(0));
+ __ j(kEqual, GetLabelOf(if_instr->IfFalseSuccessor()));
+ if (!GoesToNextBlock(if_instr->block(), if_instr->IfTrueSuccessor())) {
+ __ jmp(GetLabelOf(if_instr->IfTrueSuccessor()));
+ }
+}
+
+void LocationsBuilderX86::VisitLocal(HLocal* local) {
+ local->set_locations(nullptr);
}
void CodeGeneratorX86::VisitLocal(HLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ DCHECK_EQ(local->block(), graph()->entry_block());
+ frame_size_ += kWordSize;
}
-void CodeGeneratorX86::VisitLoadLocal(HLoadLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+void LocationsBuilderX86::VisitLoadLocal(HLoadLocal* local) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(local);
+ locations->SetOut(Location(EAX));
+ local->set_locations(locations);
}
-void CodeGeneratorX86::VisitStoreLocal(HStoreLocal* local) {
- LOG(FATAL) << "UNIMPLEMENTED";
+static int32_t GetStackSlot(HLocal* local) {
+ // We are currently using EBP to access locals, so the offset must be negative.
+ return (local->reg_number() + 1) * -kWordSize;
+}
+
+void CodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
+ __ movl(load->locations()->Out().reg<Register>(),
+ Address(EBP, GetStackSlot(load->GetLocal())));
+}
+
+void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* local) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(local);
+ locations->SetInAt(1, Location(EAX));
+ local->set_locations(locations);
+}
+
+void CodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
+ __ movl(Address(EBP, GetStackSlot(store->GetLocal())),
+ store->locations()->InAt(1).reg<Register>());
+}
+
+void LocationsBuilderX86::VisitEqual(HEqual* equal) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(equal);
+ locations->SetInAt(0, Location(EAX));
+ locations->SetInAt(1, Location(ECX));
+ locations->SetOut(Location(EAX));
+ equal->set_locations(locations);
}
void CodeGeneratorX86::VisitEqual(HEqual* equal) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ __ cmpl(equal->locations()->InAt(0).reg<Register>(),
+ equal->locations()->InAt(1).reg<Register>());
+ __ setb(kEqual, equal->locations()->Out().reg<Register>());
+}
+
+void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
+ constant->set_locations(nullptr);
}
void CodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
- LOG(FATAL) << "UNIMPLEMENTED";
+ // Will be generated at use site.
+}
+
+void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
+ ret->set_locations(nullptr);
}
void CodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
@@ -82,5 +160,17 @@
__ ret();
}
+void LocationsBuilderX86::VisitReturn(HReturn* ret) {
+ LocationSummary* locations = new (graph()->arena()) LocationSummary(ret);
+ locations->SetInAt(0, Location(EAX));
+ ret->set_locations(locations);
+}
+
+void CodeGeneratorX86::VisitReturn(HReturn* ret) {
+ DCHECK_EQ(ret->locations()->InAt(0).reg<Register>(), EAX);
+ GenerateFrameExit();
+ __ ret();
+}
+
} // namespace x86
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 7dae2ab..dd146b8 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -27,12 +27,10 @@
namespace x86 {
-class CodeGeneratorX86 : public CodeGenerator {
+class LocationsBuilderX86 : public HGraphVisitor {
public:
- CodeGeneratorX86(Assembler* assembler, HGraph* graph)
- : CodeGenerator(assembler, graph) { }
+ explicit LocationsBuilderX86(HGraph* graph) : HGraphVisitor(graph) { }
- // Visit functions for instruction classes.
#define DECLARE_VISIT_INSTRUCTION(name) \
virtual void Visit##name(H##name* instr);
@@ -41,9 +39,34 @@
#undef DECLARE_VISIT_INSTRUCTION
private:
- virtual void GenerateFrameEntry();
- virtual void GenerateFrameExit();
- virtual void Bind(Label* label);
+ DISALLOW_COPY_AND_ASSIGN(LocationsBuilderX86);
+};
+
+class CodeGeneratorX86 : public CodeGenerator {
+ public:
+ CodeGeneratorX86(Assembler* assembler, HGraph* graph)
+ : CodeGenerator(assembler, graph), location_builder_(graph) { }
+
+#define DECLARE_VISIT_INSTRUCTION(name) \
+ virtual void Visit##name(H##name* instr);
+
+ FOR_EACH_INSTRUCTION(DECLARE_VISIT_INSTRUCTION)
+
+#undef DECLARE_VISIT_INSTRUCTION
+
+ protected:
+ virtual void GenerateFrameEntry() OVERRIDE;
+ virtual void GenerateFrameExit() OVERRIDE;
+ virtual void Bind(Label* label) OVERRIDE;
+ virtual void Move(HInstruction* instruction, Location location) OVERRIDE;
+ virtual void Push(HInstruction* instruction, Location location) OVERRIDE;
+
+ virtual HGraphVisitor* GetLocationBuilder() OVERRIDE {
+ return &location_builder_;
+ }
+
+ private:
+ LocationsBuilderX86 location_builder_;
DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86);
};
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 6d4588d..5020dd0 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -45,7 +45,7 @@
DISALLOW_COPY_AND_ASSIGN(ExecutableMemoryAllocator);
};
-static void TestCode(const uint16_t* data) {
+static void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0) {
ArenaPool pool;
ArenaAllocator arena(&pool);
HGraphBuilder builder(&arena);
@@ -54,13 +54,19 @@
ASSERT_NE(graph, nullptr);
ExecutableMemoryAllocator allocator;
CHECK(CodeGenerator::CompileGraph(graph, kX86, &allocator));
- typedef void (*fptr)();
+ typedef int32_t (*fptr)();
#if defined(__i386__)
- reinterpret_cast<fptr>(allocator.memory())();
+ int32_t result = reinterpret_cast<fptr>(allocator.memory())();
+ if (has_result) {
+ CHECK_EQ(result, expected);
+ }
#endif
CHECK(CodeGenerator::CompileGraph(graph, kArm, &allocator));
#if defined(__arm__)
- reinterpret_cast<fptr>(allocator.memory())();
+ int32_t result = reinterpret_cast<fptr>(allocator.memory())();
+ if (has_result) {
+ CHECK_EQ(result, expected);
+ }
#endif
}
@@ -69,7 +75,7 @@
TestCode(data);
}
-TEST(PrettyPrinterTest, CFG1) {
+TEST(CodegenTest, CFG1) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
@@ -77,7 +83,7 @@
TestCode(data);
}
-TEST(PrettyPrinterTest, CFG2) {
+TEST(CodegenTest, CFG2) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::GOTO | 0x100,
@@ -86,7 +92,7 @@
TestCode(data);
}
-TEST(PrettyPrinterTest, CFG3) {
+TEST(CodegenTest, CFG3) {
const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200,
Instruction::RETURN_VOID,
@@ -109,7 +115,7 @@
TestCode(data3);
}
-TEST(PrettyPrinterTest, CFG4) {
+TEST(CodegenTest, CFG4) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID,
Instruction::GOTO | 0x100,
@@ -118,4 +124,70 @@
TestCode(data);
}
+TEST(CodegenTest, CFG5) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::IF_EQ, 3,
+ Instruction::GOTO | 0x100,
+ Instruction::RETURN_VOID);
+
+ TestCode(data);
+}
+
+TEST(CodegenTest, IntConstant) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::RETURN_VOID);
+
+ TestCode(data);
+}
+
+TEST(CodegenTest, Return1) {
+ const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::RETURN | 0);
+
+ TestCode(data, true, 0);
+}
+
+TEST(CodegenTest, Return2) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::CONST_4 | 0 | 1 << 8,
+ Instruction::RETURN | 1 << 8);
+
+ TestCode(data, true, 0);
+}
+
+TEST(CodegenTest, Return3) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::CONST_4 | 1 << 8 | 1 << 12,
+ Instruction::RETURN | 1 << 8);
+
+ TestCode(data, true, 1);
+}
+
+TEST(CodegenTest, ReturnIf1) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::CONST_4 | 1 << 8 | 1 << 12,
+ Instruction::IF_EQ, 3,
+ Instruction::RETURN | 0 << 8,
+ Instruction::RETURN | 1 << 8);
+
+ TestCode(data, true, 1);
+}
+
+TEST(CodegenTest, ReturnIf2) {
+ const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+ Instruction::CONST_4 | 0 | 0,
+ Instruction::CONST_4 | 1 << 8 | 1 << 12,
+ Instruction::IF_EQ | 0 << 4 | 1 << 8, 3,
+ Instruction::RETURN | 0 << 8,
+ Instruction::RETURN | 1 << 8);
+
+ TestCode(data, true, 0);
+}
+
} // namespace art
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 16dfb94..a6f3f5a 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -121,6 +121,7 @@
}
void HBasicBlock::AddInstruction(HInstruction* instruction) {
+ DCHECK(instruction->block() == nullptr);
instruction->set_block(this);
instruction->set_id(graph()->GetNextInstructionId());
if (first_instruction_ == nullptr) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index bb08bd0..9418599 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -27,6 +27,7 @@
class HInstruction;
class HIntConstant;
class HGraphVisitor;
+class LocationSummary;
static const int kDefaultNumberOfBlocks = 8;
static const int kDefaultNumberOfSuccessors = 2;
@@ -186,12 +187,18 @@
M(IntConstant) \
M(LoadLocal) \
M(Local) \
+ M(Return) \
M(ReturnVoid) \
M(StoreLocal) \
+#define FORWARD_DECLARATION(type) class H##type;
+FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
+#undef FORWARD_DECLARATION
+
#define DECLARE_INSTRUCTION(type) \
virtual void Accept(HGraphVisitor* visitor); \
virtual const char* DebugName() const { return #type; } \
+ virtual H##type* As##type() { return this; } \
class HUseListNode : public ArenaObject {
public:
@@ -210,7 +217,14 @@
class HInstruction : public ArenaObject {
public:
- HInstruction() : previous_(nullptr), next_(nullptr), block_(nullptr), id_(-1), uses_(nullptr) { }
+ HInstruction()
+ : previous_(nullptr),
+ next_(nullptr),
+ block_(nullptr),
+ id_(-1),
+ uses_(nullptr),
+ locations_(nullptr) { }
+
virtual ~HInstruction() { }
HInstruction* next() const { return next_; }
@@ -236,6 +250,15 @@
int id() const { return id_; }
void set_id(int id) { id_ = id; }
+ LocationSummary* locations() const { return locations_; }
+ void set_locations(LocationSummary* locations) { locations_ = locations; }
+
+#define INSTRUCTION_TYPE_CHECK(type) \
+ virtual H##type* As##type() { return nullptr; }
+
+ FOR_EACH_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
+#undef INSTRUCTION_TYPE_CHECK
+
private:
HInstruction* previous_;
HInstruction* next_;
@@ -248,6 +271,9 @@
HUseListNode* uses_;
+ // Set by the code generator.
+ LocationSummary* locations_;
+
friend class HBasicBlock;
DISALLOW_COPY_AND_ASSIGN(HInstruction);
@@ -386,6 +412,20 @@
DISALLOW_COPY_AND_ASSIGN(HReturnVoid);
};
+// Represents dex's RETURN opcodes. A HReturn is a control flow
+// instruction that branches to the exit block.
+class HReturn : public HTemplateInstruction<1> {
+ public:
+ explicit HReturn(HInstruction* value) {
+ SetRawInputAt(0, value);
+ }
+
+ DECLARE_INSTRUCTION(Return)
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HReturn);
+};
+
// The exit instruction is the only instruction of the exit block.
// Instructions aborting the method (HThrow and HReturn) must branch to the
// exit block.
@@ -422,6 +462,14 @@
SetRawInputAt(0, input);
}
+ HBasicBlock* IfTrueSuccessor() const {
+ return block()->successors()->Get(0);
+ }
+
+ HBasicBlock* IfFalseSuccessor() const {
+ return block()->successors()->Get(1);
+ }
+
DECLARE_INSTRUCTION(If)
private:
@@ -449,9 +497,11 @@
DECLARE_INSTRUCTION(Local)
+ uint16_t reg_number() const { return reg_number_; }
+
private:
- // The register number in Dex.
- uint16_t reg_number_;
+ // The Dex register number.
+ const uint16_t reg_number_;
DISALLOW_COPY_AND_ASSIGN(HLocal);
};
@@ -463,6 +513,8 @@
SetRawInputAt(0, local);
}
+ HLocal* GetLocal() const { return reinterpret_cast<HLocal*>(InputAt(0)); }
+
DECLARE_INSTRUCTION(LoadLocal)
private:
@@ -478,6 +530,8 @@
SetRawInputAt(1, value);
}
+ HLocal* GetLocal() const { return reinterpret_cast<HLocal*>(InputAt(0)); }
+
DECLARE_INSTRUCTION(StoreLocal)
private:
@@ -490,6 +544,8 @@
public:
explicit HIntConstant(int32_t value) : value_(value) { }
+ int32_t value() const { return value_; }
+
DECLARE_INSTRUCTION(IntConstant)
private:
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
new file mode 100644
index 0000000..73323a4
--- /dev/null
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compilers.h"
+
+namespace art {
+
+CompiledMethod* OptimizingCompiler::TryCompile(CompilerDriver& driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ jobject class_loader,
+ const DexFile& dex_file) const {
+ return nullptr;
+}
+
+} // namespace art
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index bf13a41..67c4850 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -26,4 +26,7 @@
#define ONE_REGISTER_CODE_ITEM(...) \
{ 1, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
+#define TWO_REGISTERS_CODE_ITEM(...) \
+ { 2, 0, 0, 0, 0, 0, NUM_INSTRUCTIONS(__VA_ARGS__), 0, __VA_ARGS__ }
+
#endif // ART_COMPILER_OPTIMIZING_OPTIMIZING_UNIT_TEST_H_
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index cc78816..b7d8d00 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -30,7 +30,7 @@
#include "base/timing_logger.h"
#include "base/unix_file/fd_file.h"
#include "class_linker.h"
-#include "compiler_backend.h"
+#include "compiler.h"
#include "compiler_callbacks.h"
#include "dex_file-inl.h"
#include "dex/verification_results.h"
@@ -147,7 +147,7 @@
UsageError(" Example: --instruction-set-features=div");
UsageError(" Default: default");
UsageError("");
- UsageError(" --compiler-backend=(Quick|QuickGBC|Portable): select compiler backend");
+ UsageError(" --compiler-backend=(Quick|Optimizing|Portable): select compiler backend");
UsageError(" set.");
UsageError(" Example: --compiler-backend=Portable");
UsageError(" Default: Quick");
@@ -212,7 +212,7 @@
static bool Create(Dex2Oat** p_dex2oat,
const Runtime::Options& runtime_options,
const CompilerOptions& compiler_options,
- CompilerBackend::Kind compiler_backend,
+ Compiler::Kind compiler_kind,
InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
VerificationResults* verification_results,
@@ -222,7 +222,7 @@
CHECK(verification_results != nullptr);
CHECK(method_inliner_map != nullptr);
UniquePtr<Dex2Oat> dex2oat(new Dex2Oat(&compiler_options,
- compiler_backend,
+ compiler_kind,
instruction_set,
instruction_set_features,
verification_results,
@@ -335,7 +335,7 @@
UniquePtr<CompilerDriver> driver(new CompilerDriver(compiler_options_,
verification_results_,
method_inliner_map_,
- compiler_backend_,
+ compiler_kind_,
instruction_set_,
instruction_set_features_,
image,
@@ -346,7 +346,7 @@
&compiler_phases_timings,
profile_file));
- driver->GetCompilerBackend()->SetBitcodeFileName(*driver.get(), bitcode_filename);
+ driver->GetCompiler()->SetBitcodeFileName(*driver.get(), bitcode_filename);
driver->CompileAll(class_loader, dex_files, &timings);
@@ -410,14 +410,14 @@
private:
explicit Dex2Oat(const CompilerOptions* compiler_options,
- CompilerBackend::Kind compiler_backend,
+ Compiler::Kind compiler_kind,
InstructionSet instruction_set,
InstructionSetFeatures instruction_set_features,
VerificationResults* verification_results,
DexFileToMethodInlinerMap* method_inliner_map,
size_t thread_count)
: compiler_options_(compiler_options),
- compiler_backend_(compiler_backend),
+ compiler_kind_(compiler_kind),
instruction_set_(instruction_set),
instruction_set_features_(instruction_set_features),
verification_results_(verification_results),
@@ -482,7 +482,7 @@
}
const CompilerOptions* const compiler_options_;
- const CompilerBackend::Kind compiler_backend_;
+ const Compiler::Kind compiler_kind_;
const InstructionSet instruction_set_;
const InstructionSetFeatures instruction_set_features_;
@@ -722,9 +722,9 @@
std::string android_root;
std::vector<const char*> runtime_args;
int thread_count = sysconf(_SC_NPROCESSORS_CONF);
- CompilerBackend::Kind compiler_backend = kUsePortableCompiler
- ? CompilerBackend::kPortable
- : CompilerBackend::kQuick;
+ Compiler::Kind compiler_kind = kUsePortableCompiler
+ ? Compiler::kPortable
+ : Compiler::kQuick;
const char* compiler_filter_string = NULL;
int huge_method_threshold = CompilerOptions::kDefaultHugeMethodThreshold;
int large_method_threshold = CompilerOptions::kDefaultLargeMethodThreshold;
@@ -738,8 +738,12 @@
#if defined(__arm__)
InstructionSet instruction_set = kThumb2;
+#elif defined(__aarch64__)
+ InstructionSet instruction_set = kArm64;
#elif defined(__i386__)
InstructionSet instruction_set = kX86;
+#elif defined(__x86_64__)
+ InstructionSet instruction_set = kX86_64;
#elif defined(__mips__)
InstructionSet instruction_set = kMips;
#else
@@ -840,9 +844,11 @@
} else if (option.starts_with("--compiler-backend=")) {
StringPiece backend_str = option.substr(strlen("--compiler-backend=")).data();
if (backend_str == "Quick") {
- compiler_backend = CompilerBackend::kQuick;
+ compiler_kind = Compiler::kQuick;
+ } else if (backend_str == "Optimizing") {
+ compiler_kind = Compiler::kOptimizing;
} else if (backend_str == "Portable") {
- compiler_backend = CompilerBackend::kPortable;
+ compiler_kind = Compiler::kPortable;
}
} else if (option.starts_with("--compiler-filter=")) {
compiler_filter_string = option.substr(strlen("--compiler-filter=")).data();
@@ -1097,7 +1103,7 @@
if (!Dex2Oat::Create(&p_dex2oat,
runtime_options,
compiler_options,
- compiler_backend,
+ compiler_kind,
instruction_set,
instruction_set_features,
&verification_results,
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index ca3516e..5fbf8cb 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -646,7 +646,43 @@
UNIMPLEMENTED art_quick_get32_static
UNIMPLEMENTED art_quick_get64_static
UNIMPLEMENTED art_quick_get_obj_static
-UNIMPLEMENTED art_quick_proxy_invoke_handler
+
+DEFINE_FUNCTION art_quick_proxy_invoke_handler
+ // Save callee and GPR args, mixed together to agree with core spills bitmap of ref. and args
+ // callee save frame.
+ PUSH r15 // Callee save.
+ PUSH r14 // Callee save.
+ PUSH r13 // Callee save.
+ PUSH r12 // Callee save.
+ PUSH r9 // Quick arg 5.
+ PUSH r8 // Quick arg 4.
+ PUSH rsi // Quick arg 1.
+ PUSH rbp // Callee save.
+ PUSH rbx // Callee save.
+ PUSH rdx // Quick arg 2.
+ PUSH rcx // Quick arg 3.
+ // Create space for FPR args and create 2 slots, 1 of padding and 1 for the ArtMethod*.
+ subq LITERAL(80), %rsp
+ CFI_ADJUST_CFA_OFFSET(80)
+ // Save FPRs.
+ movq %xmm0, 16(%rsp)
+ movq %xmm1, 24(%rsp)
+ movq %xmm2, 32(%rsp)
+ movq %xmm3, 40(%rsp)
+ movq %xmm4, 48(%rsp)
+ movq %xmm5, 56(%rsp)
+ movq %xmm6, 64(%rsp)
+ movq %xmm7, 72(%rsp)
+ // Store proxy method to bottom of stack.
+ movq %rdi, 0(%rsp)
+ movq %gs:THREAD_SELF_OFFSET, %rdx // Pass Thread::Current().
+ movq %rsp, %rcx // Pass SP.
+ call PLT_SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
+ movq %rax, %xmm0 // Copy return value in case of float returns.
+ addq LITERAL(168), %rsp // Pop arguments.
+ CFI_ADJUST_CFA_OFFSET(-168)
+ RETURN_OR_DELIVER_PENDING_EXCEPTION
+END_FUNCTION art_quick_proxy_invoke_handler
/*
* Called to resolve an imt conflict.
@@ -675,6 +711,13 @@
* | |
* | caller method... |
* #-------------------# <--- SP on entry
+ *
+ * |
+ * V
+ *
+ * #-------------------#
+ * | caller method... |
+ * #-------------------#
* | Return |
* | R15 | callee save
* | R14 | callee save
@@ -698,22 +741,7 @@
* | Padding |
* | RDI/Method* | <- sp
* #-------------------#
- * | local ref cookie | // 4B
- * | padding | // 4B
- * #----------#--------#
- * | | | |
- * | Temp/ | SIRT | | Scratch frame is 4k
- * | Scratch | v |
- * | Frame #--------|
- * | |
- * | #--------|
- * | | ^ |
- * | | JNI | |
- * | | Stack| |
- * #----------#--------# <--- SP on native call (needs alignment?)
- * | |
- * | Stack for Regs | The trampoline assembly will pop these values
- * | | into registers for native call
+ * | Scratch Alloca | 5K scratch space
* #---------#---------#
* | | sp* |
* | Tramp. #---------#
@@ -721,6 +749,35 @@
* | Tramp. #---------#
* | | method |
* #-------------------# <--- SP on artQuickGenericJniTrampoline
+ *
+ * |
+ * v artQuickGenericJniTrampoline
+ *
+ * #-------------------#
+ * | caller method... |
+ * #-------------------#
+ * | Return |
+ * | Callee-Save Data |
+ * #-------------------#
+ * | SIRT |
+ * #-------------------#
+ * | Method* | <--- (1)
+ * #-------------------#
+ * | local ref cookie | // 4B
+ * | SIRT size | // 4B TODO: roll into call stack alignment?
+ * #-------------------#
+ * | JNI Call Stack |
+ * #-------------------# <--- SP on native call
+ * | |
+ * | Stack for Regs | The trampoline assembly will pop these values
+ * | | into registers for native call
+ * #-------------------#
+ * | Native code ptr |
+ * #-------------------#
+ * | Free scratch |
+ * #-------------------#
+ * | Ptr to (1) | <--- RSP
+ * #-------------------#
*/
/*
* Called to do a generic JNI down-call
@@ -753,7 +810,8 @@
// Store native ArtMethod* to bottom of stack.
movq %rdi, 0(%rsp)
movq %rsp, %rbp // save SP at callee-save frame
- CFI_DEF_CFA_REGISTER(rbp)
+ movq %rsp, %rbx
+ CFI_DEF_CFA_REGISTER(rbx)
//
// reserve a lot of space
//
@@ -779,12 +837,19 @@
movq %gs:THREAD_SELF_OFFSET, %rdi
movq %rbp, %rsi
call PLT_SYMBOL(artQuickGenericJniTrampoline) // (Thread*, sp)
- test %rax, %rax // Check for error, negative value.
+
+ // At the bottom of the alloca we now have the pointer to the method (= bottom of the
+ // callee-save frame); pop it to get the adjusted frame pointer.
+ popq %rbp
+
+ // Check for error, negative value.
+ test %rax, %rax
js .Lentry_error
- // release part of the alloca
+
+ // release part of the alloca, get the code pointer
addq %rax, %rsp
- // get the code pointer
popq %rax
+
// pop from the register-passing alloca region
// what's the right layout?
popq %rdi
@@ -817,7 +882,7 @@
call PLT_SYMBOL(artQuickGenericJniEndTrampoline)
// Tear down the alloca.
- movq %rbp, %rsp
+ movq %rbx, %rsp
CFI_DEF_CFA_REGISTER(rsp)
// Pending exceptions possible.
@@ -855,12 +920,35 @@
movq %rax, %xmm0
ret
.Lentry_error:
- movq %rbp, %rsp
+ movq %rbx, %rsp
+ CFI_DEF_CFA_REGISTER(rsp)
.Lexception_in_native:
- CFI_REL_OFFSET(rsp,176)
// TODO: the SIRT contains the this pointer which is used by the debugger for exception
// delivery.
- RESTORE_REF_AND_ARGS_CALLEE_SAVE_FRAME
+ movq %xmm0, 16(%rsp) // doesn't make sense!!!
+ movq 24(%rsp), %xmm1 // neither does this!!!
+ movq 32(%rsp), %xmm2
+ movq 40(%rsp), %xmm3
+ movq 48(%rsp), %xmm4
+ movq 56(%rsp), %xmm5
+ movq 64(%rsp), %xmm6
+ movq 72(%rsp), %xmm7
+ // was 80 bytes
+ addq LITERAL(80), %rsp
+ CFI_ADJUST_CFA_OFFSET(-80)
+ // Save callee and GPR args, mixed together to agree with core spills bitmap.
+ POP rcx // Arg.
+ POP rdx // Arg.
+ POP rbx // Callee save.
+ POP rbp // Callee save.
+ POP rsi // Arg.
+ POP r8 // Arg.
+ POP r9 // Arg.
+ POP r12 // Callee save.
+ POP r13 // Callee save.
+ POP r14 // Callee save.
+ POP r15 // Callee save.
+
DELIVER_PENDING_EXCEPTION
END_FUNCTION art_quick_generic_jni_trampoline
diff --git a/runtime/base/hex_dump_test.cc b/runtime/base/hex_dump_test.cc
index d950961..3d782b2 100644
--- a/runtime/base/hex_dump_test.cc
+++ b/runtime/base/hex_dump_test.cc
@@ -24,11 +24,18 @@
namespace art {
+#if defined(__LP64__)
+#define ZEROPREFIX "00000000"
+#else
+#define ZEROPREFIX
+#endif
+
TEST(HexDump, OneLine) {
const char* test_text = "0123456789abcdef";
std::ostringstream oss;
oss << HexDump(test_text, strlen(test_text), false, "");
EXPECT_STREQ(oss.str().c_str(),
+ ZEROPREFIX
"00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 0123456789abcdef");
}
@@ -37,7 +44,9 @@
std::ostringstream oss;
oss << HexDump(test_text, strlen(test_text), false, "");
EXPECT_STREQ(oss.str().c_str(),
+ ZEROPREFIX
"00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 0123456789abcdef\n"
+ ZEROPREFIX
"00000010: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF");
}
@@ -56,7 +65,7 @@
std::ostringstream oss;
oss << HexDump(test_text, strlen(test_text), false, "test prefix: ");
EXPECT_STREQ(oss.str().c_str(),
- "test prefix: 00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 "
+ "test prefix: " ZEROPREFIX "00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 "
"0123456789abcdef");
}
diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
index 08de95f..9489d9b 100644
--- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
+++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc
@@ -856,9 +856,10 @@
*
* void PushStack(uintptr_t): Push a value to the stack.
*
- * uintptr_t PushSirt(mirror::Object* ref): Add a reference to the Sirt. Is guaranteed != nullptr.
+ * uintptr_t PushSirt(mirror::Object* ref): Add a reference to the Sirt. The ref _may_ be
+ * nullptr, which matters for null initialization.
* Must return the jobject, that is, the reference to the
- * entry in the Sirt.
+ * entry in the Sirt (nullptr if necessary).
*
*/
template <class T> class BuildGenericJniFrameStateMachine {
@@ -948,12 +949,7 @@
}
void AdvanceSirt(mirror::Object* ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uintptr_t sirtRef;
- if (ptr != nullptr) {
- sirtRef = PushSirt(ptr);
- } else {
- sirtRef = reinterpret_cast<uintptr_t>(nullptr);
- }
+ uintptr_t sirtRef = PushSirt(ptr);
if (HaveSirtGpr()) {
gpr_index_--;
PushGpr(sirtRef);
@@ -1155,49 +1151,49 @@
public:
ComputeGenericJniFrameSize() : num_sirt_references_(0), num_stack_entries_(0) {}
- // (negative) offset from SP to top of Sirt.
- uint32_t GetSirtOffset() {
- return 8;
- }
-
- uint32_t GetFirstSirtEntryOffset() {
- return GetSirtOffset() + sizeof(StackReference<mirror::Object>);
- }
-
- uint32_t GetNumSirtReferences() {
- return num_sirt_references_;
- }
-
uint32_t GetStackSize() {
return num_stack_entries_ * sizeof(uintptr_t);
}
- void ComputeLayout(bool is_static, const char* shorty, uint32_t shorty_len, void* sp,
- StackReference<mirror::Object>** start_sirt, StackIndirectReferenceTable** table,
- uint32_t* sirt_entries, uintptr_t** start_stack, uintptr_t** start_gpr,
- uint32_t** start_fpr, void** code_return, size_t* overall_size)
+ // WARNING: After this, *sp won't be pointing to the method anymore!
+ void ComputeLayout(mirror::ArtMethod*** m, bool is_static, const char* shorty, uint32_t shorty_len,
+ void* sp, StackIndirectReferenceTable** table, uint32_t* sirt_entries,
+ uintptr_t** start_stack, uintptr_t** start_gpr, uint32_t** start_fpr,
+ void** code_return, size_t* overall_size)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
ComputeAll(is_static, shorty, shorty_len);
+ mirror::ArtMethod* method = **m;
+
uint8_t* sp8 = reinterpret_cast<uint8_t*>(sp);
- *start_sirt = reinterpret_cast<StackReference<mirror::Object>*>(sp8-GetFirstSirtEntryOffset());
- // Add padding entries if necessary for alignment.
- if (sizeof(uintptr_t) < sizeof(uint64_t)) {
- uint32_t size = sizeof(uintptr_t) * num_sirt_references_;
- uint32_t rem = size % 8;
- if (rem != 0) {
- DCHECK_EQ(rem, 4U);
- num_sirt_references_++;
- }
- }
+ // First, fix up the layout of the callee-save frame.
+ // We have to squeeze in the Sirt, and relocate the method pointer.
+
+ // "Free" the slot for the method.
+ sp8 += kPointerSize;
+
+ // Add the Sirt.
*sirt_entries = num_sirt_references_;
- size_t sirt_size = StackIndirectReferenceTable::SizeOf(num_sirt_references_);
- sp8 -= GetSirtOffset() + sirt_size;
+ size_t sirt_size = StackIndirectReferenceTable::GetAlignedSirtSize(num_sirt_references_);
+ sp8 -= sirt_size;
*table = reinterpret_cast<StackIndirectReferenceTable*>(sp8);
+ (*table)->SetNumberOfReferences(num_sirt_references_);
+ // Add a slot for the method pointer, and fill it. Fix the pointer-pointer given to us.
+ sp8 -= kPointerSize;
+ uint8_t* method_pointer = sp8;
+ *(reinterpret_cast<mirror::ArtMethod**>(method_pointer)) = method;
+ *m = reinterpret_cast<mirror::ArtMethod**>(method_pointer);
+
+ // Reference cookie and padding
+ sp8 -= 8;
+ // Store Sirt size
+ *reinterpret_cast<uint32_t*>(sp8) = static_cast<uint32_t>(sirt_size & 0xFFFFFFFF);
+
+ // Next comes the native call stack.
sp8 -= GetStackSize();
- // Now align the call stack under the Sirt. This aligns by 16.
+ // Now align the call stack below. This aligns by 16, as AArch64 seems to require.
uintptr_t mask = ~0x0F;
sp8 = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(sp8) & mask);
*start_stack = reinterpret_cast<uintptr_t*>(sp8);
@@ -1212,10 +1208,14 @@
*start_gpr = reinterpret_cast<uintptr_t*>(sp8);
// reserve space for the code pointer
- sp8 -= sizeof(void*);
+ sp8 -= kPointerSize;
*code_return = reinterpret_cast<void*>(sp8);
*overall_size = reinterpret_cast<uint8_t*>(sp) - sp8;
+
+ // The new SP is stored at the end of the alloca, so it can be immediately popped
+ sp8 = reinterpret_cast<uint8_t*>(sp) - 5 * KB;
+ *(reinterpret_cast<uint8_t**>(sp8)) = method_pointer;
}
void ComputeSirtOffset() { } // nothing to do, static right now
@@ -1291,21 +1291,21 @@
// of transitioning into native code.
class BuildGenericJniFrameVisitor FINAL : public QuickArgumentVisitor {
public:
- BuildGenericJniFrameVisitor(mirror::ArtMethod** sp, bool is_static, const char* shorty,
+ BuildGenericJniFrameVisitor(mirror::ArtMethod*** sp, bool is_static, const char* shorty,
uint32_t shorty_len, Thread* self) :
- QuickArgumentVisitor(sp, is_static, shorty, shorty_len), sm_(this) {
+ QuickArgumentVisitor(*sp, is_static, shorty, shorty_len), sm_(this) {
ComputeGenericJniFrameSize fsc;
- fsc.ComputeLayout(is_static, shorty, shorty_len, sp, &cur_sirt_entry_, &sirt_,
- &sirt_expected_refs_, &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_,
- &code_return_, &alloca_used_size_);
+ fsc.ComputeLayout(sp, is_static, shorty, shorty_len, *sp, &sirt_, &sirt_expected_refs_,
+ &cur_stack_arg_, &cur_gpr_reg_, &cur_fpr_reg_, &code_return_,
+ &alloca_used_size_);
sirt_number_of_references_ = 0;
- top_of_sirt_ = cur_sirt_entry_;
+ cur_sirt_entry_ = reinterpret_cast<StackReference<mirror::Object>*>(GetFirstSirtEntry());
// jni environment is always first argument
sm_.AdvancePointer(self->GetJniEnv());
if (is_static) {
- sm_.AdvanceSirt((*sp)->GetDeclaringClass());
+ sm_.AdvanceSirt((**sp)->GetDeclaringClass());
}
}
@@ -1359,7 +1359,7 @@
// Initialize padding entries.
while (sirt_number_of_references_ < sirt_expected_refs_) {
*cur_sirt_entry_ = StackReference<mirror::Object>();
- cur_sirt_entry_--;
+ cur_sirt_entry_++;
sirt_number_of_references_++;
}
sirt_->SetNumberOfReferences(sirt_expected_refs_);
@@ -1368,8 +1368,8 @@
self->PushSirt(sirt_);
}
- jobject GetFirstSirtEntry() {
- return reinterpret_cast<jobject>(top_of_sirt_);
+ jobject GetFirstSirtEntry() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ return reinterpret_cast<jobject>(sirt_->GetStackReference(0));
}
void PushGpr(uintptr_t val) {
@@ -1394,9 +1394,15 @@
}
uintptr_t PushSirt(mirror::Object* ref) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- *cur_sirt_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
- uintptr_t tmp = reinterpret_cast<uintptr_t>(cur_sirt_entry_);
- cur_sirt_entry_--;
+ uintptr_t tmp;
+ if (ref == nullptr) {
+ *cur_sirt_entry_ = StackReference<mirror::Object>();
+ tmp = reinterpret_cast<uintptr_t>(nullptr);
+ } else {
+ *cur_sirt_entry_ = StackReference<mirror::Object>::FromMirrorPtr(ref);
+ tmp = reinterpret_cast<uintptr_t>(cur_sirt_entry_);
+ }
+ cur_sirt_entry_++;
sirt_number_of_references_++;
return tmp;
}
@@ -1418,7 +1424,7 @@
uintptr_t* cur_gpr_reg_;
uint32_t* cur_fpr_reg_;
uintptr_t* cur_stack_arg_;
- StackReference<mirror::Object>* top_of_sirt_;
+ // StackReference<mirror::Object>* top_of_sirt_;
void* code_return_;
size_t alloca_used_size_;
@@ -1432,20 +1438,22 @@
* Create a Sirt and call stack and fill a mini stack with values to be pushed to registers.
* The final element on the stack is a pointer to the native code.
*
+ * On entry, the stack has a standard callee-save frame above sp, and an alloca below it.
+ * We need to fix this, as the Sirt needs to go into the callee-save frame.
+ *
* The return of this function denotes:
* 1) How many bytes of the alloca can be released, if the value is non-negative.
* 2) An error, if the value is negative.
*/
extern "C" ssize_t artQuickGenericJniTrampoline(Thread* self, mirror::ArtMethod** sp)
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
- uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
mirror::ArtMethod* called = *sp;
- DCHECK(called->IsNative());
+ DCHECK(called->IsNative()) << PrettyMethod(called, true);
// run the visitor
MethodHelper mh(called);
- BuildGenericJniFrameVisitor visitor(sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
+ BuildGenericJniFrameVisitor visitor(&sp, called->IsStatic(), mh.GetShorty(), mh.GetShortyLength(),
self);
visitor.VisitArguments();
visitor.FinalizeSirt(self);
@@ -1462,11 +1470,14 @@
if (self->IsExceptionPending()) {
self->PopSirt();
// A negative value denotes an error.
+ // TODO: Determine whether the stack pointer still needs to be fixed up on this error path;
+ // if so, the adjustment value must also be pushed before returning.
return -1;
}
} else {
cookie = JniMethodStart(self);
}
+ uint32_t* sp32 = reinterpret_cast<uint32_t*>(sp);
*(sp32 - 1) = cookie;
// retrieve native code
@@ -1480,8 +1491,8 @@
size_t window_size = visitor.GetAllocaUsedSize();
*code_pointer = reinterpret_cast<uintptr_t>(nativeCode);
- // 5K reserved, window_size used.
- return (5 * KB) - window_size;
+ // 5K reserved, window_size + frame pointer used.
+ return (5 * KB) - window_size - kPointerSize;
}
/*
@@ -1501,10 +1512,10 @@
if (return_shorty_char == 'L') {
// the only special ending call
if (called->IsSynchronized()) {
- ComputeGenericJniFrameSize fsc;
- fsc.ComputeSirtOffset();
- uint32_t offset = fsc.GetFirstSirtEntryOffset();
- jobject tmp = reinterpret_cast<jobject>(reinterpret_cast<uint8_t*>(sp) - offset);
+ StackIndirectReferenceTable* table =
+ reinterpret_cast<StackIndirectReferenceTable*>(
+ reinterpret_cast<uint8_t*>(sp) + kPointerSize);
+ jobject tmp = reinterpret_cast<jobject>(table->GetStackReference(0));
return reinterpret_cast<uint64_t>(JniMethodEndWithReferenceSynchronized(result.l, cookie, tmp,
self));
@@ -1513,10 +1524,10 @@
}
} else {
if (called->IsSynchronized()) {
- ComputeGenericJniFrameSize fsc;
- fsc.ComputeSirtOffset();
- uint32_t offset = fsc.GetFirstSirtEntryOffset();
- jobject tmp = reinterpret_cast<jobject>(reinterpret_cast<uint8_t*>(sp) - offset);
+ StackIndirectReferenceTable* table =
+ reinterpret_cast<StackIndirectReferenceTable*>(
+ reinterpret_cast<uint8_t*>(sp) + kPointerSize);
+ jobject tmp = reinterpret_cast<jobject>(table->GetStackReference(0));
JniMethodEndSynchronized(cookie, tmp, self);
} else {
diff --git a/runtime/exception_test.cc b/runtime/exception_test.cc
index 5e3f504..4a75152 100644
--- a/runtime/exception_test.cc
+++ b/runtime/exception_test.cc
@@ -76,7 +76,7 @@
method_f_ = my_klass_->FindVirtualMethod("f", "()I");
ASSERT_TRUE(method_f_ != NULL);
- method_f_->SetFrameSizeInBytes(kStackAlignment);
+ method_f_->SetFrameSizeInBytes(4 * kPointerSize);
method_f_->SetEntryPointFromQuickCompiledCode(&fake_code_[sizeof(code_size)]);
method_f_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
method_f_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
@@ -84,7 +84,7 @@
method_g_ = my_klass_->FindVirtualMethod("g", "(I)V");
ASSERT_TRUE(method_g_ != NULL);
- method_g_->SetFrameSizeInBytes(kStackAlignment);
+ method_g_->SetFrameSizeInBytes(4 * kPointerSize);
method_g_->SetEntryPointFromQuickCompiledCode(&fake_code_[sizeof(code_size)]);
method_g_->SetMappingTable(&fake_mapping_data_.GetData()[0]);
method_g_->SetVmapTable(&fake_vmap_table_data_.GetData()[0]);
@@ -151,7 +151,7 @@
std::vector<uintptr_t> fake_stack;
ASSERT_EQ(kStackAlignment, 16U);
- ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
+ // ASSERT_EQ(sizeof(uintptr_t), sizeof(uint32_t));
if (!kUsePortableCompiler) {
// Create two fake stack frames with mapping data created in SetUp. We map offset 3 in the code
diff --git a/runtime/gc/allocator/rosalloc.cc b/runtime/gc/allocator/rosalloc.cc
index e13bd71..ace9f9e 100644
--- a/runtime/gc/allocator/rosalloc.cc
+++ b/runtime/gc/allocator/rosalloc.cc
@@ -48,8 +48,8 @@
bulk_free_lock_("rosalloc bulk free lock", kRosAllocBulkFreeLock),
page_release_mode_(page_release_mode),
page_release_size_threshold_(page_release_size_threshold) {
- DCHECK(RoundUp(capacity, kPageSize) == capacity);
- DCHECK(RoundUp(max_capacity, kPageSize) == max_capacity);
+ DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);
+ DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);
CHECK_LE(capacity, max_capacity);
CHECK(IsAligned<kPageSize>(page_release_size_threshold_));
if (!initialized_) {
@@ -151,7 +151,7 @@
if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
// There is a free page run at the end.
DCHECK(last_free_page_run->IsFree());
- DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
+ DCHECK_EQ(page_map_[ToPageMapIndex(last_free_page_run)], kPageMapEmpty);
last_free_page_run_size = last_free_page_run->ByteSize(this);
} else {
// There is no free page run at the end.
@@ -176,7 +176,7 @@
DCHECK_EQ(last_free_page_run_size, last_free_page_run->ByteSize(this));
last_free_page_run->SetByteSize(this, last_free_page_run_size + increment);
DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
- DCHECK(last_free_page_run->End(this) == base_ + new_footprint);
+ DCHECK_EQ(last_free_page_run->End(this), base_ + new_footprint);
} else {
// Otherwise, insert a new free page run at the end.
FreePageRun* new_free_page_run = reinterpret_cast<FreePageRun*>(base_ + footprint_);
@@ -186,7 +186,7 @@
new_free_page_run->SetByteSize(this, increment);
DCHECK_EQ(new_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
free_page_runs_.insert(new_free_page_run);
- DCHECK(*free_page_runs_.rbegin() == new_free_page_run);
+ DCHECK_EQ(*free_page_runs_.rbegin(), new_free_page_run);
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::AlloPages() : Grew the heap by inserting run 0x"
<< std::hex << reinterpret_cast<intptr_t>(new_free_page_run)
@@ -240,7 +240,7 @@
// Update the page map.
size_t page_map_idx = ToPageMapIndex(res);
for (size_t i = 0; i < num_pages; i++) {
- DCHECK(page_map_[page_map_idx + i] == kPageMapEmpty);
+ DCHECK_EQ(page_map_[page_map_idx + i], kPageMapEmpty);
}
switch (page_map_type) {
case kPageMapRun:
@@ -282,7 +282,7 @@
void RosAlloc::FreePages(Thread* self, void* ptr) {
lock_.AssertHeld(self);
size_t pm_idx = ToPageMapIndex(ptr);
- DCHECK(pm_idx < page_map_size_);
+ DCHECK_LT(pm_idx, page_map_size_);
byte pm_type = page_map_[pm_idx];
DCHECK(pm_type == kPageMapRun || pm_type == kPageMapLargeObject);
byte pm_part_type;
@@ -425,7 +425,7 @@
}
void* RosAlloc::AllocLargeObject(Thread* self, size_t size, size_t* bytes_allocated) {
- DCHECK(size > kLargeSizeThreshold);
+ DCHECK_GT(size, kLargeSizeThreshold);
size_t num_pages = RoundUp(size, kPageSize) / kPageSize;
void* r;
{
@@ -461,13 +461,14 @@
}
void RosAlloc::FreeInternal(Thread* self, void* ptr) {
- DCHECK(base_ <= ptr && ptr < base_ + footprint_);
+ DCHECK_LE(base_, ptr);
+ DCHECK_LT(ptr, base_ + footprint_);
size_t pm_idx = RoundDownToPageMapIndex(ptr);
bool free_from_run = false;
Run* run = NULL;
{
MutexLock mu(self, lock_);
- DCHECK(pm_idx < page_map_size_);
+ DCHECK_LT(pm_idx, page_map_size_);
byte page_map_entry = page_map_[pm_idx];
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::FreeInternal() : " << std::hex << ptr << ", pm_idx=" << std::dec << pm_idx
@@ -491,11 +492,11 @@
// Find the beginning of the run.
while (page_map_[pi] != kPageMapRun) {
pi--;
- DCHECK(pi < capacity_ / kPageSize);
+ DCHECK_LT(pi, capacity_ / kPageSize);
}
- DCHECK(page_map_[pi] == kPageMapRun);
+ DCHECK_EQ(page_map_[pi], kPageMapRun);
run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
break;
}
default:
@@ -551,13 +552,13 @@
}
void* RosAlloc::AllocFromRun(Thread* self, size_t size, size_t* bytes_allocated) {
- DCHECK(size <= kLargeSizeThreshold);
+ DCHECK_LE(size, kLargeSizeThreshold);
size_t bracket_size;
size_t idx = SizeToIndexAndBracketSize(size, &bracket_size);
DCHECK_EQ(idx, SizeToIndex(size));
DCHECK_EQ(bracket_size, IndexToBracketSize(idx));
DCHECK_EQ(bracket_size, bracketSizes[idx]);
- DCHECK(size <= bracket_size);
+ DCHECK_LE(size, bracket_size);
DCHECK(size > 512 || bracket_size - size < 16);
void* slot_addr;
@@ -693,8 +694,9 @@
}
void RosAlloc::FreeFromRun(Thread* self, void* ptr, Run* run) {
- DCHECK(run->magic_num_ == kMagicNum);
- DCHECK(run < ptr && ptr < run->End());
+ DCHECK_EQ(run->magic_num_, kMagicNum);
+ DCHECK_LT(run, ptr);
+ DCHECK_LT(ptr, run->End());
size_t idx = run->size_bracket_idx_;
MutexLock mu(self, *size_bracket_locks_[idx]);
bool run_was_full = false;
@@ -858,11 +860,11 @@
- (reinterpret_cast<byte*>(this) + headerSizes[idx]);
DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
- DCHECK(slot_idx < numOfSlots[idx]);
+ DCHECK_LT(slot_idx, numOfSlots[idx]);
size_t vec_idx = slot_idx / 32;
if (kIsDebugBuild) {
size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
- DCHECK(vec_idx < num_vec);
+ DCHECK_LT(vec_idx, num_vec);
}
size_t vec_off = slot_idx % 32;
uint32_t* vec = &alloc_bit_map_[vec_idx];
@@ -960,11 +962,11 @@
- (reinterpret_cast<byte*>(this) + headerSizes[idx]);
DCHECK_EQ(offset_from_slot_base % bracketSizes[idx], static_cast<size_t>(0));
size_t slot_idx = offset_from_slot_base / bracketSizes[idx];
- DCHECK(slot_idx < numOfSlots[idx]);
+ DCHECK_LT(slot_idx, numOfSlots[idx]);
size_t vec_idx = slot_idx / 32;
if (kIsDebugBuild) {
size_t num_vec = RoundUp(numOfSlots[idx], 32) / 32;
- DCHECK(vec_idx < num_vec);
+ DCHECK_LT(vec_idx, num_vec);
}
size_t vec_off = slot_idx % 32;
uint32_t* vec = &free_bit_map_base[vec_idx];
@@ -997,11 +999,13 @@
size_t num_vec = RoundUp(num_slots, 32) / 32;
size_t slots = 0;
for (size_t v = 0; v < num_vec; v++, slots += 32) {
- DCHECK(num_slots >= slots);
+ DCHECK_GE(num_slots, slots);
uint32_t vec = alloc_bit_map_[v];
uint32_t mask = (num_slots - slots >= 32) ? static_cast<uint32_t>(-1)
: (1 << (num_slots - slots)) - 1;
- DCHECK(num_slots - slots >= 32 ? mask == static_cast<uint32_t>(-1) : true);
+ if ((num_slots - slots) >= 32) {
+ DCHECK_EQ(mask, static_cast<uint32_t>(-1));
+ }
if (vec != mask) {
return false;
}
@@ -1052,7 +1056,7 @@
size_t num_vec = RoundUp(num_slots, 32) / 32;
size_t slots = 0;
for (size_t v = 0; v < num_vec; v++, slots += 32) {
- DCHECK(num_slots >= slots);
+ DCHECK_GE(num_slots, slots);
uint32_t vec = alloc_bit_map_[v];
size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
for (size_t i = 0; i < end; ++i) {
@@ -1094,7 +1098,8 @@
for (size_t i = 0; i < num_ptrs; i++) {
void* ptr = ptrs[i];
ptrs[i] = NULL;
- DCHECK(base_ <= ptr && ptr < base_ + footprint_);
+ DCHECK_LE(base_, ptr);
+ DCHECK_LT(ptr, base_ + footprint_);
size_t pm_idx = RoundDownToPageMapIndex(ptr);
Run* run = NULL;
if (kReadPageMapEntryWithoutLockInBulkFree) {
@@ -1107,18 +1112,18 @@
}
if (LIKELY(page_map_entry == kPageMapRun)) {
run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (LIKELY(page_map_entry == kPageMapRunPart)) {
size_t pi = pm_idx;
DCHECK(page_map_[pi] == kPageMapRun || page_map_[pi] == kPageMapRunPart);
// Find the beginning of the run.
while (page_map_[pi] != kPageMapRun) {
pi--;
- DCHECK(pi < capacity_ / kPageSize);
+ DCHECK_LT(pi, capacity_ / kPageSize);
}
- DCHECK(page_map_[pi] == kPageMapRun);
+ DCHECK_EQ(page_map_[pi], kPageMapRun);
run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (page_map_entry == kPageMapLargeObject) {
MutexLock mu(self, lock_);
FreePages(self, ptr);
@@ -1142,7 +1147,7 @@
bool free_from_run = false;
{
MutexLock mu(self, lock_);
- DCHECK(pm_idx < page_map_size_);
+ DCHECK_LT(pm_idx, page_map_size_);
byte page_map_entry = page_map_[pm_idx];
if (kTraceRosAlloc) {
LOG(INFO) << "RosAlloc::BulkFree() : " << std::hex << ptr << ", pm_idx="
@@ -1152,7 +1157,7 @@
if (LIKELY(page_map_entry == kPageMapRun)) {
free_from_run = true;
run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (LIKELY(page_map_entry == kPageMapRunPart)) {
free_from_run = true;
size_t pi = pm_idx;
@@ -1160,11 +1165,11 @@
// Find the beginning of the run.
while (page_map_[pi] != kPageMapRun) {
pi--;
- DCHECK(pi < capacity_ / kPageSize);
+ DCHECK_LT(pi, capacity_ / kPageSize);
}
- DCHECK(page_map_[pi] == kPageMapRun);
+ DCHECK_EQ(page_map_[pi], kPageMapRun);
run = reinterpret_cast<Run*>(base_ + pi * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
} else if (page_map_entry == kPageMapLargeObject) {
FreePages(self, ptr);
} else {
@@ -1393,7 +1398,8 @@
}
size_t RosAlloc::UsableSize(void* ptr) {
- DCHECK(base_ <= ptr && ptr < base_ + footprint_);
+ DCHECK_LE(base_, ptr);
+ DCHECK_LT(ptr, base_ + footprint_);
size_t pm_idx = RoundDownToPageMapIndex(ptr);
MutexLock mu(Thread::Current(), lock_);
switch (page_map_[pm_idx]) {
@@ -1420,11 +1426,11 @@
// Find the beginning of the run.
while (page_map_[pm_idx] != kPageMapRun) {
pm_idx--;
- DCHECK(pm_idx < capacity_ / kPageSize);
+ DCHECK_LT(pm_idx, capacity_ / kPageSize);
}
- DCHECK(page_map_[pm_idx] == kPageMapRun);
+ DCHECK_EQ(page_map_[pm_idx], kPageMapRun);
Run* run = reinterpret_cast<Run*>(base_ + pm_idx * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
size_t idx = run->size_bracket_idx_;
size_t offset_from_slot_base = reinterpret_cast<byte*>(ptr)
- (reinterpret_cast<byte*>(run) + headerSizes[idx]);
@@ -1446,7 +1452,7 @@
if (it != free_page_runs_.rend() && (last_free_page_run = *it)->End(this) == base_ + footprint_) {
// Remove the last free page run, if any.
DCHECK(last_free_page_run->IsFree());
- DCHECK(page_map_[ToPageMapIndex(last_free_page_run)] == kPageMapEmpty);
+ DCHECK_EQ(page_map_[ToPageMapIndex(last_free_page_run)], kPageMapEmpty);
DCHECK_EQ(last_free_page_run->ByteSize(this) % kPageSize, static_cast<size_t>(0));
DCHECK_EQ(last_free_page_run->End(this), base_ + footprint_);
free_page_runs_.erase(last_free_page_run);
@@ -1547,7 +1553,7 @@
case kPageMapRun: {
// The start of a run.
Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum);
+ DCHECK_EQ(run->magic_num_, kMagicNum);
run->InspectAllSlots(handler, arg);
size_t num_pages = numOfPages[run->size_bracket_idx_];
if (kIsDebugBuild) {
@@ -1656,7 +1662,7 @@
} else if (i == kNumOfSizeBrackets - 2) {
bracketSizes[i] = 1 * KB;
} else {
- DCHECK(i == kNumOfSizeBrackets - 1);
+ DCHECK_EQ(i, kNumOfSizeBrackets - 1);
bracketSizes[i] = 2 * KB;
}
if (kTraceRosAlloc) {
@@ -1674,10 +1680,10 @@
} else if (i < 32) {
numOfPages[i] = 8;
} else if (i == 32) {
- DCHECK(i = kNumOfSizeBrackets - 2);
+ DCHECK_EQ(i, kNumOfSizeBrackets - 2);
numOfPages[i] = 16;
} else {
- DCHECK(i = kNumOfSizeBrackets - 1);
+ DCHECK_EQ(i, kNumOfSizeBrackets - 1);
numOfPages[i] = 32;
}
if (kTraceRosAlloc) {
@@ -1726,7 +1732,7 @@
DCHECK(num_of_slots > 0 && header_size > 0 && bulk_free_bit_map_offset > 0);
// Add the padding for the alignment remainder.
header_size += run_size % bracket_size;
- DCHECK(header_size + num_of_slots * bracket_size == run_size);
+ DCHECK_EQ(header_size + num_of_slots * bracket_size, run_size);
numOfSlots[i] = num_of_slots;
headerSizes[i] = header_size;
bulkFreeBitMapOffsets[i] = bulk_free_bit_map_offset;
@@ -1773,7 +1779,7 @@
case kPageMapEmpty: {
// The start of a free page run.
FreePageRun* fpr = reinterpret_cast<FreePageRun*>(base_ + i * kPageSize);
- DCHECK(fpr->magic_num_ == kMagicNumFree) << "Bad magic number : " << fpr->magic_num_;
+ DCHECK_EQ(fpr->magic_num_, kMagicNumFree);
CHECK(free_page_runs_.find(fpr) != free_page_runs_.end())
<< "An empty page must belong to the free page run set";
size_t fpr_size = fpr->ByteSize(this);
@@ -1805,7 +1811,7 @@
void* start = base_ + i * kPageSize;
mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
size_t obj_size = obj->SizeOf();
- CHECK(obj_size > kLargeSizeThreshold)
+ CHECK_GT(obj_size, kLargeSizeThreshold)
<< "A rosalloc large object size must be > " << kLargeSizeThreshold;
CHECK_EQ(num_pages, RoundUp(obj_size, kPageSize) / kPageSize)
<< "A rosalloc large object size " << obj_size
@@ -1822,9 +1828,9 @@
case kPageMapRun: {
// The start of a run.
Run* run = reinterpret_cast<Run*>(base_ + i * kPageSize);
- DCHECK(run->magic_num_ == kMagicNum) << "Bad magic number" << run->magic_num_;
+ DCHECK_EQ(run->magic_num_, kMagicNum);
size_t idx = run->size_bracket_idx_;
- CHECK(idx < kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
+ CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << idx;
size_t num_pages = numOfPages[idx];
CHECK_GT(num_pages, static_cast<uintptr_t>(0))
<< "Run size must be > 0 : " << num_pages;
@@ -1858,9 +1864,9 @@
}
void RosAlloc::Run::Verify(Thread* self, RosAlloc* rosalloc) {
- DCHECK(magic_num_ == kMagicNum) << "Bad magic number : " << Dump();
+ DCHECK_EQ(magic_num_, kMagicNum) << "Bad magic number : " << Dump();
size_t idx = size_bracket_idx_;
- CHECK(idx < kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
+ CHECK_LT(idx, kNumOfSizeBrackets) << "Out of range size bracket index : " << Dump();
byte* slot_base = reinterpret_cast<byte*>(this) + headerSizes[idx];
size_t num_slots = numOfSlots[idx];
size_t bracket_size = IndexToBracketSize(idx);
@@ -1951,7 +1957,7 @@
size_t num_vec = RoundUp(num_slots, 32) / 32;
size_t slots = 0;
for (size_t v = 0; v < num_vec; v++, slots += 32) {
- DCHECK(num_slots >= slots) << "Out of bounds";
+ DCHECK_GE(num_slots, slots) << "Out of bounds";
uint32_t vec = alloc_bit_map_[v];
uint32_t thread_local_free_vec = ThreadLocalFreeBitMap()[v];
size_t end = std::min(num_slots - slots, static_cast<size_t>(32));
diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc
index 6b897cb..fe27992 100644
--- a/runtime/mirror/art_method.cc
+++ b/runtime/mirror/art_method.cc
@@ -320,15 +320,6 @@
self->PopManagedStackFragment(fragment);
}
-#ifndef NDEBUG
-size_t ArtMethod::GetSirtOffsetInBytes() {
- CHECK(IsNative());
- // TODO: support Sirt access from generic JNI trampoline.
- CHECK_NE(GetEntryPointFromQuickCompiledCode(), GetQuickGenericJniTrampoline());
- return kPointerSize;
-}
-#endif
-
bool ArtMethod::IsRegistered() {
void* native_method =
GetFieldPtr<void*>(OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), false);
diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h
index 8c22e67..a9da66c 100644
--- a/runtime/mirror/art_method.h
+++ b/runtime/mirror/art_method.h
@@ -342,13 +342,9 @@
return GetFrameSizeInBytes() - kPointerSize;
}
-#ifndef NDEBUG
- size_t GetSirtOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-#else
size_t GetSirtOffsetInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
return kPointerSize;
}
-#endif
bool IsRegistered();
diff --git a/runtime/oat.cc b/runtime/oat.cc
index d4eea85..454786d 100644
--- a/runtime/oat.cc
+++ b/runtime/oat.cc
@@ -132,7 +132,8 @@
uint32_t OatHeader::GetInterpreterToInterpreterBridgeOffset() const {
DCHECK(IsValid());
- CHECK_GE(interpreter_to_interpreter_bridge_offset_, executable_offset_);
+ CHECK(interpreter_to_interpreter_bridge_offset_ == 0 ||
+ interpreter_to_interpreter_bridge_offset_ >= executable_offset_);
return interpreter_to_interpreter_bridge_offset_;
}
diff --git a/runtime/object_utils.h b/runtime/object_utils.h
index dd2bd4f..5c79a71 100644
--- a/runtime/object_utils.h
+++ b/runtime/object_utils.h
@@ -394,6 +394,20 @@
return shorty_len_;
}
+ // Counts the number of references in the parameter list of the corresponding method.
+ // Note: This does _not_ include "this" for non-static methods.
+ uint32_t GetNumberOfReferenceArgsWithoutReceiver() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ const char* shorty = GetShorty();
+ uint32_t refs = 0;
+ for (uint32_t i = 1; i < shorty_len_ ; ++i) {
+ if (shorty[i] == 'L') {
+ refs++;
+ }
+ }
+
+ return refs;
+ }
+
const Signature GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
const DexFile& dex_file = GetDexFile();
uint32_t dex_method_idx = method_->GetDexMethodIndex();
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 15b288e..26b4de3 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -108,17 +108,11 @@
return NULL;
} else if (m->IsNative()) {
if (cur_quick_frame_ != NULL) {
- if (m->GetEntryPointFromQuickCompiledCode() == GetQuickGenericJniTrampoline()) {
- UNIMPLEMENTED(ERROR) << "Failed to determine this object of native method: "
- << PrettyMethod(m);
- return nullptr;
- } else {
- StackIndirectReferenceTable* sirt =
- reinterpret_cast<StackIndirectReferenceTable*>(
- reinterpret_cast<char*>(cur_quick_frame_) +
- m->GetSirtOffsetInBytes());
- return sirt->GetReference(0);
- }
+ StackIndirectReferenceTable* sirt =
+ reinterpret_cast<StackIndirectReferenceTable*>(
+ reinterpret_cast<char*>(cur_quick_frame_) +
+ m->GetSirtOffsetInBytes());
+ return sirt->GetReference(0);
} else {
return cur_shadow_frame_->GetVRegReference(0);
}
diff --git a/runtime/stack_indirect_reference_table.h b/runtime/stack_indirect_reference_table.h
index c2d6a59..e6dda85 100644
--- a/runtime/stack_indirect_reference_table.h
+++ b/runtime/stack_indirect_reference_table.h
@@ -39,7 +39,7 @@
~StackIndirectReferenceTable() {}
- // Number of references contained within this SIRT
+ // Number of references contained within this SIRT.
uint32_t NumberOfReferences() const {
return number_of_references_;
}
@@ -51,7 +51,13 @@
return header_size + data_size;
}
- // Link to previous SIRT or NULL
+ // Get the size of the SIRT for the number of entries, with padding added for potential alignment.
+ static size_t GetAlignedSirtSize(uint32_t num_references) {
+ size_t sirt_size = SizeOf(num_references);
+ return RoundUp(sirt_size, 8);
+ }
+
+ // Link to previous SIRT or NULL.
StackIndirectReferenceTable* GetLink() const {
return link_;
}
@@ -72,6 +78,12 @@
return references_[i].AsMirrorPtr();
}
+ StackReference<mirror::Object>* GetStackReference(size_t i)
+ SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+ DCHECK_LT(i, number_of_references_);
+ return &references_[i];
+ }
+
void SetReference(size_t i, mirror::Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
DCHECK_LT(i, number_of_references_);
references_[i].Assign(object);
diff --git a/runtime/thread.h b/runtime/thread.h
index 2ebc107..94a8bd8 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -95,7 +95,13 @@
class PACKED(4) Thread {
public:
// Space to throw a StackOverflowError in.
- static const size_t kStackOverflowReservedBytes = 16 * KB;
+#if !defined(NDEBUG) && defined(__clang__)
+ // TODO: debug clang builds have large switch based interpreter frames that require more stack
+ // space to handle stack overflow exceptions.
+ static constexpr size_t kStackOverflowReservedBytes = 18 * KB;
+#else
+ static constexpr size_t kStackOverflowReservedBytes = 16 * KB;
+#endif
// Creates a new native thread corresponding to the given managed peer.
// Used to implement Thread.start.
diff --git a/test/202-thread-oome/src/Main.java b/test/202-thread-oome/src/Main.java
index bacb842..f7df93b 100644
--- a/test/202-thread-oome/src/Main.java
+++ b/test/202-thread-oome/src/Main.java
@@ -16,7 +16,7 @@
public class Main {
public static void main(String[] args) throws Exception {
- Thread t = new Thread(null, new Runnable() { public void run() {} }, "", 3*1024*1024*1024);
+ Thread t = new Thread(null, new Runnable() { public void run() {} }, "", 3L*1024*1024*1024);
try {
t.start();
} catch (OutOfMemoryError expected) {
diff --git a/test/Android.mk b/test/Android.mk
index f4a0426..da5b35f 100644
--- a/test/Android.mk
+++ b/test/Android.mk
@@ -44,8 +44,7 @@
TEST_OAT_DIRECTORIES := \
Main \
HelloWorld \
- \
- InterfaceTest \
+ InterfaceTest \
JniTest \
NativeAllocations \
ParallelGC \
@@ -110,12 +109,12 @@
test-art-target-oat-$(1): $(ART_TEST_OUT)/oat-test-dex-$(1).jar test-art-target-sync
adb shell touch $(ART_TEST_DIR)/test-art-target-oat-$(1)
adb shell rm $(ART_TEST_DIR)/test-art-target-oat-$(1)
- adb shell sh -c "/system/bin/dalvikvm -XXlib:libartd.so -Ximage:$(ART_TEST_DIR)/core.art -classpath $(ART_TEST_DIR)/oat-test-dex-$(1).jar -Djava.library.path=$(ART_TEST_DIR) $(1) $(2) && touch $(ART_TEST_DIR)/test-art-target-oat-$(1)"
+ adb shell sh -c "/system/bin/dalvikvm $(DALVIKVM_FLAGS) -XXlib:libartd.so -Ximage:$(ART_TEST_DIR)/core.art -classpath $(ART_TEST_DIR)/oat-test-dex-$(1).jar -Djava.library.path=$(ART_TEST_DIR) $(1) $(2) && touch $(ART_TEST_DIR)/test-art-target-oat-$(1)"
$(hide) (adb pull $(ART_TEST_DIR)/test-art-target-oat-$(1) /tmp/ && echo test-art-target-oat-$(1) PASSED) || (echo test-art-target-oat-$(1) FAILED && exit 1)
$(hide) rm /tmp/test-art-target-oat-$(1)
$(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).odex: $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar $(HOST_CORE_IMG_OUT) | $(DEX2OAT)
- $(DEX2OAT) --runtime-arg -Xms16m --runtime-arg -Xmx16m --boot-image=$(HOST_CORE_IMG_OUT) --dex-file=$(PWD)/$$< --oat-file=$(PWD)/$$@ --instruction-set=$(HOST_ARCH) --host --android-root=$(HOST_OUT)
+ $(DEX2OAT) $(DEX2OAT_FLAGS) --runtime-arg -Xms16m --runtime-arg -Xmx16m --boot-image=$(HOST_CORE_IMG_OUT) --dex-file=$(PWD)/$$< --oat-file=$(PWD)/$$@ --instruction-set=$(ART_HOST_ARCH) --host --android-root=$(HOST_OUT)
.PHONY: test-art-host-oat-default-$(1)
test-art-host-oat-default-$(1): $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).odex test-art-host-dependencies
@@ -123,7 +122,7 @@
ANDROID_DATA=/tmp/android-data/test-art-host-oat-default-$(1) \
ANDROID_ROOT=$(HOST_OUT) \
LD_LIBRARY_PATH=$(HOST_OUT_SHARED_LIBRARIES) \
- $(HOST_OUT_EXECUTABLES)/dalvikvm -XXlib:libartd.so -Ximage:$(shell pwd)/$(HOST_CORE_IMG_OUT) -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_OUT_SHARED_LIBRARIES) $(1) $(2) \
+ $(HOST_OUT_EXECUTABLES)/dalvikvm $(DALVIKVM_FLAGS) -XXlib:libartd.so -Ximage:$(shell pwd)/$(HOST_CORE_IMG_OUT) -classpath $(HOST_OUT_JAVA_LIBRARIES)/oat-test-dex-$(1).jar -Djava.library.path=$(HOST_OUT_SHARED_LIBRARIES) $(1) $(2) \
&& echo test-art-host-oat-default-$(1) PASSED || (echo test-art-host-oat-default-$(1) FAILED && exit 1)
$(hide) rm -r /tmp/android-data/test-art-host-oat-default-$(1)