ART: Even more Quick cleanup
Remove Backend.

Mir2Lir no longer derives from Backend: the arena allocator and the
vector-register query hooks (VectorRegisterSize,
NumReservableVectorRegisters) now live directly on Mir2Lir, and
QuickCompiler::GetCodeGenerator returns Mir2Lir* instead of Backend*.

Change-Id: I247cc65ccda6a362ba1a8f5e73e7f12ecd980a87
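Reviewer note, not part of the patch: a minimal sketch of how a concrete
code generator now plugs into Mir2Lir directly rather than through
Backend. The ExampleMir2Lir name and the 128-bit / 5-register values are
invented for illustration, and the many other pure-virtual Mir2Lir hooks
a real target must implement are omitted.

    // Hypothetical target backend; assumes the surrounding ART headers
    // (mir_to_lir.h, base/macros.h) are available.
    class ExampleMir2Lir : public Mir2Lir {
     public:
      ExampleMir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
          : Mir2Lir(cu, mir_graph, arena) {}  // The arena is now stored by Mir2Lir itself.

      // Pretend this target has 128-bit vector registers (illustrative value only).
      int VectorRegisterSize() OVERRIDE {
        return 128;
      }

      // Pretend 5 vector registers can be reserved regardless of operand
      // type (illustrative value only).
      int NumReservableVectorRegisters(bool long_or_fp ATTRIBUTE_UNUSED) OVERRIDE {
        return 5;
      }

      // ... InvokeTrampoline() and the other pure-virtual hooks omitted.
    };
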
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 52b2e15..04113db 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -967,12 +967,12 @@
// TODO: move to mir_to_lir.cc
Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena)
- : Backend(arena),
- literal_list_(nullptr),
+ : literal_list_(nullptr),
method_literal_list_(nullptr),
class_literal_list_(nullptr),
code_literal_list_(nullptr),
first_fixup_(nullptr),
+ arena_(arena),
cu_(cu),
mir_graph_(mir_graph),
switch_tables_(arena->Adapter(kArenaAllocSwitchTable)),
diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h
index 64ecf94..888c34e 100644
--- a/compiler/dex/quick/mir_to_lir.h
+++ b/compiler/dex/quick/mir_to_lir.h
@@ -23,7 +23,6 @@
#include "dex/dex_types.h"
#include "dex/reg_location.h"
#include "dex/reg_storage.h"
-#include "dex/backend.h"
#include "dex/quick/resource_mask.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "invoke_type.h"
@@ -201,7 +200,7 @@
// Mask to denote sreg as the start of a 64-bit item. Must not interfere with low 16 bits.
#define STARTING_WIDE_SREG 0x10000
-class Mir2Lir : public Backend {
+class Mir2Lir {
public:
static constexpr bool kFailOnSizeError = true && kIsDebugBuild;
static constexpr bool kReportSizeError = true && kIsDebugBuild;
@@ -1465,6 +1464,30 @@
virtual LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) = 0;
+ // Queries about backend support for vector operations.
+ /*
+ * Return the number of bits in a vector register.
+ * @return 0 if vector registers are not supported, or the
+ * number of bits in the vector register if supported.
+ */
+ virtual int VectorRegisterSize() {
+ return 0;
+ }
+
+ /*
+ * Return the number of vector registers the backend can reserve.
+ * @param long_or_fp, true if the operations executed while the
+ * vector registers are reserved are long or floating point
+ * computations.
+ * @return the number of vector registers that are available.
+ * @note The backend should hold back enough vector registers to
+ * generate scalar code without exhausting them when scalar code
+ * also uses the vector registers.
+ */
+ virtual int NumReservableVectorRegisters(bool long_or_fp ATTRIBUTE_UNUSED) {
+ return 0;
+ }
+
protected:
Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena);
@@ -1687,6 +1710,7 @@
LIR* first_fixup_; // Doubly-linked list of LIR nodes requiring fixups.
protected:
+ ArenaAllocator* const arena_;
CompilationUnit* const cu_;
MIRGraph* const mir_graph_;
ArenaVector<SwitchTable*> switch_tables_;
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
index 11808ad..3a34fcd 100644
--- a/compiler/dex/quick/quick_compiler.cc
+++ b/compiler/dex/quick/quick_compiler.cc
@@ -25,7 +25,6 @@
#include "compiler.h"
#include "dex_file-inl.h"
#include "dex_file_to_method_inliner_map.h"
-#include "dex/backend.h"
#include "dex/compiler_ir.h"
#include "dex/dex_flags.h"
#include "dex/mir_graph.h"
@@ -35,6 +34,7 @@
#include "driver/compiler_options.h"
#include "elf_writer_quick.h"
#include "jni/quick/jni_compiler.h"
+#include "mir_to_lir.h"
#include "mirror/art_method-inl.h"
#include "mirror/object.h"
#include "runtime.h"
@@ -81,7 +81,7 @@
OVERRIDE
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
- Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const OVERRIDE;
+ Mir2Lir* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const;
void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;
@@ -819,7 +819,7 @@
*GetCompilerDriver());
}
-Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+Mir2Lir* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
UNUSED(compilation_unit);
Mir2Lir* mir_to_lir = nullptr;
switch (cu->instruction_set) {