Quick compiler: Single .so for all targets
With this CL, all targets can be built into a single .so (but
we're not yet doing so - the compiler driver needs to be reworked).
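A hedged sketch of what the reworked driver dispatch might look like
(hypothetical - this CL doesn't implement it; InitX86Codegen() is real
and appears in target_x86.cc below, while the Arm/Mips init functions
are assumed to mirror it):

  // Hypothetical driver dispatch: pick a backend at runtime instead
  // of at build time, once all targets live in one .so.
  switch (cu->instruction_set) {
    case kThumb2: InitArmCodegen(cu);  break;  // assumed counterpart
    case kMips:   InitMipsCodegen(cu); break;  // assumed counterpart
    case kX86:    InitX86Codegen(cu);  break;  // added in this CL
    default: LOG(FATAL) << "Unsupported instruction set";
  }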
A new Codegen class is introduced (see compiler/codegen/codegen.h),
along with target-specific sub-classes ArmCodegen, MipsCodegen and
X86Codegen (see compiler/codegen/*/codegen_[arm|mips|x86].h).
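A minimal sketch of the new class layout (simplified from this CL; the
full interface lives in compiler/codegen/codegen.h):

  // Common code calls through the Codegen base class; each target
  // subclass overrides the pure-virtual hooks (see codegen_x86.h).
  class Codegen {
   public:
    virtual ~Codegen() {}
    virtual int GetInsnSize(LIR* lir) = 0;
    virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu,
                                                 uintptr_t start_addr) = 0;
    // ... plus the rest of the target hooks declared in codegen.h ...
  };

  class X86Codegen : public Codegen {
   public:
    virtual int GetInsnSize(LIR* lir);
    virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu,
                                                 uintptr_t start_addr);
    // The encoding table becomes a static class member rather than a
    // global, so all three targets can coexist in one .so.
    static const X86EncodingMap EncodingMap[kX86Last];
  };

InitX86Codegen() in target_x86.cc then hangs an instance off the
compilation unit via cu->cg.reset(new X86Codegen()), and common code
dispatches through cu->cg.get().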
Additional minor code, comment and format refactoring. Some source
files were combined, temporary header files deleted, and a few files
renamed to better reflect their function.
Next up is combining the Quick and Portable .so files.
Note: building all targets into libdvm-compiler.so increases its
size by 140K bytes. I'm inclined to not bother introducing conditional
compilation to limit code to the specific target - the added build and
testing complexity doesn't seem worth such a modest size saving.
Change-Id: Id9c5b4502ad6b77cdb31f71d3126f51a4f2e9dfe
diff --git a/src/compiler/codegen/x86/assemble_x86.cc b/src/compiler/codegen/x86/assemble_x86.cc
index 2363c20..1e04e18 100644
--- a/src/compiler/codegen/x86/assemble_x86.cc
+++ b/src/compiler/codegen/x86/assemble_x86.cc
@@ -15,13 +15,14 @@
*/
#include "x86_lir.h"
+#include "codegen_x86.h"
#include "../codegen_util.h"
namespace art {
#define MAX_ASSEMBLER_RETRIES 50
-X86EncodingMap EncodingMap[kX86Last] = {
+const X86EncodingMap X86Codegen::EncodingMap[kX86Last] = {
{ kX8632BitData, kData, IS_UNARY_OP, { 0, 0, 0x00, 0, 0, 0, 0, 4 }, "data", "0x!0d" },
{ kX86Bkpt, kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xCC, 0, 0, 0, 0, 0 }, "int 3", "" },
{ kX86Nop, kNop, IS_UNARY_OP, { 0, 0, 0x90, 0, 0, 0, 0, 0 }, "nop", "" },
@@ -329,7 +330,7 @@
{ kX86PcRelAdr, kPcRel, IS_LOAD | IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB8, 0, 0, 0, 0, 4 }, "PcRelAdr", "!0r,!1d" },
};
-static size_t ComputeSize(X86EncodingMap* entry, int displacement, bool has_sib) {
+static size_t ComputeSize(const X86EncodingMap* entry, int displacement, bool has_sib) {
size_t size = 0;
if (entry->skeleton.prefix1 > 0) {
++size;
@@ -358,8 +359,8 @@
return size;
}
-int GetInsnSize(LIR* lir) {
- X86EncodingMap* entry = &EncodingMap[lir->opcode];
+int X86Codegen::GetInsnSize(LIR* lir) {
+ const X86EncodingMap* entry = &X86Codegen::EncodingMap[lir->opcode];
switch (entry->kind) {
case kData:
return 4; // 4 bytes of data
@@ -498,7 +499,7 @@
case kMacro:
DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod));
return 5 /* call opcode + 4 byte displacement */ + 1 /* pop reg */ +
- ComputeSize(&EncodingMap[kX86Sub32RI], 0, false) -
+ ComputeSize(&X86Codegen::EncodingMap[kX86Sub32RI], 0, false) -
(lir->operands[0] == rAX ? 1 : 0); // shorter ax encoding
default:
break;
@@ -1173,12 +1174,14 @@
DCHECK_LT(reg, 8);
cu->code_buffer.push_back(0x58 + reg); // pop reg
- EmitRegImm(cu, &EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
+ EmitRegImm(cu, &X86Codegen::EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */);
}
static void EmitUnimplemented(CompilationUnit* cu, const X86EncodingMap* entry, LIR* lir) {
- UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " " << BuildInsnString(entry->fmt, lir, 0);
- for (int i = 0; i < GetInsnSize(lir); ++i) {
+ Codegen* cg = cu->cg.get();
+ UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " "
+ << cg->BuildInsnString(entry->fmt, lir, 0);
+ for (int i = 0; i < cg->GetInsnSize(lir); ++i) {
cu->code_buffer.push_back(0xCC); // push breakpoint instruction - int 3
}
}
@@ -1189,7 +1192,7 @@
* instruction. In those cases we will try to substitute a new code
* sequence or request that the trace be shortened and retried.
*/
-AssemblerStatus AssembleInstructions(CompilationUnit *cu, uintptr_t start_addr) {
+AssemblerStatus X86Codegen::AssembleInstructions(CompilationUnit *cu, uintptr_t start_addr) {
LIR *lir;
AssemblerStatus res = kSuccess; // Assume success
@@ -1305,7 +1308,7 @@
continue;
}
CHECK_EQ(static_cast<size_t>(lir->offset), cu->code_buffer.size());
- const X86EncodingMap *entry = &EncodingMap[lir->opcode];
+ const X86EncodingMap *entry = &X86Codegen::EncodingMap[lir->opcode];
size_t starting_cbuf_size = cu->code_buffer.size();
switch (entry->kind) {
case kData: // 4 bytes of data
@@ -1409,7 +1412,7 @@
}
CHECK_EQ(static_cast<size_t>(GetInsnSize(lir)),
cu->code_buffer.size() - starting_cbuf_size)
- << "Instruction size mismatch for entry: " << EncodingMap[lir->opcode].name;
+ << "Instruction size mismatch for entry: " << X86Codegen::EncodingMap[lir->opcode].name;
}
return res;
}
@@ -1418,7 +1421,7 @@
* Target-dependent offset assignment.
* independent.
*/
-int AssignInsnOffsets(CompilationUnit* cu)
+int X86Codegen::AssignInsnOffsets(CompilationUnit* cu)
{
LIR* x86_lir;
int offset = 0;
diff --git a/src/compiler/codegen/x86/call_x86.cc b/src/compiler/codegen/x86/call_x86.cc
index e24831d..80de901 100644
--- a/src/compiler/codegen/x86/call_x86.cc
+++ b/src/compiler/codegen/x86/call_x86.cc
@@ -17,13 +17,14 @@
/* This file contains codegen for the X86 ISA */
#include "x86_lir.h"
+#include "codegen_x86.h"
#include "../codegen_util.h"
#include "../ralloc_util.h"
namespace art {
-void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
- SpecialCaseHandler special_case)
+void X86Codegen::GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case)
{
// TODO
}
@@ -32,10 +33,7 @@
* The sparse table in the literal pool is an array of <key,displacement>
* pairs.
*/
-BasicBlock *FindBlock(CompilationUnit* cu, unsigned int code_offset,
- bool split, bool create, BasicBlock** immed_pred_block_p);
-void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
- RegLocation rl_src)
+void X86Codegen::GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
{
const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
if (cu->verbose) {
@@ -47,9 +45,7 @@
rl_src = LoadValue(cu, rl_src, kCoreReg);
for (int i = 0; i < entries; i++) {
int key = keys[i];
- BasicBlock* case_block = FindBlock(cu,
- cu->current_dalvik_offset + targets[i],
- false, false, NULL);
+ BasicBlock* case_block = FindBlock(cu, cu->current_dalvik_offset + targets[i]);
LIR* label_list = cu->block_label_list;
OpCmpImmBranch(cu, kCondEq, rl_src.low_reg, key,
&label_list[case_block->id]);
@@ -72,8 +68,7 @@
* jmp r_start_of_method
* done:
*/
-void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
- RegLocation rl_src)
+void X86Codegen::GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
{
const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
if (cu->verbose) {
@@ -122,8 +117,6 @@
branch_over->target = target;
}
-void CallRuntimeHelperRegReg(CompilationUnit* cu, int helper_offset,
- int arg0, int arg1, bool safepoint_pc);
/*
* Array data table format:
* ushort ident = 0x0300 magic value
@@ -134,8 +127,7 @@
*
* Total size is 4+(width * size + 1)/2 16-bit code units.
*/
-void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
- RegLocation rl_src)
+void X86Codegen::GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
{
const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
// Add the table to the list - we'll process it later
@@ -160,7 +152,7 @@
rX86_ARG1, true);
}
-void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void X86Codegen::GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
FlushAllRegs(cu);
LoadValueDirectFixed(cu, rl_src, rCX); // Get obj
@@ -178,7 +170,7 @@
branch->target = NewLIR0(cu, kPseudoTargetLabel);
}
-void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
+void X86Codegen::GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
FlushAllRegs(cu);
LoadValueDirectFixed(cu, rl_src, rAX); // Get obj
@@ -202,7 +194,7 @@
/*
* Mark garbage collection card. Skip if the value we're storing is null.
*/
-void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
+void X86Codegen::MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
{
int reg_card_base = AllocTemp(cu);
int reg_card_no = AllocTemp(cu);
@@ -217,8 +209,7 @@
FreeTemp(cu, reg_card_no);
}
-void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
- RegLocation rl_method)
+void X86Codegen::GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs, RegLocation rl_method)
{
/*
* On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live. Let the register
@@ -261,7 +252,7 @@
FreeTemp(cu, rX86_ARG2);
}
-void GenExitSequence(CompilationUnit* cu) {
+void X86Codegen::GenExitSequence(CompilationUnit* cu) {
/*
* In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
* allocated by the register utilities as temps.
diff --git a/src/compiler/codegen/x86/codegen_x86.h b/src/compiler/codegen/x86/codegen_x86.h
new file mode 100644
index 0000000..2a01d9a
--- /dev/null
+++ b/src/compiler/codegen/x86/codegen_x86.h
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_SRC_COMPILER_CODEGEN_X86_CODEGENX86_H_
+#define ART_SRC_COMPILER_CODEGEN_X86_CODEGENX86_H_
+
+#include "../../compiler_internals.h"
+
+namespace art {
+
+class X86Codegen : public Codegen {
+ public:
+ // Required for target - codegen helpers.
+ virtual bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit);
+ virtual int LoadHelper(CompilationUnit* cu, int offset);
+ virtual LIR* LoadBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_dest,
+ OpSize size, int s_reg);
+ virtual LIR* LoadBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_dest_lo,
+ int r_dest_hi, int s_reg);
+ virtual LIR* LoadBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_dest, int scale,
+ OpSize size);
+ virtual LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_dest, int r_dest_hi, OpSize size,
+ int s_reg);
+ virtual LIR* LoadConstantNoClobber(CompilationUnit* cu, int r_dest, int value);
+ virtual LIR* LoadConstantValueWide(CompilationUnit* cu, int r_dest_lo, int r_dest_hi,
+ int val_lo, int val_hi);
+ virtual void LoadPair(CompilationUnit* cu, int base, int low_reg, int high_reg);
+ virtual LIR* StoreBaseDisp(CompilationUnit* cu, int rBase, int displacement, int r_src,
+ OpSize size);
+ virtual LIR* StoreBaseDispWide(CompilationUnit* cu, int rBase, int displacement, int r_src_lo,
+ int r_src_hi);
+ virtual LIR* StoreBaseIndexed(CompilationUnit* cu, int rBase, int r_index, int r_src, int scale,
+ OpSize size);
+ virtual LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_src, int r_src_hi, OpSize size,
+ int s_reg);
+ virtual void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg);
+
+ // Required for target - register utilities.
+ virtual bool IsFpReg(int reg);
+ virtual bool SameRegType(int reg1, int reg2);
+ virtual int AllocTypedTemp(CompilationUnit* cu, bool fp_hint, int reg_class);
+ virtual int AllocTypedTempPair(CompilationUnit* cu, bool fp_hint, int reg_class);
+ virtual int S2d(int low_reg, int high_reg);
+ virtual int TargetReg(SpecialTargetRegister reg);
+ virtual RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg);
+ virtual RegLocation GetReturnAlt(CompilationUnit* cu);
+ virtual RegLocation GetReturnWideAlt(CompilationUnit* cu);
+ virtual RegLocation LocCReturn();
+ virtual RegLocation LocCReturnDouble();
+ virtual RegLocation LocCReturnFloat();
+ virtual RegLocation LocCReturnWide();
+ virtual uint32_t FpRegMask();
+ virtual uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg);
+ virtual void AdjustSpillMask(CompilationUnit* cu);
+ virtual void ClobberCalleeSave(CompilationUnit *cu);
+ virtual void FlushReg(CompilationUnit* cu, int reg);
+ virtual void FlushRegWide(CompilationUnit* cu, int reg1, int reg2);
+ virtual void FreeCallTemps(CompilationUnit* cu);
+ virtual void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep, RegLocation rl_free);
+ virtual void LockCallTemps(CompilationUnit* cu);
+ virtual void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg);
+ virtual void CompilerInitializeRegAlloc(CompilationUnit* cu);
+
+ // Required for target - miscellaneous.
+ virtual AssemblerStatus AssembleInstructions(CompilationUnit* cu, uintptr_t start_addr);
+ virtual void DumpResourceMask(LIR* lir, uint64_t mask, const char* prefix);
+ virtual void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir);
+ virtual const char* GetTargetInstFmt(int opcode);
+ virtual const char* GetTargetInstName(int opcode);
+ virtual int AssignInsnOffsets(CompilationUnit* cu);
+ virtual std::string BuildInsnString(const char* fmt, LIR* lir, unsigned char* base_addr);
+ virtual uint64_t GetPCUseDefEncoding();
+ virtual uint64_t GetTargetInstFlags(int opcode);
+ virtual int GetInsnSize(LIR* lir);
+ virtual bool IsUnconditionalBranch(LIR* lir);
+
+ // Required for target - Dalvik-level generators.
+ virtual bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual bool GenArithOpDouble(CompilationUnit* cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual bool GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ virtual bool GenCmpFP(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2);
+ virtual bool GenConversion(CompilationUnit* cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src);
+ virtual bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier);
+ virtual bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min);
+ virtual bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info);
+ virtual bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code, int reg1, int base,
+ int offset, ThrowKind kind);
+ virtual RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi,
+ bool is_div);
+ virtual RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit,
+ bool is_div);
+ virtual void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2);
+ virtual void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi);
+ virtual void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
+ RegLocation rl_method);
+ virtual void GenExitSequence(CompilationUnit* cu);
+ virtual void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+ bool is_double);
+ virtual void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir);
+ virtual void GenMemBarrier(CompilationUnit* cu, MemBarrierKind barrier_kind);
+ virtual void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+ virtual void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src);
+ virtual void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit, int first_bit,
+ int second_bit);
+ virtual void GenNegDouble(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenNegFloat(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src);
+ virtual void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
+ RegLocation rl_src);
+ virtual void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler special_case);
+
+ // Single operation generators.
+ virtual LIR* OpUnconditionalBranch(CompilationUnit* cu, LIR* target);
+ virtual LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+ LIR* target);
+ virtual LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg, int check_value,
+ LIR* target);
+ virtual LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target);
+ virtual LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg,
+ LIR* target);
+ virtual LIR* OpFpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide);
+ virtual LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp);
+ virtual LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target);
+ virtual LIR* OpReg(CompilationUnit* cu, OpKind op, int r_dest_src);
+ virtual LIR* OpRegCopy(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpRegCopyNoInsert(CompilationUnit* cu, int r_dest, int r_src);
+ virtual LIR* OpRegImm(CompilationUnit* cu, OpKind op, int r_dest_src1, int value);
+ virtual LIR* OpRegMem(CompilationUnit* cu, OpKind op, int r_dest, int rBase, int offset);
+ virtual LIR* OpRegReg(CompilationUnit* cu, OpKind op, int r_dest_src1, int r_src2);
+ virtual LIR* OpRegRegImm(CompilationUnit* cu, OpKind op, int r_dest, int r_src1, int value);
+ virtual LIR* OpRegRegReg(CompilationUnit* cu, OpKind op, int r_dest, int r_src1,
+ int r_src2);
+ virtual LIR* OpTestSuspend(CompilationUnit* cu, LIR* target);
+ virtual LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset);
+ virtual LIR* OpVldm(CompilationUnit* cu, int rBase, int count);
+ virtual LIR* OpVstm(CompilationUnit* cu, int rBase, int count);
+ virtual void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale,
+ int offset);
+ virtual void OpRegCopyWide(CompilationUnit* cu, int dest_lo, int dest_hi, int src_lo,
+ int src_hi);
+ virtual void OpTlsCmp(CompilationUnit* cu, int offset, int val);
+
+ void OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset);
+ void SpillCoreRegs(CompilationUnit* cu);
+ void UnSpillCoreRegs(CompilationUnit* cu);
+ static const X86EncodingMap EncodingMap[kX86Last];
+};
+
+} // namespace art
+
+#endif // ART_SRC_COMPILER_CODEGEN_X86_CODEGENX86_H_
diff --git a/src/compiler/codegen/x86/fp_x86.cc b/src/compiler/codegen/x86/fp_x86.cc
index def4896..14f8b92 100644
--- a/src/compiler/codegen/x86/fp_x86.cc
+++ b/src/compiler/codegen/x86/fp_x86.cc
@@ -15,13 +15,14 @@
*/
#include "x86_lir.h"
+#include "codegen_x86.h"
#include "../codegen_util.h"
#include "../ralloc_util.h"
namespace art {
-bool GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+bool X86Codegen::GenArithOpFloat(CompilationUnit *cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
X86OpCode op = kX86Nop;
RegLocation rl_result;
@@ -70,8 +71,8 @@
return false;
}
-bool GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
+bool X86Codegen::GenArithOpDouble(CompilationUnit *cu, Instruction::Code opcode,
+ RegLocation rl_dest, RegLocation rl_src1, RegLocation rl_src2) {
X86OpCode op = kX86Nop;
RegLocation rl_result;
@@ -119,8 +120,8 @@
return false;
}
-bool GenConversion(CompilationUnit *cu, Instruction::Code opcode,
- RegLocation rl_dest, RegLocation rl_src) {
+bool X86Codegen::GenConversion(CompilationUnit *cu, Instruction::Code opcode, RegLocation rl_dest,
+ RegLocation rl_src) {
RegisterClass rcSrc = kFPReg;
X86OpCode op = kX86Nop;
int src_reg;
@@ -212,8 +213,8 @@
return false;
}
-bool GenCmpFP(CompilationUnit *cu, Instruction::Code code, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2) {
+bool X86Codegen::GenCmpFP(CompilationUnit *cu, Instruction::Code code, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2) {
bool single = (code == Instruction::CMPL_FLOAT) || (code == Instruction::CMPG_FLOAT);
bool unordered_gt = (code == Instruction::CMPG_DOUBLE) || (code == Instruction::CMPG_FLOAT);
int src_reg1;
@@ -263,8 +264,8 @@
return false;
}
-void GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
- bool gt_bias, bool is_double) {
+void X86Codegen::GenFusedFPCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir, bool gt_bias,
+ bool is_double) {
LIR* label_list = cu->block_label_list;
LIR* taken = &label_list[bb->taken->id];
LIR* not_taken = &label_list[bb->fall_through->id];
@@ -333,7 +334,7 @@
OpCondBranch(cu, ccode, taken);
}
-void GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+void X86Codegen::GenNegFloat(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
{
RegLocation rl_result;
rl_src = LoadValue(cu, rl_src, kCoreReg);
@@ -342,7 +343,7 @@
StoreValue(cu, rl_dest, rl_result);
}
-void GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
+void X86Codegen::GenNegDouble(CompilationUnit *cu, RegLocation rl_dest, RegLocation rl_src)
{
RegLocation rl_result;
rl_src = LoadValueWide(cu, rl_src, kCoreReg);
@@ -352,7 +353,7 @@
StoreValueWide(cu, rl_dest, rl_result);
}
-bool GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
+bool X86Codegen::GenInlinedSqrt(CompilationUnit* cu, CallInfo* info) {
DCHECK_NE(cu->instruction_set, kThumb2);
return false;
}
diff --git a/src/compiler/codegen/x86/int_x86.cc b/src/compiler/codegen/x86/int_x86.cc
index f6eaaf5..190208b 100644
--- a/src/compiler/codegen/x86/int_x86.cc
+++ b/src/compiler/codegen/x86/int_x86.cc
@@ -17,6 +17,7 @@
/* This file contains codegen for the X86 ISA */
#include "x86_lir.h"
+#include "codegen_x86.h"
#include "../codegen_util.h"
#include "../ralloc_util.h"
@@ -25,8 +26,8 @@
/*
* Perform register memory operation.
*/
-LIR* GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
- int reg1, int base, int offset, ThrowKind kind)
+LIR* X86Codegen::GenRegMemCheck(CompilationUnit* cu, ConditionCode c_code,
+ int reg1, int base, int offset, ThrowKind kind)
{
LIR* tgt = RawLIR(cu, 0, kPseudoThrowTarget, kind,
cu->current_dalvik_offset, reg1, base, offset);
@@ -53,8 +54,8 @@
* finish:
*
*/
-void GenCmpLong(CompilationUnit* cu, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2)
+void X86Codegen::GenCmpLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
{
FlushAllRegs(cu);
LockCallTemps(cu); // Prepare for explicit register usage
@@ -96,8 +97,8 @@
return kX86CondO;
}
-LIR* OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1,
- int src2, LIR* target)
+LIR* X86Codegen::OpCmpBranch(CompilationUnit* cu, ConditionCode cond, int src1, int src2,
+ LIR* target)
{
NewLIR2(cu, kX86Cmp32RR, src1, src2);
X86ConditionCode cc = X86ConditionEncoding(cond);
@@ -107,8 +108,8 @@
return branch;
}
-LIR* OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
- int check_value, LIR* target)
+LIR* X86Codegen::OpCmpImmBranch(CompilationUnit* cu, ConditionCode cond, int reg,
+ int check_value, LIR* target)
{
if ((check_value == 0) && (cond == kCondEq || cond == kCondNe)) {
// TODO: when check_value == 0 and reg is rCX, use the jcxz/nz opcode
@@ -122,10 +123,10 @@
return branch;
}
-LIR* OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
+LIR* X86Codegen::OpRegCopyNoInsert(CompilationUnit *cu, int r_dest, int r_src)
{
if (X86_FPREG(r_dest) || X86_FPREG(r_src))
- return FpRegCopy(cu, r_dest, r_src);
+ return OpFpRegCopy(cu, r_dest, r_src);
LIR* res = RawLIR(cu, cu->current_dalvik_offset, kX86Mov32RR,
r_dest, r_src);
if (r_dest == r_src) {
@@ -134,15 +135,15 @@
return res;
}
-LIR* OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+LIR* X86Codegen::OpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
{
LIR *res = OpRegCopyNoInsert(cu, r_dest, r_src);
AppendLIR(cu, res);
return res;
}
-void OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi,
- int src_lo, int src_hi)
+void X86Codegen::OpRegCopyWide(CompilationUnit *cu, int dest_lo, int dest_hi,
+ int src_lo, int src_hi)
{
bool dest_fp = X86_FPREG(dest_lo) && X86_FPREG(dest_hi);
bool src_fp = X86_FPREG(src_lo) && X86_FPREG(src_hi);
@@ -177,7 +178,7 @@
}
}
-void GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) {
+void X86Codegen::GenFusedLongCmpBranch(CompilationUnit* cu, BasicBlock* bb, MIR* mir) {
LIR* label_list = cu->block_label_list;
LIR* taken = &label_list[bb->taken->id];
RegLocation rl_src1 = GetSrcWide(cu, mir, 0);
@@ -216,19 +217,22 @@
}
OpCondBranch(cu, ccode, taken);
}
-RegLocation GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int lit, bool is_div)
+
+RegLocation X86Codegen::GenDivRemLit(CompilationUnit* cu, RegLocation rl_dest, int reg_lo,
+ int lit, bool is_div)
{
LOG(FATAL) << "Unexpected use of GenDivRemLit for x86";
return rl_dest;
}
-RegLocation GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo, int reg_hi, bool is_div)
+RegLocation X86Codegen::GenDivRem(CompilationUnit* cu, RegLocation rl_dest, int reg_lo,
+ int reg_hi, bool is_div)
{
LOG(FATAL) << "Unexpected use of GenDivRem for x86";
return rl_dest;
}
-bool GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
+bool X86Codegen::GenInlinedMinMaxInt(CompilationUnit *cu, CallInfo* info, bool is_min)
{
DCHECK_EQ(cu->instruction_set, kX86);
RegLocation rl_src1 = info->args[0];
@@ -249,41 +253,41 @@
return true;
}
-void OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
+void X86Codegen::OpLea(CompilationUnit* cu, int rBase, int reg1, int reg2, int scale, int offset)
{
NewLIR5(cu, kX86Lea32RA, rBase, reg1, reg2, scale, offset);
}
-void OpTlsCmp(CompilationUnit* cu, int offset, int val)
+void X86Codegen::OpTlsCmp(CompilationUnit* cu, int offset, int val)
{
NewLIR2(cu, kX86Cmp16TI8, offset, val);
}
-bool GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
+bool X86Codegen::GenInlinedCas32(CompilationUnit* cu, CallInfo* info, bool need_write_barrier) {
DCHECK_NE(cu->instruction_set, kThumb2);
return false;
}
-LIR* OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
+LIR* X86Codegen::OpPcRelLoad(CompilationUnit* cu, int reg, LIR* target) {
LOG(FATAL) << "Unexpected use of OpPcRelLoad for x86";
return NULL;
}
-LIR* OpVldm(CompilationUnit* cu, int rBase, int count)
+LIR* X86Codegen::OpVldm(CompilationUnit* cu, int rBase, int count)
{
LOG(FATAL) << "Unexpected use of OpVldm for x86";
return NULL;
}
-LIR* OpVstm(CompilationUnit* cu, int rBase, int count)
+LIR* X86Codegen::OpVstm(CompilationUnit* cu, int rBase, int count)
{
LOG(FATAL) << "Unexpected use of OpVstm for x86";
return NULL;
}
-void GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
- RegLocation rl_result, int lit,
- int first_bit, int second_bit)
+void X86Codegen::GenMultiplyByTwoBitMultiplier(CompilationUnit* cu, RegLocation rl_src,
+ RegLocation rl_result, int lit,
+ int first_bit, int second_bit)
{
int t_reg = AllocTemp(cu);
OpRegRegImm(cu, kOpLsl, t_reg, rl_src.low_reg, second_bit - first_bit);
@@ -294,7 +298,7 @@
}
}
-void GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
+void X86Codegen::GenDivZeroCheck(CompilationUnit* cu, int reg_lo, int reg_hi)
{
int t_reg = AllocTemp(cu);
OpRegRegReg(cu, kOpOr, t_reg, reg_lo, reg_hi);
@@ -303,33 +307,33 @@
}
// Test suspend flag, return target of taken suspend branch
-LIR* OpTestSuspend(CompilationUnit* cu, LIR* target)
+LIR* X86Codegen::OpTestSuspend(CompilationUnit* cu, LIR* target)
{
OpTlsCmp(cu, Thread::ThreadFlagsOffset().Int32Value(), 0);
return OpCondBranch(cu, (target == NULL) ? kCondNe : kCondEq, target);
}
// Decrement register and branch on condition
-LIR* OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
+LIR* X86Codegen::OpDecAndBranch(CompilationUnit* cu, ConditionCode c_code, int reg, LIR* target)
{
OpRegImm(cu, kOpSub, reg, 1);
return OpCmpImmBranch(cu, c_code, reg, 0, target);
}
-bool SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
- RegLocation rl_src, RegLocation rl_dest, int lit)
+bool X86Codegen::SmallLiteralDivide(CompilationUnit* cu, Instruction::Code dalvik_opcode,
+ RegLocation rl_src, RegLocation rl_dest, int lit)
{
LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
return false;
}
-LIR* OpIT(CompilationUnit* cu, ArmConditionCode cond, const char* guide)
+LIR* X86Codegen::OpIT(CompilationUnit* cu, ConditionCode cond, const char* guide)
{
LOG(FATAL) << "Unexpected use of OpIT in x86";
return NULL;
}
-bool GenAddLong(CompilationUnit* cu, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2)
+bool X86Codegen::GenAddLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
{
FlushAllRegs(cu);
LockCallTemps(cu); // Prepare for explicit register usage
@@ -344,8 +348,8 @@
return false;
}
-bool GenSubLong(CompilationUnit* cu, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2)
+bool X86Codegen::GenSubLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
{
FlushAllRegs(cu);
LockCallTemps(cu); // Prepare for explicit register usage
@@ -360,8 +364,8 @@
return false;
}
-bool GenAndLong(CompilationUnit* cu, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2)
+bool X86Codegen::GenAndLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src1,
+ RegLocation rl_src2)
{
FlushAllRegs(cu);
LockCallTemps(cu); // Prepare for explicit register usage
@@ -376,8 +380,8 @@
return false;
}
-bool GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2)
+bool X86Codegen::GenOrLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
FlushAllRegs(cu);
LockCallTemps(cu); // Prepare for explicit register usage
@@ -392,8 +396,8 @@
return false;
}
-bool GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
- RegLocation rl_src1, RegLocation rl_src2)
+bool X86Codegen::GenXorLong(CompilationUnit* cu, RegLocation rl_dest,
+ RegLocation rl_src1, RegLocation rl_src2)
{
FlushAllRegs(cu);
LockCallTemps(cu); // Prepare for explicit register usage
@@ -408,8 +412,7 @@
return false;
}
-bool GenNegLong(CompilationUnit* cu, RegLocation rl_dest,
- RegLocation rl_src)
+bool X86Codegen::GenNegLong(CompilationUnit* cu, RegLocation rl_dest, RegLocation rl_src)
{
FlushAllRegs(cu);
LockCallTemps(cu); // Prepare for explicit register usage
@@ -424,7 +427,7 @@
return false;
}
-void OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset) {
+void X86Codegen::OpRegThreadMem(CompilationUnit* cu, OpKind op, int r_dest, int thread_offset) {
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpCmp: opcode = kX86Cmp32RT; break;
diff --git a/src/compiler/codegen/x86/target_x86.cc b/src/compiler/codegen/x86/target_x86.cc
index ee5c215..c3c79f1 100644
--- a/src/compiler/codegen/x86/target_x86.cc
+++ b/src/compiler/codegen/x86/target_x86.cc
@@ -16,6 +16,7 @@
#include "../../compiler_internals.h"
#include "x86_lir.h"
+#include "codegen_x86.h"
#include "../ralloc_util.h"
#include "../codegen_util.h"
@@ -45,32 +46,32 @@
#endif
};
-RegLocation LocCReturn()
+RegLocation X86Codegen::LocCReturn()
{
RegLocation res = X86_LOC_C_RETURN;
return res;
}
-RegLocation LocCReturnWide()
+RegLocation X86Codegen::LocCReturnWide()
{
RegLocation res = X86_LOC_C_RETURN_WIDE;
return res;
}
-RegLocation LocCReturnFloat()
+RegLocation X86Codegen::LocCReturnFloat()
{
RegLocation res = X86_LOC_C_RETURN_FLOAT;
return res;
}
-RegLocation LocCReturnDouble()
+RegLocation X86Codegen::LocCReturnDouble()
{
RegLocation res = X86_LOC_C_RETURN_DOUBLE;
return res;
}
// Return a target-dependent special register.
-int TargetReg(SpecialTargetRegister reg) {
+int X86Codegen::TargetReg(SpecialTargetRegister reg) {
int res = INVALID_REG;
switch (reg) {
case kSelf: res = rX86_SELF; break;
@@ -95,37 +96,19 @@
}
// Create a double from a pair of singles.
-int S2d(int low_reg, int high_reg)
+int X86Codegen::S2d(int low_reg, int high_reg)
{
return X86_S2D(low_reg, high_reg);
}
-// Is reg a single or double?
-bool FpReg(int reg)
-{
- return X86_FPREG(reg);
-}
-
-// Is reg a single?
-bool SingleReg(int reg)
-{
- return X86_SINGLEREG(reg);
-}
-
-// Is reg a double?
-bool DoubleReg(int reg)
-{
- return X86_DOUBLEREG(reg);
-}
-
// Return mask to strip off fp reg flags and bias.
-uint32_t FpRegMask()
+uint32_t X86Codegen::FpRegMask()
{
return X86_FP_REG_MASK;
}
// True if both regs single, both core or both double.
-bool SameRegType(int reg1, int reg2)
+bool X86Codegen::SameRegType(int reg1, int reg2)
{
return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
}
@@ -133,7 +116,7 @@
/*
* Decode the register id.
*/
-uint64_t GetRegMaskCommon(CompilationUnit* cu, int reg)
+uint64_t X86Codegen::GetRegMaskCommon(CompilationUnit* cu, int reg)
{
uint64_t seed;
int shift;
@@ -149,7 +132,7 @@
return (seed << shift);
}
-uint64_t GetPCUseDefEncoding()
+uint64_t X86Codegen::GetPCUseDefEncoding()
{
/*
* FIXME: might make sense to use a virtual resource encoding bit for pc. Might be
@@ -159,12 +142,12 @@
return 0ULL;
}
-void SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
+void X86Codegen::SetupTargetResourceMasks(CompilationUnit* cu, LIR* lir)
{
DCHECK_EQ(cu->instruction_set, kX86);
// X86-specific resource map setup here.
- uint64_t flags = EncodingMap[lir->opcode].flags;
+ uint64_t flags = X86Codegen::EncodingMap[lir->opcode].flags;
if (flags & REG_USE_SP) {
lir->use_mask |= ENCODE_X86_REG_SP;
@@ -223,7 +206,7 @@
* Interpret a format string and build a string no longer than size
* See format key in Assemble.cc.
*/
-std::string BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
+std::string X86Codegen::BuildInsnString(const char *fmt, LIR *lir, unsigned char* base_addr) {
std::string buf;
size_t i = 0;
size_t fmt_len = strlen(fmt);
@@ -281,7 +264,7 @@
return buf;
}
-void DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix)
+void X86Codegen::DumpResourceMask(LIR *x86LIR, uint64_t mask, const char *prefix)
{
char buf[256];
buf[0] = 0;
@@ -322,7 +305,8 @@
LOG(INFO) << prefix << ": " << buf;
}
}
-void AdjustSpillMask(CompilationUnit* cu) {
+
+void X86Codegen::AdjustSpillMask(CompilationUnit* cu) {
// Adjustment for LR spilling, x86 has no LR so nothing to do here
cu->core_spill_mask |= (1 << rRET);
cu->num_core_spills++;
@@ -334,7 +318,7 @@
* include any holes in the mask. Associate holes with
* Dalvik register INVALID_VREG (0xFFFFU).
*/
-void MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
+void X86Codegen::MarkPreservedSingle(CompilationUnit* cu, int v_reg, int reg)
{
UNIMPLEMENTED(WARNING) << "MarkPreservedSingle";
#if 0
@@ -342,7 +326,7 @@
#endif
}
-void FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
+void X86Codegen::FlushRegWide(CompilationUnit* cu, int reg1, int reg2)
{
RegisterInfo* info1 = GetRegInfo(cu, reg1);
RegisterInfo* info2 = GetRegInfo(cu, reg2);
@@ -364,7 +348,7 @@
}
}
-void FlushReg(CompilationUnit* cu, int reg)
+void X86Codegen::FlushReg(CompilationUnit* cu, int reg)
{
RegisterInfo* info = GetRegInfo(cu, reg);
if (info->live && info->dirty) {
@@ -375,19 +359,19 @@
}
/* Give access to the target-dependent FP register encoding to common code */
-bool IsFpReg(int reg) {
+bool X86Codegen::IsFpReg(int reg) {
return X86_FPREG(reg);
}
/* Clobber all regs that might be used by an external C call */
-void ClobberCalleeSave(CompilationUnit *cu)
+void X86Codegen::ClobberCalleeSave(CompilationUnit *cu)
{
Clobber(cu, rAX);
Clobber(cu, rCX);
Clobber(cu, rDX);
}
-RegLocation GetReturnWideAlt(CompilationUnit* cu) {
+RegLocation X86Codegen::GetReturnWideAlt(CompilationUnit* cu) {
RegLocation res = LocCReturnWide();
CHECK(res.low_reg == rAX);
CHECK(res.high_reg == rDX);
@@ -399,7 +383,7 @@
return res;
}
-RegLocation GetReturnAlt(CompilationUnit* cu)
+RegLocation X86Codegen::GetReturnAlt(CompilationUnit* cu)
{
RegLocation res = LocCReturn();
res.low_reg = rDX;
@@ -408,14 +392,14 @@
return res;
}
-RegisterInfo* GetRegInfo(CompilationUnit* cu, int reg)
+RegisterInfo* X86Codegen::GetRegInfo(CompilationUnit* cu, int reg)
{
return X86_FPREG(reg) ? &cu->reg_pool->FPRegs[reg & X86_FP_REG_MASK]
: &cu->reg_pool->core_regs[reg];
}
/* To be used when explicitly managing register use */
-void LockCallTemps(CompilationUnit* cu)
+void X86Codegen::LockCallTemps(CompilationUnit* cu)
{
LockTemp(cu, rX86_ARG0);
LockTemp(cu, rX86_ARG1);
@@ -424,7 +408,7 @@
}
/* To be used when explicitly managing register use */
-void FreeCallTemps(CompilationUnit* cu)
+void X86Codegen::FreeCallTemps(CompilationUnit* cu)
{
FreeTemp(cu, rX86_ARG0);
FreeTemp(cu, rX86_ARG1);
@@ -432,13 +416,7 @@
FreeTemp(cu, rX86_ARG3);
}
-/* Architecture-specific initializations and checks go here */
-bool ArchVariantInit(void)
-{
- return true;
-}
-
-void GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
+void X86Codegen::GenMemBarrier(CompilationUnit *cu, MemBarrierKind barrier_kind)
{
#if ANDROID_SMP != 0
// TODO: optimize fences
@@ -449,7 +427,7 @@
* Alloc a pair of core registers, or a double. Low reg in low byte,
* high reg in next byte.
*/
-int AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
+int X86Codegen::AllocTypedTempPair(CompilationUnit *cu, bool fp_hint,
int reg_class)
{
int high_reg;
@@ -469,14 +447,14 @@
return res;
}
-int AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class) {
+int X86Codegen::AllocTypedTemp(CompilationUnit *cu, bool fp_hint, int reg_class) {
if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
return AllocTempFloat(cu);
}
return AllocTemp(cu);
}
-void CompilerInitializeRegAlloc(CompilationUnit* cu) {
+void X86Codegen::CompilerInitializeRegAlloc(CompilationUnit* cu) {
int num_regs = sizeof(core_regs)/sizeof(*core_regs);
int num_reserved = sizeof(ReservedRegs)/sizeof(*ReservedRegs);
int num_temps = sizeof(core_temps)/sizeof(*core_temps);
@@ -524,7 +502,7 @@
}
}
-void FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
+void X86Codegen::FreeRegLocTemps(CompilationUnit* cu, RegLocation rl_keep,
RegLocation rl_free)
{
if ((rl_free.low_reg != rl_keep.low_reg) && (rl_free.low_reg != rl_keep.high_reg) &&
@@ -535,7 +513,7 @@
}
}
-void SpillCoreRegs(CompilationUnit* cu) {
+void X86Codegen::SpillCoreRegs(CompilationUnit* cu) {
if (cu->num_core_spills == 0) {
return;
}
@@ -550,7 +528,7 @@
}
}
-void UnSpillCoreRegs(CompilationUnit* cu) {
+void X86Codegen::UnSpillCoreRegs(CompilationUnit* cu) {
if (cu->num_core_spills == 0) {
return;
}
@@ -565,46 +543,44 @@
}
}
-bool BranchUnconditional(LIR* lir)
+bool X86Codegen::IsUnconditionalBranch(LIR* lir)
{
return (lir->opcode == kX86Jmp8 || lir->opcode == kX86Jmp32);
}
/* Common initialization routine for an architecture family */
-bool ArchInit() {
- int i;
-
- for (i = 0; i < kX86Last; i++) {
- if (EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << EncodingMap[i].name
+bool InitX86Codegen(CompilationUnit* cu) {
+ cu->cg.reset(new X86Codegen());
+ for (int i = 0; i < kX86Last; i++) {
+ if (X86Codegen::EncodingMap[i].opcode != i) {
+ LOG(FATAL) << "Encoding order for " << X86Codegen::EncodingMap[i].name
<< " is wrong: expecting " << i << ", seeing "
- << static_cast<int>(EncodingMap[i].opcode);
+ << static_cast<int>(X86Codegen::EncodingMap[i].opcode);
}
}
-
- return ArchVariantInit();
+ return true;
}
// Not used in x86
-int LoadHelper(CompilationUnit* cu, int offset)
+int X86Codegen::LoadHelper(CompilationUnit* cu, int offset)
{
LOG(FATAL) << "Unexpected use of LoadHelper in x86";
return INVALID_REG;
}
-uint64_t GetTargetInstFlags(int opcode)
+uint64_t X86Codegen::GetTargetInstFlags(int opcode)
{
- return EncodingMap[opcode].flags;
+ return X86Codegen::EncodingMap[opcode].flags;
}
-const char* GetTargetInstName(int opcode)
+const char* X86Codegen::GetTargetInstName(int opcode)
{
- return EncodingMap[opcode].name;
+ return X86Codegen::EncodingMap[opcode].name;
}
-const char* GetTargetInstFmt(int opcode)
+const char* X86Codegen::GetTargetInstFmt(int opcode)
{
- return EncodingMap[opcode].fmt;
+ return X86Codegen::EncodingMap[opcode].fmt;
}
} // namespace art
diff --git a/src/compiler/codegen/x86/utility_x86.cc b/src/compiler/codegen/x86/utility_x86.cc
index 22037f3..bdbc547 100644
--- a/src/compiler/codegen/x86/utility_x86.cc
+++ b/src/compiler/codegen/x86/utility_x86.cc
@@ -15,6 +15,7 @@
*/
#include "x86_lir.h"
+#include "codegen_x86.h"
#include "../codegen_util.h"
#include "../ralloc_util.h"
@@ -22,13 +23,7 @@
/* This file contains codegen for the X86 ISA */
-void GenBarrier(CompilationUnit *cu);
-void LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg);
-LIR *LoadWordDisp(CompilationUnit *cu, int rBase, int displacement, int r_dest);
-LIR *StoreWordDisp(CompilationUnit *cu, int rBase, int displacement, int r_src);
-LIR *LoadConstant(CompilationUnit *cu, int r_dest, int value);
-
-LIR *FpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
+LIR* X86Codegen::OpFpRegCopy(CompilationUnit *cu, int r_dest, int r_src)
{
int opcode;
/* must be both DOUBLE or both not DOUBLE */
@@ -64,7 +59,7 @@
* 1) r_dest is freshly returned from AllocTemp or
* 2) The codegen is under fixed register usage
*/
-LIR *LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
+LIR* X86Codegen::LoadConstantNoClobber(CompilationUnit *cu, int r_dest, int value)
{
int r_dest_save = r_dest;
if (X86_FPREG(r_dest)) {
@@ -91,16 +86,14 @@
return res;
}
-LIR* OpBranchUnconditional(CompilationUnit *cu, OpKind op)
+LIR* X86Codegen::OpUnconditionalBranch(CompilationUnit* cu, LIR* target)
{
- CHECK_EQ(op, kOpUncondBr);
- return NewLIR1(cu, kX86Jmp8, 0 /* offset to be patched */ );
+  LIR* res = NewLIR1(cu, kX86Jmp8, 0 /* offset to be patched during assembly */ );
+ res->target = target;
+ return res;
}
-LIR *LoadMultiple(CompilationUnit *cu, int rBase, int r_mask);
-
-X86ConditionCode X86ConditionEncoding(ConditionCode cond);
-LIR* OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
+LIR* X86Codegen::OpCondBranch(CompilationUnit* cu, ConditionCode cc, LIR* target)
{
LIR* branch = NewLIR2(cu, kX86Jcc8, 0 /* offset to be patched */,
X86ConditionEncoding(cc));
@@ -108,7 +101,7 @@
return branch;
}
-LIR *OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
+LIR* X86Codegen::OpReg(CompilationUnit *cu, OpKind op, int r_dest_src)
{
X86OpCode opcode = kX86Bkpt;
switch (op) {
@@ -121,7 +114,7 @@
return NewLIR1(cu, opcode, r_dest_src);
}
-LIR *OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1, int value)
+LIR* X86Codegen::OpRegImm(CompilationUnit *cu, OpKind op, int r_dest_src1, int value)
{
X86OpCode opcode = kX86Bkpt;
bool byte_imm = IS_SIMM8(value);
@@ -148,7 +141,7 @@
return NewLIR2(cu, opcode, r_dest_src1, value);
}
-LIR *OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
+LIR* X86Codegen::OpRegReg(CompilationUnit *cu, OpKind op, int r_dest_src1, int r_src2)
{
X86OpCode opcode = kX86Nop;
bool src2_must_be_cx = false;
@@ -194,7 +187,7 @@
return NewLIR2(cu, opcode, r_dest_src1, r_src2);
}
-LIR* OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
+LIR* X86Codegen::OpRegMem(CompilationUnit *cu, OpKind op, int r_dest, int rBase,
int offset)
{
X86OpCode opcode = kX86Nop;
@@ -218,7 +211,7 @@
return NewLIR3(cu, opcode, r_dest, rBase, offset);
}
-LIR* OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest, int r_src1,
+LIR* X86Codegen::OpRegRegReg(CompilationUnit *cu, OpKind op, int r_dest, int r_src1,
int r_src2)
{
if (r_dest != r_src1 && r_dest != r_src2) {
@@ -267,7 +260,7 @@
}
}
-LIR* OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest, int r_src,
+LIR* X86Codegen::OpRegRegImm(CompilationUnit *cu, OpKind op, int r_dest, int r_src,
int value)
{
if (op == kOpMul) {
@@ -294,7 +287,7 @@
return OpRegImm(cu, op, r_dest, value);
}
-LIR* OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
+LIR* X86Codegen::OpThreadMem(CompilationUnit* cu, OpKind op, int thread_offset)
{
X86OpCode opcode = kX86Bkpt;
switch (op) {
@@ -306,7 +299,7 @@
return NewLIR1(cu, opcode, thread_offset);
}
-LIR* OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
+LIR* X86Codegen::OpMem(CompilationUnit* cu, OpKind op, int rBase, int disp)
{
X86OpCode opcode = kX86Bkpt;
switch (op) {
@@ -318,8 +311,8 @@
return NewLIR2(cu, opcode, rBase, disp);
}
-LIR *LoadConstantValueWide(CompilationUnit *cu, int r_dest_lo,
- int r_dest_hi, int val_lo, int val_hi)
+LIR* X86Codegen::LoadConstantValueWide(CompilationUnit *cu, int r_dest_lo,
+ int r_dest_hi, int val_lo, int val_hi)
{
LIR *res;
if (X86_FPREG(r_dest_lo)) {
@@ -345,22 +338,9 @@
return res;
}
-LIR *LoadMultiple(CompilationUnit *cu, int rBase, int r_mask)
-{
- UNIMPLEMENTED(FATAL) << "LoadMultiple";
- NewLIR0(cu, kX86Bkpt);
- return NULL;
-}
-
-LIR *StoreMultiple(CompilationUnit *cu, int rBase, int r_mask)
-{
- UNIMPLEMENTED(FATAL) << "StoreMultiple";
- NewLIR0(cu, kX86Bkpt);
- return NULL;
-}
-
-LIR* LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
- int displacement, int r_dest, int r_dest_hi, OpSize size, int s_reg) {
+LIR* X86Codegen::LoadBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_dest, int r_dest_hi, OpSize size,
+ int s_reg) {
LIR *load = NULL;
LIR *load2 = NULL;
bool is_array = r_index != INVALID_REG;
@@ -428,10 +408,10 @@
}
}
if (rBase == rX86_SP) {
- AnnotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0))
- >> 2, true /* is_load */, is64bit);
+ AnnotateDalvikRegAccess(cu, load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+ true /* is_load */, is64bit);
if (pair) {
- AnnotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
+ AnnotateDalvikRegAccess(cu, load2, (displacement + HIWORD_OFFSET) >> 2,
true /* is_load */, is64bit);
}
}
@@ -458,26 +438,27 @@
}
/* Load value from base + scaled index. */
-LIR *LoadBaseIndexed(CompilationUnit *cu, int rBase,
+LIR* X86Codegen::LoadBaseIndexed(CompilationUnit *cu, int rBase,
int r_index, int r_dest, int scale, OpSize size) {
return LoadBaseIndexedDisp(cu, rBase, r_index, scale, 0,
r_dest, INVALID_REG, size, INVALID_SREG);
}
-LIR *LoadBaseDisp(CompilationUnit *cu, int rBase, int displacement,
+LIR* X86Codegen::LoadBaseDisp(CompilationUnit *cu, int rBase, int displacement,
int r_dest, OpSize size, int s_reg) {
return LoadBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
r_dest, INVALID_REG, size, s_reg);
}
-LIR *LoadBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+LIR* X86Codegen::LoadBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
int r_dest_lo, int r_dest_hi, int s_reg) {
return LoadBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
r_dest_lo, r_dest_hi, kLong, s_reg);
}
-LIR* StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
- int displacement, int r_src, int r_src_hi, OpSize size, int s_reg) {
+LIR* X86Codegen::StoreBaseIndexedDisp(CompilationUnit *cu, int rBase, int r_index, int scale,
+ int displacement, int r_src, int r_src_hi, OpSize size,
+ int s_reg) {
LIR *store = NULL;
LIR *store2 = NULL;
bool is_array = r_index != INVALID_REG;
@@ -533,10 +514,10 @@
store2 = NewLIR3(cu, opcode, rBase, displacement + HIWORD_OFFSET, r_src_hi);
}
if (rBase == rX86_SP) {
- AnnotateDalvikRegAccess(store, (displacement + (pair ? LOWORD_OFFSET : 0))
- >> 2, false /* is_load */, is64bit);
+ AnnotateDalvikRegAccess(cu, store, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
+ false /* is_load */, is64bit);
if (pair) {
- AnnotateDalvikRegAccess(store2, (displacement + HIWORD_OFFSET) >> 2,
+ AnnotateDalvikRegAccess(cu, store2, (displacement + HIWORD_OFFSET) >> 2,
false /* is_load */, is64bit);
}
}
@@ -556,29 +537,29 @@
}
/* store value base base + scaled index. */
-LIR *StoreBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_src,
+LIR* X86Codegen::StoreBaseIndexed(CompilationUnit *cu, int rBase, int r_index, int r_src,
int scale, OpSize size)
{
return StoreBaseIndexedDisp(cu, rBase, r_index, scale, 0,
r_src, INVALID_REG, size, INVALID_SREG);
}
-LIR *StoreBaseDisp(CompilationUnit *cu, int rBase, int displacement,
- int r_src, OpSize size)
+LIR* X86Codegen::StoreBaseDisp(CompilationUnit *cu, int rBase, int displacement,
+ int r_src, OpSize size)
{
return StoreBaseIndexedDisp(cu, rBase, INVALID_REG, 0,
displacement, r_src, INVALID_REG, size,
INVALID_SREG);
}
-LIR *StoreBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
- int r_src_lo, int r_src_hi)
+LIR* X86Codegen::StoreBaseDispWide(CompilationUnit *cu, int rBase, int displacement,
+ int r_src_lo, int r_src_hi)
{
return StoreBaseIndexedDisp(cu, rBase, INVALID_REG, 0, displacement,
r_src_lo, r_src_hi, kLong, INVALID_SREG);
}
-void LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg)
+void X86Codegen::LoadPair(CompilationUnit *cu, int base, int low_reg, int high_reg)
{
LoadBaseDispWide(cu, base, 0, low_reg, high_reg, INVALID_SREG);
}
diff --git a/src/compiler/codegen/x86/x86_lir.h b/src/compiler/codegen/x86/x86_lir.h
index d58f587..edfcd4d 100644
--- a/src/compiler/codegen/x86/x86_lir.h
+++ b/src/compiler/codegen/x86/x86_lir.h
@@ -103,13 +103,13 @@
* +========================+
*/
-/* Offset to distingish FP regs */
+// Offset to distinguish FP regs.
#define X86_FP_REG_OFFSET 32
-/* Offset to distinguish DP FP regs */
+// Offset to distinguish DP FP regs.
#define X86_FP_DOUBLE (X86_FP_REG_OFFSET + 16)
-/* Offset to distingish the extra regs */
+// Offset to distinguish the extra regs.
#define X86_EXTRA_REG_OFFSET (X86_FP_DOUBLE + 16)
-/* Reg types */
+// Reg types.
#define X86_REGTYPE(x) (x & (X86_FP_REG_OFFSET | X86_FP_DOUBLE))
#define X86_FPREG(x) ((x & X86_FP_REG_OFFSET) == X86_FP_REG_OFFSET)
#define X86_EXTRAREG(x) ((x & X86_EXTRA_REG_OFFSET) == X86_EXTRA_REG_OFFSET)
@@ -127,7 +127,7 @@
/* Mask to strip off fp flags */
#define X86_FP_REG_MASK 0xF
-/* RegisterLocation templates return values (rAX, rAX/rDX or XMM0) */
+// RegisterLocation templates return values (rAX, rAX/rDX or XMM0).
// location, wide, defined, const, fp, core, ref, high_word, home, low_reg, high_reg, s_reg_low
#define X86_LOC_C_RETURN {kLocPhysReg, 0, 0, 0, 0, 0, 0, 0, 1, rAX, INVALID_REG, INVALID_SREG, INVALID_SREG}
#define X86_LOC_C_RETURN_WIDE {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, rAX, rDX, INVALID_SREG, INVALID_SREG}
@@ -137,7 +137,7 @@
enum X86ResourceEncodingPos {
kX86GPReg0 = 0,
kX86RegSP = 4,
- kX86FPReg0 = 16, // xmm0 .. xmm7/xmm15
+ kX86FPReg0 = 16, // xmm0 .. xmm7/xmm15.
kX86FPRegEnd = 32,
kX86RegEnd = kX86FPRegEnd,
};
@@ -145,10 +145,6 @@
#define ENCODE_X86_REG_LIST(N) (static_cast<uint64_t>(N))
#define ENCODE_X86_REG_SP (1ULL << kX86RegSP)
-/*
- * Annotate special-purpose core registers:
- */
-
enum X86NativeRegisterPool {
r0 = 0,
rAX = r0,
@@ -169,7 +165,7 @@
r7 = 7,
rDI = r7,
#ifndef TARGET_REX_SUPPORT
- rRET = 8, // fake return address register for core spill mask
+ rRET = 8, // fake return address register for core spill mask.
#else
r8 = 8,
r9 = 9,
@@ -179,7 +175,7 @@
r13 = 13,
r14 = 14,
r15 = 15,
- rRET = 16, // fake return address register for core spill mask
+ rRET = 16, // fake return address register for core spill mask.
#endif
fr0 = 0 + X86_FP_REG_OFFSET,
fr1 = 1 + X86_FP_REG_OFFSET,
@@ -199,10 +195,6 @@
fr15 = 15 + X86_FP_REG_OFFSET,
};
-/*
- * Target-independent aliases
- */
-
#define rX86_ARG0 rAX
#define rX86_ARG1 rCX
#define rX86_ARG2 rDX
@@ -227,7 +219,7 @@
*/
enum X86OpCode {
kX86First = 0,
- kX8632BitData = kX86First, /* data [31..0] */
+ kX8632BitData = kX86First, // data [31..0].
kX86Bkpt,
kX86Nop,
// Define groups of binary operations
@@ -427,22 +419,24 @@
const char* fmt;
};
-extern X86EncodingMap EncodingMap[kX86Last];
// FIXME: mem barrier type - what do we do for x86?
#define kSY 0
#define kST 0
-/* Offsets of high and low halves of a 64bit value */
+// Offsets of high and low halves of a 64bit value.
#define LOWORD_OFFSET 0
#define HIWORD_OFFSET 4
-/* Segment override instruction prefix used for quick TLS access to Thread::Current() */
+// Segment override instruction prefix used for quick TLS access to Thread::Current().
#define THREAD_PREFIX 0x64
#define IS_SIMM8(v) ((-128 <= (v)) && ((v) <= 127))
#define IS_SIMM16(v) ((-32768 <= (v)) && ((v) <= 32767))
+extern X86EncodingMap EncodingMap[kX86Last];
+extern X86ConditionCode X86ConditionEncoding(ConditionCode cond);
+
} // namespace art
#endif // ART_COMPILER_COMPILER_CODEGEN_X86_X86LIR_H_