[Compiler] Use Art indentation standard
First of several CLs to bring the code into closer alignment with Art and
LLVM standards. Move to 2-space indenting, sticking with the 80-column line
length (which LLVM also wants). LLVM likewise prefers camelCase names, so the
Dalvik naming convention is kept as well (for now).
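For illustration, a minimal sketch of the target style on a hypothetical
helper (not code from this CL): 2-space indentation, lines held under 80
columns, function-definition braces on their own line, and Dalvik-style
camelCase names left as-is.
  // Hypothetical example only; shows the formatting this CL adopts.
  int clampDisplacement(int displacement, int maxDisplacement)
  {
    if (displacement > maxDisplacement) {
      return maxDisplacement;  // Nested blocks indent by two more spaces.
    }
    return displacement;
  }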
Change-Id: I351ab234e640678d97747377cccdd6df0a770f4a
diff --git a/src/compiler/codegen/x86/ArchFactory.cc b/src/compiler/codegen/x86/ArchFactory.cc
index 043d66e..dc13238 100644
--- a/src/compiler/codegen/x86/ArchFactory.cc
+++ b/src/compiler/codegen/x86/ArchFactory.cc
@@ -70,7 +70,7 @@
}
bool genOrLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
+ RegLocation rlSrc1, RegLocation rlSrc2)
{
oatFlushAllRegs(cUnit);
oatLockCallTemps(cUnit); // Prepare for explicit register usage
@@ -149,65 +149,65 @@
void opRegThreadMem(CompilationUnit* cUnit, OpKind op, int rDest, int threadOffset) {
X86OpCode opcode = kX86Bkpt;
switch (op) {
- case kOpCmp: opcode = kX86Cmp32RT; break;
- default:
- LOG(FATAL) << "Bad opcode: " << op;
- break;
+ case kOpCmp: opcode = kX86Cmp32RT; break;
+ default:
+ LOG(FATAL) << "Bad opcode: " << op;
+ break;
}
newLIR2(cUnit, opcode, rDest, threadOffset);
}
void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
{
- /*
- * On entry, rARG0, rARG1, rARG2 are live. Let the register
- * allocation mechanism know so it doesn't try to use any of them when
- * expanding the frame or flushing. This leaves the utility
- * code with no spare temps.
- */
- oatLockTemp(cUnit, rARG0);
- oatLockTemp(cUnit, rARG1);
- oatLockTemp(cUnit, rARG2);
+ /*
+ * On entry, rARG0, rARG1, rARG2 are live. Let the register
+ * allocation mechanism know so it doesn't try to use any of them when
+ * expanding the frame or flushing. This leaves the utility
+ * code with no spare temps.
+ */
+ oatLockTemp(cUnit, rARG0);
+ oatLockTemp(cUnit, rARG1);
+ oatLockTemp(cUnit, rARG2);
- /* Build frame, return address already on stack */
- opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - 4);
+ /* Build frame, return address already on stack */
+ opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - 4);
- /*
- * We can safely skip the stack overflow check if we're
- * a leaf *and* our frame size < fudge factor.
- */
- bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
- ((size_t)cUnit->frameSize <
- Thread::kStackOverflowReservedBytes));
- newLIR0(cUnit, kPseudoMethodEntry);
- /* Spill core callee saves */
- spillCoreRegs(cUnit);
- /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
- DCHECK_EQ(cUnit->numFPSpills, 0);
- if (!skipOverflowCheck) {
- // cmp rSP, fs:[stack_end_]; jcc throw_launchpad
- LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
- opRegThreadMem(cUnit, kOpCmp, rSP, Thread::StackEndOffset().Int32Value());
- opCondBranch(cUnit, kCondUlt, tgt);
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- }
+ /*
+ * We can safely skip the stack overflow check if we're
+ * a leaf *and* our frame size < fudge factor.
+ */
+ bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
+ ((size_t)cUnit->frameSize <
+ Thread::kStackOverflowReservedBytes));
+ newLIR0(cUnit, kPseudoMethodEntry);
+ /* Spill core callee saves */
+ spillCoreRegs(cUnit);
+ /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+ DCHECK_EQ(cUnit->numFPSpills, 0);
+ if (!skipOverflowCheck) {
+ // cmp rSP, fs:[stack_end_]; jcc throw_launchpad
+ LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
+ opRegThreadMem(cUnit, kOpCmp, rSP, Thread::StackEndOffset().Int32Value());
+ opCondBranch(cUnit, kCondUlt, tgt);
+ // Remember branch target - will process later
+ oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
+ }
- flushIns(cUnit);
+ flushIns(cUnit);
- if (cUnit->genDebugger) {
- // Refresh update debugger callout
- UNIMPLEMENTED(WARNING) << "genDebugger";
+ if (cUnit->genDebugger) {
+ // Refresh update debugger callout
+ UNIMPLEMENTED(WARNING) << "genDebugger";
#if 0
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode), rSUSPEND);
- genDebuggerUpdate(cUnit, DEBUGGER_METHOD_ENTRY);
+ loadWordDisp(cUnit, rSELF,
+ OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode), rSUSPEND);
+ genDebuggerUpdate(cUnit, DEBUGGER_METHOD_ENTRY);
#endif
- }
+ }
- oatFreeTemp(cUnit, rARG0);
- oatFreeTemp(cUnit, rARG1);
- oatFreeTemp(cUnit, rARG2);
+ oatFreeTemp(cUnit, rARG0);
+ oatFreeTemp(cUnit, rARG1);
+ oatFreeTemp(cUnit, rARG2);
}
void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb) {
@@ -221,7 +221,7 @@
newLIR0(cUnit, kPseudoMethodExit);
/* If we're compiling for the debugger, generate an update callout */
if (cUnit->genDebugger) {
- genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
+ genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
}
unSpillCoreRegs(cUnit);
/* Remove frame except for return address */
@@ -238,32 +238,32 @@
LIR* thisLIR;
for (thisLIR = (LIR*) cUnit->firstLIRInsn;
- thisLIR != (LIR*) cUnit->lastLIRInsn;
- thisLIR = NEXT_LIR(thisLIR)) {
+ thisLIR != (LIR*) cUnit->lastLIRInsn;
+ thisLIR = NEXT_LIR(thisLIR)) {
- /* Branch to the next instruction */
- if (thisLIR->opcode == kX86Jmp8 || thisLIR->opcode == kX86Jmp32) {
- LIR* nextLIR = thisLIR;
+ /* Branch to the next instruction */
+ if (thisLIR->opcode == kX86Jmp8 || thisLIR->opcode == kX86Jmp32) {
+ LIR* nextLIR = thisLIR;
- while (true) {
- nextLIR = NEXT_LIR(nextLIR);
+ while (true) {
+ nextLIR = NEXT_LIR(nextLIR);
- /*
- * Is the branch target the next instruction?
- */
- if (nextLIR == (LIR*) thisLIR->target) {
- thisLIR->flags.isNop = true;
- break;
- }
+ /*
+ * Is the branch target the next instruction?
+ */
+ if (nextLIR == (LIR*) thisLIR->target) {
+ thisLIR->flags.isNop = true;
+ break;
+ }
- /*
- * Found real useful stuff between the branch and the target.
- * Need to explicitly check the lastLIRInsn here because it
- * might be the last real instruction.
- */
- if (!isPseudoOpcode(nextLIR->opcode) ||
- (nextLIR = (LIR*) cUnit->lastLIRInsn))
- break;
+ /*
+ * Found real useful stuff between the branch and the target.
+ * Need to explicitly check the lastLIRInsn here because it
+ * might be the last real instruction.
+ */
+ if (!isPseudoOpcode(nextLIR->opcode) ||
+ (nextLIR = (LIR*) cUnit->lastLIRInsn))
+ break;
}
}
}
@@ -277,7 +277,8 @@
for (i = 0; i < kX86Last; i++) {
if (EncodingMap[i].opcode != i) {
LOG(FATAL) << "Encoding order for " << EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing " << (int)EncodingMap[i].opcode;
+ << " is wrong: expecting " << i << ", seeing "
+ << (int)EncodingMap[i].opcode;
}
}
diff --git a/src/compiler/codegen/x86/ArchUtility.cc b/src/compiler/codegen/x86/ArchUtility.cc
index d325f5c..4e75ef2 100644
--- a/src/compiler/codegen/x86/ArchUtility.cc
+++ b/src/compiler/codegen/x86/ArchUtility.cc
@@ -24,27 +24,27 @@
/* For dumping instructions */
static const char* x86RegName[] = {
- "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
};
static const char* x86CondName[] = {
- "O",
- "NO",
- "B/NAE/C",
- "NB/AE/NC",
- "Z/EQ",
- "NZ/NE",
- "BE/NA",
- "NBE/A",
- "S",
- "NS",
- "P/PE",
- "NP/PO",
- "L/NGE",
- "NL/GE",
- "LE/NG",
- "NLE/G"
+ "O",
+ "NO",
+ "B/NAE/C",
+ "NB/AE/NC",
+ "Z/EQ",
+ "NZ/NE",
+ "BE/NA",
+ "NBE/A",
+ "S",
+ "NS",
+ "P/PE",
+ "NP/PO",
+ "L/NGE",
+ "NL/GE",
+ "LE/NG",
+ "NLE/G"
};
/*
@@ -95,8 +95,8 @@
break;
case 't':
buf += StringPrintf("0x%08x (L%p)",
- reinterpret_cast<uint32_t>(baseAddr) + lir->offset + operand,
- lir->target);
+ reinterpret_cast<uint32_t>(baseAddr)
+ + lir->offset + operand, lir->target);
break;
default:
buf += StringPrintf("DecodeError '%c'", fmt[i]);
@@ -111,45 +111,45 @@
void oatDumpResourceMask(LIR *lir, u8 mask, const char *prefix)
{
- char buf[256];
- buf[0] = 0;
- LIR *x86LIR = (LIR *) lir;
+ char buf[256];
+ buf[0] = 0;
+ LIR *x86LIR = (LIR *) lir;
- if (mask == ENCODE_ALL) {
- strcpy(buf, "all");
- } else {
- char num[8];
- int i;
+ if (mask == ENCODE_ALL) {
+ strcpy(buf, "all");
+ } else {
+ char num[8];
+ int i;
- for (i = 0; i < kRegEnd; i++) {
- if (mask & (1ULL << i)) {
- sprintf(num, "%d ", i);
- strcat(buf, num);
- }
- }
-
- if (mask & ENCODE_CCODE) {
- strcat(buf, "cc ");
- }
- /* Memory bits */
- if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
- sprintf(buf + strlen(buf), "dr%d%s", x86LIR->aliasInfo & 0xffff,
- (x86LIR->aliasInfo & 0x80000000) ? "(+1)" : "");
- }
- if (mask & ENCODE_LITERAL) {
- strcat(buf, "lit ");
- }
-
- if (mask & ENCODE_HEAP_REF) {
- strcat(buf, "heap ");
- }
- if (mask & ENCODE_MUST_NOT_ALIAS) {
- strcat(buf, "noalias ");
- }
+ for (i = 0; i < kRegEnd; i++) {
+ if (mask & (1ULL << i)) {
+ sprintf(num, "%d ", i);
+ strcat(buf, num);
+ }
}
- if (buf[0]) {
- LOG(INFO) << prefix << ": " << buf;
+
+ if (mask & ENCODE_CCODE) {
+ strcat(buf, "cc ");
}
+ /* Memory bits */
+ if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", x86LIR->aliasInfo & 0xffff,
+ (x86LIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+ }
+ if (mask & ENCODE_LITERAL) {
+ strcat(buf, "lit ");
+ }
+
+ if (mask & ENCODE_HEAP_REF) {
+ strcat(buf, "heap ");
+ }
+ if (mask & ENCODE_MUST_NOT_ALIAS) {
+ strcat(buf, "noalias ");
+ }
+ }
+ if (buf[0]) {
+ LOG(INFO) << prefix << ": " << buf;
+ }
}
} // namespace art
diff --git a/src/compiler/codegen/x86/Codegen.h b/src/compiler/codegen/x86/Codegen.h
index 52ba7c1..f04acd4 100644
--- a/src/compiler/codegen/x86/Codegen.h
+++ b/src/compiler/codegen/x86/Codegen.h
@@ -74,10 +74,10 @@
*/
inline RegisterClass oatRegClassBySize(OpSize size)
{
- return (size == kUnsignedHalf ||
- size == kSignedHalf ||
- size == kUnsignedByte ||
- size == kSignedByte ) ? kCoreReg : kAnyReg;
+ return (size == kUnsignedHalf ||
+ size == kSignedHalf ||
+ size == kUnsignedByte ||
+ size == kSignedByte ) ? kCoreReg : kAnyReg;
}
/*
@@ -89,12 +89,12 @@
*/
#if __BYTE_ORDER == __LITTLE_ENDIAN
inline s4 s4FromSwitchData(const void* switchData) {
- return *(s4*) switchData;
+ return *(s4*) switchData;
}
#else
inline s4 s4FromSwitchData(const void* switchData) {
- u2* data = switchData;
- return data[0] | (((s4) data[1]) << 16);
+ u2* data = switchData;
+ return data[0] | (((s4) data[1]) << 16);
}
#endif
@@ -102,7 +102,6 @@
extern void oatSetupResourceMasks(LIR* lir);
-extern LIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest,
- int rSrc);
+extern LIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc);
} // namespace art
diff --git a/src/compiler/codegen/x86/FP/X86FP.cc b/src/compiler/codegen/x86/FP/X86FP.cc
index fe0d149..f45a099 100644
--- a/src/compiler/codegen/x86/FP/X86FP.cc
+++ b/src/compiler/codegen/x86/FP/X86FP.cc
@@ -16,8 +16,9 @@
namespace art {
-static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2) {
+static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir,
+ RegLocation rlDest, RegLocation rlSrc1,
+ RegLocation rlSrc2) {
X86OpCode op = kX86Nop;
RegLocation rlResult;
diff --git a/src/compiler/codegen/x86/X86/Factory.cc b/src/compiler/codegen/x86/X86/Factory.cc
index 2bd5b42..c3fb6a6 100644
--- a/src/compiler/codegen/x86/X86/Factory.cc
+++ b/src/compiler/codegen/x86/X86/Factory.cc
@@ -26,23 +26,23 @@
//FIXME: restore "static" when usage uncovered
/*static*/ int coreRegs[] = {
- rAX, rCX, rDX, rBX, rSP, rBP, rSI, rDI
+ rAX, rCX, rDX, rBX, rSP, rBP, rSI, rDI
#ifdef TARGET_REX_SUPPORT
- r8, r9, r10, r11, r12, r13, r14, 15
+ r8, r9, r10, r11, r12, r13, r14, 15
#endif
};
/*static*/ int reservedRegs[] = {rSP};
/*static*/ int coreTemps[] = {rAX, rCX, rDX, rBX};
/*static*/ int fpRegs[] = {
- fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+ fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
- fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+ fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
#endif
};
/*static*/ int fpTemps[] = {
- fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+ fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
#ifdef TARGET_REX_SUPPORT
- fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+ fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
#endif
};
@@ -58,29 +58,29 @@
LIR *fpRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
{
- int opcode;
- /* must be both DOUBLE or both not DOUBLE */
- DCHECK_EQ(DOUBLEREG(rDest),DOUBLEREG(rSrc));
- if (DOUBLEREG(rDest)) {
- opcode = kX86MovsdRR;
- } else {
- if (SINGLEREG(rDest)) {
- if (SINGLEREG(rSrc)) {
- opcode = kX86MovssRR;
- } else { // Fpr <- Gpr
- opcode = kX86MovdxrRR;
- }
- } else { // Gpr <- Fpr
- DCHECK(SINGLEREG(rSrc));
- opcode = kX86MovdrxRR;
- }
+ int opcode;
+ /* must be both DOUBLE or both not DOUBLE */
+ DCHECK_EQ(DOUBLEREG(rDest),DOUBLEREG(rSrc));
+ if (DOUBLEREG(rDest)) {
+ opcode = kX86MovsdRR;
+ } else {
+ if (SINGLEREG(rDest)) {
+ if (SINGLEREG(rSrc)) {
+ opcode = kX86MovssRR;
+ } else { // Fpr <- Gpr
+ opcode = kX86MovdxrRR;
+ }
+ } else { // Gpr <- Fpr
+ DCHECK(SINGLEREG(rSrc));
+ opcode = kX86MovdrxRR;
}
- DCHECK((EncodingMap[opcode].flags & IS_BINARY_OP) != 0);
- LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
- if (rDest == rSrc) {
- res->flags.isNop = true;
- }
- return res;
+ }
+ DCHECK((EncodingMap[opcode].flags & IS_BINARY_OP) != 0);
+ LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, opcode, rDest, rSrc);
+ if (rDest == rSrc) {
+ res->flags.isNop = true;
+ }
+ return res;
}
/*
@@ -92,7 +92,8 @@
* 1) rDest is freshly returned from oatAllocTemp or
* 2) The codegen is under fixed register usage
*/
-LIR *loadConstantNoClobber(CompilationUnit *cUnit, int rDest, int value) {
+LIR *loadConstantNoClobber(CompilationUnit *cUnit, int rDest, int value)
+{
int rDestSave = rDest;
if (FPREG(rDest)) {
if (value == 0) {
@@ -117,7 +118,8 @@
return res;
}
-LIR* opBranchUnconditional(CompilationUnit *cUnit, OpKind op) {
+LIR* opBranchUnconditional(CompilationUnit *cUnit, OpKind op)
+{
CHECK_EQ(op, kOpUncondBr);
return newLIR1(cUnit, kX86Jmp8, 0 /* offset to be patched */ );
}
@@ -133,7 +135,8 @@
return branch;
}
-LIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc) {
+LIR *opReg(CompilationUnit *cUnit, OpKind op, int rDestSrc)
+{
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpNeg: opcode = kX86Neg32R; break;
@@ -144,7 +147,8 @@
return newLIR1(cUnit, opcode, rDestSrc);
}
-LIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1, int value) {
+LIR *opRegImm(CompilationUnit *cUnit, OpKind op, int rDestSrc1, int value)
+{
X86OpCode opcode = kX86Bkpt;
bool byteImm = IS_SIMM8(value);
DCHECK(!FPREG(rDestSrc1));
@@ -207,7 +211,9 @@
return newLIR2(cUnit, opcode, rDestSrc1, rSrc2);
}
-LIR* opRegMem(CompilationUnit *cUnit, OpKind op, int rDest, int rBase, int offset) {
+LIR* opRegMem(CompilationUnit *cUnit, OpKind op, int rDest, int rBase,
+ int offset)
+{
X86OpCode opcode = kX86Nop;
switch (op) {
// X86 binary opcodes
@@ -229,7 +235,9 @@
return newLIR3(cUnit, opcode, rDest, rBase, offset);
}
-LIR* opRegRegReg(CompilationUnit *cUnit, OpKind op, int rDest, int rSrc1, int rSrc2) {
+LIR* opRegRegReg(CompilationUnit *cUnit, OpKind op, int rDest, int rSrc1,
+ int rSrc2)
+{
if (rDest != rSrc1 && rDest != rSrc2) {
if (op == kOpAdd) { // lea special case, except can't encode rbp as base
if (rSrc1 == rSrc2) {
@@ -276,7 +284,9 @@
}
}
-LIR* opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest, int rSrc, int value) {
+LIR* opRegRegImm(CompilationUnit *cUnit, OpKind op, int rDest, int rSrc,
+ int value)
+{
if (op == kOpMul) {
X86OpCode opcode = IS_SIMM8(value) ? kX86Imul32RRI8 : kX86Imul32RRI;
return newLIR3(cUnit, opcode, rDest, rSrc, value);
@@ -301,7 +311,8 @@
return opRegImm(cUnit, op, rDest, value);
}
-LIR* opThreadMem(CompilationUnit* cUnit, OpKind op, int threadOffset) {
+LIR* opThreadMem(CompilationUnit* cUnit, OpKind op, int threadOffset)
+{
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpBlx: opcode = kX86CallT; break;
@@ -312,7 +323,8 @@
return newLIR1(cUnit, opcode, threadOffset);
}
-LIR* opMem(CompilationUnit* cUnit, OpKind op, int rBase, int disp) {
+LIR* opMem(CompilationUnit* cUnit, OpKind op, int rBase, int disp)
+{
X86OpCode opcode = kX86Bkpt;
switch (op) {
case kOpBlx: opcode = kX86CallM; break;
@@ -324,7 +336,7 @@
}
LIR *loadConstantValueWide(CompilationUnit *cUnit, int rDestLo,
- int rDestHi, int valLo, int valHi)
+ int rDestHi, int valLo, int valHi)
{
LIR *res;
if (FPREG(rDestLo)) {
@@ -354,111 +366,111 @@
LIR *loadBaseIndexed(CompilationUnit *cUnit, int rBase,
int rIndex, int rDest, int scale, OpSize size)
{
- UNIMPLEMENTED(WARNING) << "loadBaseIndexed";
- newLIR0(cUnit, kX86Bkpt);
- return NULL;
+ UNIMPLEMENTED(WARNING) << "loadBaseIndexed";
+ newLIR0(cUnit, kX86Bkpt);
+ return NULL;
#if 0
- LIR *first = NULL;
- LIR *res;
- X86OpCode opcode = kX86Nop;
- int tReg = oatAllocTemp(cUnit);
+ LIR *first = NULL;
+ LIR *res;
+ X86OpCode opcode = kX86Nop;
+ int tReg = oatAllocTemp(cUnit);
- if (FPREG(rDest)) {
- DCHECK(SINGLEREG(rDest));
- DCHECK((size == kWord) || (size == kSingle));
- size = kSingle;
- } else {
- if (size == kSingle)
- size = kWord;
- }
+ if (FPREG(rDest)) {
+ DCHECK(SINGLEREG(rDest));
+ DCHECK((size == kWord) || (size == kSingle));
+ size = kSingle;
+ } else {
+ if (size == kSingle)
+ size = kWord;
+ }
- if (!scale) {
- first = newLIR3(cUnit, kX86Addu, tReg , rBase, rIndex);
- } else {
- first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
- newLIR3(cUnit, kX86Addu, tReg , rBase, tReg);
- }
+ if (!scale) {
+ first = newLIR3(cUnit, kX86Addu, tReg , rBase, rIndex);
+ } else {
+ first = opRegRegImm(cUnit, kOpLsl, tReg, rIndex, scale);
+ newLIR3(cUnit, kX86Addu, tReg , rBase, tReg);
+ }
- switch (size) {
- case kSingle:
- opcode = kX86Flwc1;
- break;
- case kWord:
- opcode = kX86Lw;
- break;
- case kUnsignedHalf:
- opcode = kX86Lhu;
- break;
- case kSignedHalf:
- opcode = kX86Lh;
- break;
- case kUnsignedByte:
- opcode = kX86Lbu;
- break;
- case kSignedByte:
- opcode = kX86Lb;
- break;
- default:
- LOG(FATAL) << "Bad case in loadBaseIndexed";
- }
+ switch (size) {
+ case kSingle:
+ opcode = kX86Flwc1;
+ break;
+ case kWord:
+ opcode = kX86Lw;
+ break;
+ case kUnsignedHalf:
+ opcode = kX86Lhu;
+ break;
+ case kSignedHalf:
+ opcode = kX86Lh;
+ break;
+ case kUnsignedByte:
+ opcode = kX86Lbu;
+ break;
+ case kSignedByte:
+ opcode = kX86Lb;
+ break;
+ default:
+ LOG(FATAL) << "Bad case in loadBaseIndexed";
+ }
- res = newLIR3(cUnit, opcode, rDest, 0, tReg);
- oatFreeTemp(cUnit, tReg);
- return (first) ? first : res;
+ res = newLIR3(cUnit, opcode, rDest, 0, tReg);
+ oatFreeTemp(cUnit, tReg);
+ return (first) ? first : res;
#endif
}
LIR *loadMultiple(CompilationUnit *cUnit, int rBase, int rMask)
{
- UNIMPLEMENTED(WARNING) << "loadMultiple";
- newLIR0(cUnit, kX86Bkpt);
- return NULL;
+ UNIMPLEMENTED(WARNING) << "loadMultiple";
+ newLIR0(cUnit, kX86Bkpt);
+ return NULL;
#if 0
- int i;
- int loadCnt = 0;
- LIR *res = NULL ;
- genBarrier(cUnit);
+ int i;
+ int loadCnt = 0;
+ LIR *res = NULL ;
+ genBarrier(cUnit);
- for (i = 0; i < 8; i++, rMask >>= 1) {
- if (rMask & 0x1) {
- newLIR3(cUnit, kX86Lw, i+r_A0, loadCnt*4, rBase);
- loadCnt++;
- }
+ for (i = 0; i < 8; i++, rMask >>= 1) {
+ if (rMask & 0x1) {
+ newLIR3(cUnit, kX86Lw, i+r_A0, loadCnt*4, rBase);
+ loadCnt++;
}
+ }
- if (loadCnt) {/* increment after */
- newLIR3(cUnit, kX86Addiu, rBase, rBase, loadCnt*4);
- }
+ if (loadCnt) {/* increment after */
+ newLIR3(cUnit, kX86Addiu, rBase, rBase, loadCnt*4);
+ }
- genBarrier(cUnit);
- return res; /* NULL always returned which should be ok since no callers use it */
+ genBarrier(cUnit);
+ return res; /* NULL always returned which should be ok since no callers use it */
#endif
}
LIR *storeMultiple(CompilationUnit *cUnit, int rBase, int rMask)
{
- UNIMPLEMENTED(WARNING) << "storeMultiple";
- newLIR0(cUnit, kX86Bkpt);
- return NULL;
+ UNIMPLEMENTED(WARNING) << "storeMultiple";
+ newLIR0(cUnit, kX86Bkpt);
+ return NULL;
#if 0
- int i;
- int storeCnt = 0;
- LIR *res = NULL ;
- genBarrier(cUnit);
+ int i;
+ int storeCnt = 0;
+ LIR *res = NULL ;
+ genBarrier(cUnit);
- for (i = 0; i < 8; i++, rMask >>= 1) {
- if (rMask & 0x1) {
- newLIR3(cUnit, kX86Sw, i+r_A0, storeCnt*4, rBase);
- storeCnt++;
- }
+ for (i = 0; i < 8; i++, rMask >>= 1) {
+ if (rMask & 0x1) {
+ newLIR3(cUnit, kX86Sw, i+r_A0, storeCnt*4, rBase);
+ storeCnt++;
}
+ }
- if (storeCnt) { /* increment after */
- newLIR3(cUnit, kX86Addiu, rBase, rBase, storeCnt*4);
- }
+ if (storeCnt) { /* increment after */
+ newLIR3(cUnit, kX86Addiu, rBase, rBase, storeCnt*4);
+ }
- genBarrier(cUnit);
- return res; /* NULL always returned which should be ok since no callers use it */
+ genBarrier(cUnit);
+ return res; /* NULL always returned which should be ok since no callers use it */
#endif
}
@@ -524,11 +536,12 @@
load = newLIR3(cUnit, opcode, rDest, rBase, displacement + LOWORD_OFFSET);
} else {
load = newLIR3(cUnit, opcode, rDest, rBase, displacement + LOWORD_OFFSET);
- load2 = newLIR3(cUnit, opcode, rDestHi, rBase, displacement + HIWORD_OFFSET);
+ load2 = newLIR3(cUnit, opcode, rDestHi, rBase,
+ displacement + HIWORD_OFFSET);
}
if (rBase == rSP) {
- annotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0)) >> 2,
- true /* isLoad */, is64bit);
+ annotateDalvikRegAccess(load, (displacement + (pair ? LOWORD_OFFSET : 0))
+ >> 2, true /* isLoad */, is64bit);
if (pair) {
annotateDalvikRegAccess(load2, (displacement + HIWORD_OFFSET) >> 2,
true /* isLoad */, is64bit);
@@ -536,10 +549,13 @@
}
} else {
if (!pair) {
- load = newLIR5(cUnit, opcode, rDest, rBase, rIndex, scale, displacement + LOWORD_OFFSET);
+ load = newLIR5(cUnit, opcode, rDest, rBase, rIndex, scale,
+ displacement + LOWORD_OFFSET);
} else {
- load = newLIR5(cUnit, opcode, rDest, rBase, rIndex, scale, displacement + LOWORD_OFFSET);
- load2 = newLIR5(cUnit, opcode, rDestHi, rBase, rIndex, scale, displacement + HIWORD_OFFSET);
+ load = newLIR5(cUnit, opcode, rDest, rBase, rIndex, scale,
+ displacement + LOWORD_OFFSET);
+ load2 = newLIR5(cUnit, opcode, rDestHi, rBase, rIndex, scale,
+ displacement + HIWORD_OFFSET);
}
}
@@ -619,14 +635,18 @@
store = newLIR3(cUnit, opcode, rBase, displacement + LOWORD_OFFSET, rSrc);
} else {
store = newLIR3(cUnit, opcode, rBase, displacement + LOWORD_OFFSET, rSrc);
- store2 = newLIR3(cUnit, opcode, rBase, displacement + HIWORD_OFFSET, rSrcHi);
+ store2 = newLIR3(cUnit, opcode, rBase, displacement + HIWORD_OFFSET,
+ rSrcHi);
}
} else {
if (!pair) {
- store = newLIR5(cUnit, opcode, rBase, rIndex, scale, displacement + LOWORD_OFFSET, rSrc);
+ store = newLIR5(cUnit, opcode, rBase, rIndex, scale,
+ displacement + LOWORD_OFFSET, rSrc);
} else {
- store = newLIR5(cUnit, opcode, rBase, rIndex, scale, displacement + LOWORD_OFFSET, rSrc);
- store2 = newLIR5(cUnit, opcode, rBase, rIndex, scale, displacement + HIWORD_OFFSET, rSrcHi);
+ store = newLIR5(cUnit, opcode, rBase, rIndex, scale,
+ displacement + LOWORD_OFFSET, rSrc);
+ store2 = newLIR5(cUnit, opcode, rBase, rIndex, scale,
+ displacement + HIWORD_OFFSET, rSrcHi);
}
}
@@ -634,34 +654,38 @@
}
/* store value base base + scaled index. */
-LIR *storeBaseIndexed(CompilationUnit *cUnit, int rBase, int rIndex, int rSrc, int scale,
- OpSize size)
+LIR *storeBaseIndexed(CompilationUnit *cUnit, int rBase, int rIndex, int rSrc,
+ int scale, OpSize size)
{
return storeBaseIndexedDisp(cUnit, NULL, rBase, rIndex, scale, 0,
rSrc, INVALID_REG, size, INVALID_SREG);
}
-LIR *storeBaseDisp(CompilationUnit *cUnit, int rBase, int displacement, int rSrc, OpSize size) {
- return storeBaseIndexedDisp(cUnit, NULL, rBase, INVALID_REG, 0, displacement,
- rSrc, INVALID_REG, size, INVALID_SREG);
+LIR *storeBaseDisp(CompilationUnit *cUnit, int rBase, int displacement,
+ int rSrc, OpSize size)
+{
+ return storeBaseIndexedDisp(cUnit, NULL, rBase, INVALID_REG, 0,
+ displacement, rSrc, INVALID_REG, size,
+ INVALID_SREG);
}
LIR *storeBaseDispWide(CompilationUnit *cUnit, int rBase, int displacement,
- int rSrcLo, int rSrcHi) {
+ int rSrcLo, int rSrcHi)
+{
return storeBaseIndexedDisp(cUnit, NULL, rBase, INVALID_REG, 0, displacement,
rSrcLo, rSrcHi, kLong, INVALID_SREG);
}
void storePair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
{
- storeWordDisp(cUnit, base, 0, lowReg);
- storeWordDisp(cUnit, base, 4, highReg);
+ storeWordDisp(cUnit, base, 0, lowReg);
+ storeWordDisp(cUnit, base, 4, highReg);
}
void loadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg)
{
- loadWordDisp(cUnit, base, 0, lowReg);
- loadWordDisp(cUnit, base, 4, highReg);
+ loadWordDisp(cUnit, base, 0, lowReg);
+ loadWordDisp(cUnit, base, 4, highReg);
}
} // namespace art
diff --git a/src/compiler/codegen/x86/X86/Gen.cc b/src/compiler/codegen/x86/X86/Gen.cc
index ffc5952..62ff3ad 100644
--- a/src/compiler/codegen/x86/X86/Gen.cc
+++ b/src/compiler/codegen/x86/X86/Gen.cc
@@ -27,7 +27,7 @@
void genSpecialCase(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
SpecialCaseHandler specialCase)
{
- // TODO
+ // TODO
}
/*
@@ -36,13 +36,13 @@
LIR* genRegMemCheck(CompilationUnit* cUnit, ConditionCode cCode,
int reg1, int base, int offset, MIR* mir, ThrowKind kind)
{
- LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind,
- mir ? mir->offset : 0, reg1, base, offset);
- opRegMem(cUnit, kOpCmp, reg1, base, offset);
- LIR* branch = opCondBranch(cUnit, cCode, tgt);
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- return branch;
+ LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind,
+ mir ? mir->offset : 0, reg1, base, offset);
+ opRegMem(cUnit, kOpCmp, reg1, base, offset);
+ LIR* branch = opCondBranch(cUnit, cCode, tgt);
+ // Remember branch target - will process later
+ oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
+ return branch;
}
/*
@@ -51,7 +51,9 @@
*/
BasicBlock *findBlock(CompilationUnit* cUnit, unsigned int codeOffset,
bool split, bool create, BasicBlock** immedPredBlockP);
-void genSparseSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc, LIR* labelList) {
+void genSparseSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
+ LIR* labelList)
+{
const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
if (cUnit->printMe) {
dumpSparseSwitchTable(table);
@@ -64,7 +66,8 @@
int key = keys[i];
BasicBlock* case_block = findBlock(cUnit, mir->offset + targets[i],
false, false, NULL);
- opCmpImmBranch(cUnit, kCondEq, rlSrc.lowReg, key, &labelList[case_block->id]);
+ opCmpImmBranch(cUnit, kCondEq, rlSrc.lowReg, key,
+ &labelList[case_block->id]);
}
}
@@ -84,7 +87,8 @@
* jmp rStartOfMethod
* done:
*/
-void genPackedSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc) {
+void genPackedSwitch(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
+{
const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
if (cUnit->printMe) {
dumpPackedSwitchTable(table);
@@ -120,7 +124,8 @@
// Load the displacement from the switch table
int dispReg = oatAllocTemp(cUnit);
- newLIR5(cUnit, kX86PcRelLoadRA, dispReg, startOfMethodReg, keyReg, 2, (intptr_t)tabRec);
+ newLIR5(cUnit, kX86PcRelLoadRA, dispReg, startOfMethodReg, keyReg, 2,
+ (intptr_t)tabRec);
// Add displacement to start of method
opRegReg(cUnit, kOpAdd, startOfMethodReg, dispReg);
// ..and go!
@@ -132,7 +137,8 @@
branchOver->target = (LIR*)target;
}
-void callRuntimeHelperRegReg(CompilationUnit* cUnit, int helperOffset, int arg0, int arg1);
+void callRuntimeHelperRegReg(CompilationUnit* cUnit, int helperOffset,
+ int arg0, int arg1);
/*
* Array data table format:
* ushort ident = 0x0300 magic value
@@ -147,7 +153,8 @@
{
const u2* table = cUnit->insns + mir->offset + mir->dalvikInsn.vB;
// Add the table to the list - we'll process it later
- FillArrayData *tabRec = (FillArrayData *)oatNew(cUnit, sizeof(FillArrayData), true, kAllocData);
+ FillArrayData *tabRec = (FillArrayData *)oatNew(cUnit, sizeof(FillArrayData),
+ true, kAllocData);
tabRec->table = table;
tabRec->vaddr = mir->offset;
u2 width = tabRec->table[1];
@@ -163,35 +170,37 @@
newLIR1(cUnit, kX86StartOfMethod, rARG2);
newLIR2(cUnit, kX86PcRelAdr, rARG1, (intptr_t)tabRec);
newLIR2(cUnit, kX86Add32RR, rARG1, rARG2);
- callRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rARG0, rARG1);
+ callRuntimeHelperRegReg(cUnit,
+ ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+ rARG0, rARG1);
}
void genNegFloat(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
{
- UNIMPLEMENTED(WARNING) << "genNegFloat " << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- newLIR0(cUnit, kX86Bkpt);
+ UNIMPLEMENTED(WARNING) << "genNegFloat "
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ newLIR0(cUnit, kX86Bkpt);
#if 0
- RegLocation rlResult;
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegRegImm(cUnit, kOpAdd, rlResult.lowReg,
- rlSrc.lowReg, 0x80000000);
- storeValue(cUnit, rlDest, rlResult);
+ RegLocation rlResult;
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegImm(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, 0x80000000);
+ storeValue(cUnit, rlDest, rlResult);
#endif
}
void genNegDouble(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
{
- UNIMPLEMENTED(WARNING) << "genNegDouble" << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
- newLIR0(cUnit, kX86Bkpt);
+ UNIMPLEMENTED(WARNING) << "genNegDouble"
+ << PrettyMethod(cUnit->method_idx, *cUnit->dex_file);
+ newLIR0(cUnit, kX86Bkpt);
#if 0
- RegLocation rlResult;
- rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg,
- 0x80000000);
- opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
- storeValueWide(cUnit, rlDest, rlResult);
+ RegLocation rlResult;
+ rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg, 0x80000000);
+ opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+ storeValueWide(cUnit, rlDest, rlResult);
#endif
}
@@ -203,12 +212,12 @@
*/
void genMonitorEnter(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- oatFlushAllRegs(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
- // Go expensive route - artLockObjectFromCode(self, obj);
- callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARG0);
+ oatFlushAllRegs(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
+ // Go expensive route - artLockObjectFromCode(self, obj);
+ callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARG0);
}
/*
@@ -216,12 +225,12 @@
*/
void genMonitorExit(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
- oatFlushAllRegs(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
- // Go expensive route - UnlockObjectFromCode(obj);
- callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARG0);
+ oatFlushAllRegs(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rARG0); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, rARG0, mir);
+ // Go expensive route - UnlockObjectFromCode(obj);
+ callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARG0);
}
/*
@@ -243,20 +252,20 @@
void genCmpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
RegLocation rlSrc1, RegLocation rlSrc2)
{
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- loadValueDirectWideFixed(cUnit, rlSrc1, r2, r3);
- // Compute (r1:r0) = (r1:r0) - (r2:r3)
- opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
- opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
- opRegReg(cUnit, kOpOr, r0, r1); // r0 = high | low - sets ZF
- newLIR2(cUnit, kX86Set8R, r0, kX86CondNz); // r0 = (r1:r0) != (r2:r3) ? 1 : 0
- newLIR2(cUnit, kX86Movzx8RR, r0, r0);
- opRegImm(cUnit, kOpAsr, r1, 31); // r1 = high >> 31
- opRegReg(cUnit, kOpOr, r0, r1); // r0 holds result
- RegLocation rlResult = LOC_C_RETURN;
- storeValue(cUnit, rlDest, rlResult);
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
+ loadValueDirectWideFixed(cUnit, rlSrc1, r2, r3);
+ // Compute (r1:r0) = (r1:r0) - (r2:r3)
+ opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
+ opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ opRegReg(cUnit, kOpOr, r0, r1); // r0 = high | low - sets ZF
+ newLIR2(cUnit, kX86Set8R, r0, kX86CondNz); // r0 = (r1:r0) != (r2:r3) ? 1 : 0
+ newLIR2(cUnit, kX86Movzx8RR, r0, r0);
+ opRegImm(cUnit, kOpAsr, r1, 31); // r1 = high >> 31
+ opRegReg(cUnit, kOpOr, r0, r1); // r0 holds result
+ RegLocation rlResult = LOC_C_RETURN;
+ storeValue(cUnit, rlDest, rlResult);
}
X86ConditionCode oatX86ConditionEncoding(ConditionCode cond) {
@@ -281,11 +290,13 @@
return kX86CondO;
}
-LIR* opCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1, int src2, LIR* target)
+LIR* opCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1,
+ int src2, LIR* target)
{
newLIR2(cUnit, kX86Cmp32RR, src1, src2);
X86ConditionCode cc = oatX86ConditionEncoding(cond);
- LIR* branch = newLIR2(cUnit, kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
+ LIR* branch = newLIR2(cUnit, kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
+ cc);
branch->target = target;
return branch;
}
@@ -307,25 +318,26 @@
LIR* opRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
{
- if (FPREG(rDest) || FPREG(rSrc))
- return fpRegCopy(cUnit, rDest, rSrc);
- LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, kX86Mov32RR,
- rDest, rSrc);
- if (rDest == rSrc) {
- res->flags.isNop = true;
- }
- return res;
+ if (FPREG(rDest) || FPREG(rSrc))
+ return fpRegCopy(cUnit, rDest, rSrc);
+ LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, kX86Mov32RR,
+ rDest, rSrc);
+ if (rDest == rSrc) {
+ res->flags.isNop = true;
+ }
+ return res;
}
LIR* opRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
{
- LIR *res = opRegCopyNoInsert(cUnit, rDest, rSrc);
- oatAppendLIR(cUnit, res);
- return res;
+ LIR *res = opRegCopyNoInsert(cUnit, rDest, rSrc);
+ oatAppendLIR(cUnit, res);
+ return res;
}
void opRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
- int srcLo, int srcHi) {
+ int srcLo, int srcHi)
+{
bool destFP = FPREG(destLo) && FPREG(destHi);
bool srcFP = FPREG(srcLo) && FPREG(srcHi);
assert(FPREG(srcLo) == FPREG(srcHi));
diff --git a/src/compiler/codegen/x86/X86/Ralloc.cc b/src/compiler/codegen/x86/X86/Ralloc.cc
index 38b9df8..2886b8f 100644
--- a/src/compiler/codegen/x86/X86/Ralloc.cc
+++ b/src/compiler/codegen/x86/X86/Ralloc.cc
@@ -29,7 +29,8 @@
* high reg in next byte.
*/
int oatAllocTypedTempPair(CompilationUnit *cUnit, bool fpHint,
- int regClass) {
+ int regClass)
+{
int highReg;
int lowReg;
int res = 0;
@@ -65,12 +66,12 @@
cUnit->regPool = pool;
pool->numCoreRegs = numRegs;
pool->coreRegs = (RegisterInfo *)
- oatNew(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs),
- true, kAllocRegAlloc);
+ oatNew(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs), true,
+ kAllocRegAlloc);
pool->numFPRegs = numFPRegs;
pool->FPRegs = (RegisterInfo *)
- oatNew(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true,
- kAllocRegAlloc);
+ oatNew(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true,
+ kAllocRegAlloc);
oatInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
oatInitPool(pool->FPRegs, fpRegs, pool->numFPRegs);
// Keep special registers from being allocated
@@ -104,7 +105,8 @@
}
void freeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
- RegLocation rlFree) {
+ RegLocation rlFree)
+{
if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
(rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
// No overlap, free both
diff --git a/src/compiler/codegen/x86/X86LIR.h b/src/compiler/codegen/x86/X86LIR.h
index 85d2565..3646a1f 100644
--- a/src/compiler/codegen/x86/X86LIR.h
+++ b/src/compiler/codegen/x86/X86LIR.h
@@ -144,18 +144,18 @@
#define LOC_C_RETURN_WIDE_DOUBLE {kLocPhysReg, 1, 0, 1, 0, 0, 1, fr0, fr1, INVALID_SREG}
enum ResourceEncodingPos {
- kGPReg0 = 0,
- kRegSP = 4,
- kRegLR = -1,
- kFPReg0 = 16, // xmm0 .. xmm7/xmm15
- kFPRegEnd = 32,
- kRegEnd = kFPRegEnd,
- kCCode = kRegEnd,
- // The following four bits are for memory disambiguation
- kDalvikReg, // 1 Dalvik Frame (can be fully disambiguated)
- kLiteral, // 2 Literal pool (can be fully disambiguated)
- kHeapRef, // 3 Somewhere on the heap (alias with any other heap)
- kMustNotAlias, // 4 Guaranteed to be non-alias (eg *(r6+x))
+ kGPReg0 = 0,
+ kRegSP = 4,
+ kRegLR = -1,
+ kFPReg0 = 16, // xmm0 .. xmm7/xmm15
+ kFPRegEnd = 32,
+ kRegEnd = kFPRegEnd,
+ kCCode = kRegEnd,
+ // The following four bits are for memory disambiguation
+ kDalvikReg, // 1 Dalvik Frame (can be fully disambiguated)
+ kLiteral, // 2 Literal pool (can be fully disambiguated)
+ kHeapRef, // 3 Somewhere on the heap (alias with any other heap)
+ kMustNotAlias, // 4 Guaranteed to be non-alias (eg *(r6+x))
};
#define ENCODE_REG_LIST(N) ((u8) N)
@@ -242,49 +242,49 @@
/* X86 condition encodings */
enum X86ConditionCode {
- kX86CondO = 0x0, // overflow
- kX86CondNo = 0x1, // not overflow
+ kX86CondO = 0x0, // overflow
+ kX86CondNo = 0x1, // not overflow
- kX86CondB = 0x2, // below
- kX86CondNae = kX86CondB, // not-above-equal
- kX86CondC = kX86CondB, // carry
+ kX86CondB = 0x2, // below
+ kX86CondNae = kX86CondB, // not-above-equal
+ kX86CondC = kX86CondB, // carry
- kX86CondNb = 0x3, // not-below
- kX86CondAe = kX86CondNb, // above-equal
- kX86CondNc = kX86CondNb, // not-carry
+ kX86CondNb = 0x3, // not-below
+ kX86CondAe = kX86CondNb, // above-equal
+ kX86CondNc = kX86CondNb, // not-carry
- kX86CondZ = 0x4, // zero
- kX86CondEq = kX86CondZ, // equal
+ kX86CondZ = 0x4, // zero
+ kX86CondEq = kX86CondZ, // equal
- kX86CondNz = 0x5, // not-zero
- kX86CondNe = kX86CondNz, // not-equal
+ kX86CondNz = 0x5, // not-zero
+ kX86CondNe = kX86CondNz, // not-equal
- kX86CondBe = 0x6, // below-equal
- kX86CondNa = kX86CondBe, // not-above
+ kX86CondBe = 0x6, // below-equal
+ kX86CondNa = kX86CondBe, // not-above
- kX86CondNbe = 0x7, // not-below-equal
- kX86CondA = kX86CondNbe,// above
+ kX86CondNbe = 0x7, // not-below-equal
+ kX86CondA = kX86CondNbe,// above
- kX86CondS = 0x8, // sign
- kX86CondNs = 0x9, // not-sign
+ kX86CondS = 0x8, // sign
+ kX86CondNs = 0x9, // not-sign
- kX86CondP = 0xA, // 8-bit parity even
- kX86CondPE = kX86CondP,
+ kX86CondP = 0xA, // 8-bit parity even
+ kX86CondPE = kX86CondP,
- kX86CondNp = 0xB, // 8-bit parity odd
- kX86CondPo = kX86CondNp,
+ kX86CondNp = 0xB, // 8-bit parity odd
+ kX86CondPo = kX86CondNp,
- kX86CondL = 0xC, // less-than
- kX86CondNge = kX86CondL, // not-greater-equal
+ kX86CondL = 0xC, // less-than
+ kX86CondNge = kX86CondL, // not-greater-equal
- kX86CondNl = 0xD, // not-less-than
- kX86CondGe = kX86CondNl, // not-greater-equal
+ kX86CondNl = 0xD, // not-less-than
+ kX86CondGe = kX86CondNl, // not-greater-equal
- kX86CondLe = 0xE, // less-than-equal
- kX86CondNg = kX86CondLe, // not-greater
+ kX86CondLe = 0xE, // less-than-equal
+ kX86CondNg = kX86CondLe, // not-greater
- kX86CondNle = 0xF, // not-less-than
- kX86CondG = kX86CondNle,// greater
+ kX86CondNle = 0xF, // not-less-than
+ kX86CondG = kX86CondNle,// greater
};
/*
@@ -293,49 +293,49 @@
* Assemble.cc.
*/
enum X86OpCode {
- kPseudoIntrinsicRetry = -16,
- kPseudoSuspendTarget = -15,
- kPseudoThrowTarget = -14,
- kPseudoCaseLabel = -13,
- kPseudoMethodEntry = -12,
- kPseudoMethodExit = -11,
- kPseudoBarrier = -10,
- kPseudoExtended = -9,
- kPseudoSSARep = -8,
- kPseudoEntryBlock = -7,
- kPseudoExitBlock = -6,
- kPseudoTargetLabel = -5,
- kPseudoDalvikByteCodeBoundary = -4,
- kPseudoPseudoAlign4 = -3,
- kPseudoEHBlockLabel = -2,
- kPseudoNormalBlockLabel = -1,
- kX86First,
- kX8632BitData = kX86First, /* data [31..0] */
- kX86Bkpt,
- kX86Nop,
- // Define groups of binary operations
- // MR - Memory Register - opcode [base + disp], reg
- // - lir operands - 0: base, 1: disp, 2: reg
- // AR - Array Register - opcode [base + index * scale + disp], reg
- // - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
- // TR - Thread Register - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
- // - lir operands - 0: disp, 1: reg
- // RR - Register Register - opcode reg1, reg2
- // - lir operands - 0: reg1, 1: reg2
- // RM - Register Memory - opcode reg, [base + disp]
- // - lir operands - 0: reg, 1: base, 2: disp
- // RA - Register Array - opcode reg, [base + index * scale + disp]
- // - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
- // RT - Register Thread - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
- // - lir operands - 0: reg, 1: disp
- // RI - Register Immediate - opcode reg, #immediate
- // - lir operands - 0: reg, 1: immediate
- // MI - Memory Immediate - opcode [base + disp], #immediate
- // - lir operands - 0: base, 1: disp, 2: immediate
- // AI - Array Immediate - opcode [base + index * scale + disp], #immediate
- // - lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
- // TI - Thread Register - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
- // - lir operands - 0: disp, 1: imm
+ kPseudoIntrinsicRetry = -16,
+ kPseudoSuspendTarget = -15,
+ kPseudoThrowTarget = -14,
+ kPseudoCaseLabel = -13,
+ kPseudoMethodEntry = -12,
+ kPseudoMethodExit = -11,
+ kPseudoBarrier = -10,
+ kPseudoExtended = -9,
+ kPseudoSSARep = -8,
+ kPseudoEntryBlock = -7,
+ kPseudoExitBlock = -6,
+ kPseudoTargetLabel = -5,
+ kPseudoDalvikByteCodeBoundary = -4,
+ kPseudoPseudoAlign4 = -3,
+ kPseudoEHBlockLabel = -2,
+ kPseudoNormalBlockLabel = -1,
+ kX86First,
+ kX8632BitData = kX86First, /* data [31..0] */
+ kX86Bkpt,
+ kX86Nop,
+ // Define groups of binary operations
+ // MR - Memory Register - opcode [base + disp], reg
+ // - lir operands - 0: base, 1: disp, 2: reg
+ // AR - Array Register - opcode [base + index * scale + disp], reg
+ // - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
+ // TR - Thread Register - opcode fs:[disp], reg - where fs: is equal to Thread::Current()
+ // - lir operands - 0: disp, 1: reg
+ // RR - Register Register - opcode reg1, reg2
+ // - lir operands - 0: reg1, 1: reg2
+ // RM - Register Memory - opcode reg, [base + disp]
+ // - lir operands - 0: reg, 1: base, 2: disp
+ // RA - Register Array - opcode reg, [base + index * scale + disp]
+ // - lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
+ // RT - Register Thread - opcode reg, fs:[disp] - where fs: is equal to Thread::Current()
+ // - lir operands - 0: reg, 1: disp
+ // RI - Register Immediate - opcode reg, #immediate
+ // - lir operands - 0: reg, 1: immediate
+ // MI - Memory Immediate - opcode [base + disp], #immediate
+ // - lir operands - 0: base, 1: disp, 2: immediate
+ // AI - Array Immediate - opcode [base + index * scale + disp], #immediate
+ // - lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
+ // TI - Thread Register - opcode fs:[disp], imm - where fs: is equal to Thread::Current()
+ // - lir operands - 0: disp, 1: imm
#define BinaryOpCode(opcode) \
opcode ## 8MR, opcode ## 8AR, opcode ## 8TR, \
opcode ## 8RR, opcode ## 8RM, opcode ## 8RA, opcode ## 8RT, \
@@ -348,118 +348,118 @@
opcode ## 32RR, opcode ## 32RM, opcode ## 32RA, opcode ## 32RT, \
opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, opcode ## 32TI, \
opcode ## 32RI8, opcode ## 32MI8, opcode ## 32AI8, opcode ## 32TI8
- BinaryOpCode(kX86Add),
- BinaryOpCode(kX86Or),
- BinaryOpCode(kX86Adc),
- BinaryOpCode(kX86Sbb),
- BinaryOpCode(kX86And),
- BinaryOpCode(kX86Sub),
- BinaryOpCode(kX86Xor),
- BinaryOpCode(kX86Cmp),
+ BinaryOpCode(kX86Add),
+ BinaryOpCode(kX86Or),
+ BinaryOpCode(kX86Adc),
+ BinaryOpCode(kX86Sbb),
+ BinaryOpCode(kX86And),
+ BinaryOpCode(kX86Sub),
+ BinaryOpCode(kX86Xor),
+ BinaryOpCode(kX86Cmp),
#undef BinaryOpCode
- kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
- kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
- kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
- kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
- kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
- kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
- kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
- kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
- kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
- kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
- kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
- kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
- kX86Lea32RA,
- // RC - Register CL - opcode reg, CL
- // - lir operands - 0: reg, 1: CL
- // MC - Memory CL - opcode [base + disp], CL
- // - lir operands - 0: base, 1: disp, 2: CL
- // AC - Array CL - opcode [base + index * scale + disp], CL
- // - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
+ kX86Imul16RRI, kX86Imul16RMI, kX86Imul16RAI,
+ kX86Imul32RRI, kX86Imul32RMI, kX86Imul32RAI,
+ kX86Imul32RRI8, kX86Imul32RMI8, kX86Imul32RAI8,
+ kX86Mov8MR, kX86Mov8AR, kX86Mov8TR,
+ kX86Mov8RR, kX86Mov8RM, kX86Mov8RA, kX86Mov8RT,
+ kX86Mov8RI, kX86Mov8MI, kX86Mov8AI, kX86Mov8TI,
+ kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
+ kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
+ kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
+ kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
+ kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
+ kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
+ kX86Lea32RA,
+ // RC - Register CL - opcode reg, CL
+ // - lir operands - 0: reg, 1: CL
+ // MC - Memory CL - opcode [base + disp], CL
+ // - lir operands - 0: base, 1: disp, 2: CL
+ // AC - Array CL - opcode [base + index * scale + disp], CL
+ // - lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: CL
#define BinaryShiftOpCode(opcode) \
- opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
- opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
- opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
- opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
- opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
- opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
- BinaryShiftOpCode(kX86Rol),
- BinaryShiftOpCode(kX86Ror),
- BinaryShiftOpCode(kX86Rcl),
- BinaryShiftOpCode(kX86Rcr),
- BinaryShiftOpCode(kX86Sal),
- BinaryShiftOpCode(kX86Shr),
- BinaryShiftOpCode(kX86Sar),
+ opcode ## 8RI, opcode ## 8MI, opcode ## 8AI, \
+ opcode ## 8RC, opcode ## 8MC, opcode ## 8AC, \
+ opcode ## 16RI, opcode ## 16MI, opcode ## 16AI, \
+ opcode ## 16RC, opcode ## 16MC, opcode ## 16AC, \
+ opcode ## 32RI, opcode ## 32MI, opcode ## 32AI, \
+ opcode ## 32RC, opcode ## 32MC, opcode ## 32AC
+ BinaryShiftOpCode(kX86Rol),
+ BinaryShiftOpCode(kX86Ror),
+ BinaryShiftOpCode(kX86Rcl),
+ BinaryShiftOpCode(kX86Rcr),
+ BinaryShiftOpCode(kX86Sal),
+ BinaryShiftOpCode(kX86Shr),
+ BinaryShiftOpCode(kX86Sar),
#undef BinaryShiftOpcode
#define UnaryOpcode(opcode, reg, mem, array) \
- opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
- opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
- opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
- UnaryOpcode(kX86Test, RI, MI, AI),
- UnaryOpcode(kX86Not, R, M, A),
- UnaryOpcode(kX86Neg, R, M, A),
- UnaryOpcode(kX86Mul, DaR, DaM, DaA),
- UnaryOpcode(kX86Imul, DaR, DaM, DaA),
- UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
- UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
+ opcode ## 8 ## reg, opcode ## 8 ## mem, opcode ## 8 ## array, \
+ opcode ## 16 ## reg, opcode ## 16 ## mem, opcode ## 16 ## array, \
+ opcode ## 32 ## reg, opcode ## 32 ## mem, opcode ## 32 ## array
+ UnaryOpcode(kX86Test, RI, MI, AI),
+ UnaryOpcode(kX86Not, R, M, A),
+ UnaryOpcode(kX86Neg, R, M, A),
+ UnaryOpcode(kX86Mul, DaR, DaM, DaA),
+ UnaryOpcode(kX86Imul, DaR, DaM, DaA),
+ UnaryOpcode(kX86Divmod, DaR, DaM, DaA),
+ UnaryOpcode(kX86Idivmod, DaR, DaM, DaA),
#undef UnaryOpcode
#define Binary0fOpCode(opcode) \
opcode ## RR, opcode ## RM, opcode ## RA
- Binary0fOpCode(kX86Movsd),
- kX86MovsdMR,
- kX86MovsdAR,
- Binary0fOpCode(kX86Movss),
- kX86MovssMR,
- kX86MovssAR,
- Binary0fOpCode(kX86Cvtsi2sd), // int to double
- Binary0fOpCode(kX86Cvtsi2ss), // int to float
- Binary0fOpCode(kX86Cvttsd2si), // truncating double to int
- Binary0fOpCode(kX86Cvttss2si), // truncating float to int
- Binary0fOpCode(kX86Cvtsd2si), // rounding double to int
- Binary0fOpCode(kX86Cvtss2si), // rounding float to int
- Binary0fOpCode(kX86Ucomisd), // unordered double compare
- Binary0fOpCode(kX86Ucomiss), // unordered float compare
- Binary0fOpCode(kX86Comisd), // double compare
- Binary0fOpCode(kX86Comiss), // float compare
- Binary0fOpCode(kX86Orps), // or of floating point registers
- Binary0fOpCode(kX86Xorps), // xor of floating point registers
- Binary0fOpCode(kX86Addsd), // double add
- Binary0fOpCode(kX86Addss), // float add
- Binary0fOpCode(kX86Mulsd), // double multiply
- Binary0fOpCode(kX86Mulss), // float multiply
- Binary0fOpCode(kX86Cvtss2sd), // float to double
- Binary0fOpCode(kX86Cvtsd2ss), // double to float
- Binary0fOpCode(kX86Subsd), // double subtract
- Binary0fOpCode(kX86Subss), // float subtract
- Binary0fOpCode(kX86Divsd), // double divide
- Binary0fOpCode(kX86Divss), // float divide
- kX86PsllqRI, // shift of floating point registers
- Binary0fOpCode(kX86Movdxr), // move into xmm from gpr
- Binary0fOpCode(kX86Movdrx), // move into reg from xmm
- kX86Set8R, kX86Set8M, kX86Set8A,// set byte depending on condition operand
- kX86Mfence, // memory barrier
- Binary0fOpCode(kX86Imul16), // 16bit multiply
- Binary0fOpCode(kX86Imul32), // 32bit multiply
- Binary0fOpCode(kX86Movzx8), // zero-extend 8-bit value
- Binary0fOpCode(kX86Movzx16), // zero-extend 16-bit value
- Binary0fOpCode(kX86Movsx8), // sign-extend 8-bit value
- Binary0fOpCode(kX86Movsx16), // sign-extend 16-bit value
+ Binary0fOpCode(kX86Movsd),
+ kX86MovsdMR,
+ kX86MovsdAR,
+ Binary0fOpCode(kX86Movss),
+ kX86MovssMR,
+ kX86MovssAR,
+ Binary0fOpCode(kX86Cvtsi2sd), // int to double
+ Binary0fOpCode(kX86Cvtsi2ss), // int to float
+ Binary0fOpCode(kX86Cvttsd2si),// truncating double to int
+ Binary0fOpCode(kX86Cvttss2si),// truncating float to int
+ Binary0fOpCode(kX86Cvtsd2si), // rounding double to int
+ Binary0fOpCode(kX86Cvtss2si), // rounding float to int
+ Binary0fOpCode(kX86Ucomisd), // unordered double compare
+ Binary0fOpCode(kX86Ucomiss), // unordered float compare
+ Binary0fOpCode(kX86Comisd), // double compare
+ Binary0fOpCode(kX86Comiss), // float compare
+ Binary0fOpCode(kX86Orps), // or of floating point registers
+ Binary0fOpCode(kX86Xorps), // xor of floating point registers
+ Binary0fOpCode(kX86Addsd), // double add
+ Binary0fOpCode(kX86Addss), // float add
+ Binary0fOpCode(kX86Mulsd), // double multiply
+ Binary0fOpCode(kX86Mulss), // float multiply
+ Binary0fOpCode(kX86Cvtss2sd), // float to double
+ Binary0fOpCode(kX86Cvtsd2ss), // double to float
+ Binary0fOpCode(kX86Subsd), // double subtract
+ Binary0fOpCode(kX86Subss), // float subtract
+ Binary0fOpCode(kX86Divsd), // double divide
+ Binary0fOpCode(kX86Divss), // float divide
+ kX86PsllqRI, // shift of floating point registers
+ Binary0fOpCode(kX86Movdxr), // move into xmm from gpr
+ Binary0fOpCode(kX86Movdrx), // move into reg from xmm
+ kX86Set8R, kX86Set8M, kX86Set8A,// set byte depending on condition operand
+ kX86Mfence, // memory barrier
+ Binary0fOpCode(kX86Imul16), // 16bit multiply
+ Binary0fOpCode(kX86Imul32), // 32bit multiply
+ Binary0fOpCode(kX86Movzx8), // zero-extend 8-bit value
+ Binary0fOpCode(kX86Movzx16), // zero-extend 16-bit value
+ Binary0fOpCode(kX86Movsx8), // sign-extend 8-bit value
+ Binary0fOpCode(kX86Movsx16), // sign-extend 16-bit value
#undef Binary0fOpCode
- kX86Jcc8, kX86Jcc32, // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
- kX86Jmp8, kX86Jmp32, // jmp rel8/32; lir operands - 0: rel, target assigned
- kX86JmpR, // jmp reg; lir operands - 0: reg
- kX86CallR, // call reg; lir operands - 0: reg
- kX86CallM, // call [base + disp]; lir operands - 0: base, 1: disp
- kX86CallA, // call [base + index * scale + disp]
- // lir operands - 0: base, 1: index, 2: scale, 3: disp
- kX86CallT, // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
- kX86Ret, // ret; no lir operands
- kX86StartOfMethod, // call 0; pop reg; sub reg, # - generate start of method into reg
- // lir operands - 0: reg
- kX86PcRelLoadRA, // mov reg, [base + index * scale + PC relative displacement]
- // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
- kX86PcRelAdr, // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
- kX86Last
+ kX86Jcc8, kX86Jcc32, // jCC rel8/32; lir operands - 0: rel, 1: CC, target assigned
+ kX86Jmp8, kX86Jmp32, // jmp rel8/32; lir operands - 0: rel, target assigned
+ kX86JmpR, // jmp reg; lir operands - 0: reg
+ kX86CallR, // call reg; lir operands - 0: reg
+ kX86CallM, // call [base + disp]; lir operands - 0: base, 1: disp
+ kX86CallA, // call [base + index * scale + disp]
+ // lir operands - 0: base, 1: index, 2: scale, 3: disp
+ kX86CallT, // call fs:[disp]; fs: is equal to Thread::Current(); lir operands - 0: disp
+ kX86Ret, // ret; no lir operands
+ kX86StartOfMethod, // call 0; pop reg; sub reg, # - generate start of method into reg
+ // lir operands - 0: reg
+ kX86PcRelLoadRA, // mov reg, [base + index * scale + PC relative displacement]
+ // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
+ kX86PcRelAdr, // mov reg, PC relative displacement; lir operands - 0: reg, 1: table
+ kX86Last
};
/* Instruction assembly fieldLoc kind */
@@ -477,10 +477,10 @@
kShiftRegCl, kShiftMemCl, kShiftArrayCl, // Shift opcode with register CL.
kRegRegReg, kRegRegMem, kRegRegArray, // RRR, RRM, RRA instruction kinds.
kRegCond, kMemCond, kArrayCond, // R, M, A instruction kinds following by a condition.
- kJmp, kJcc, kCall, // Branch instruction kinds.
- kPcRel, // Operation with displacement that is PC relative
- kMacro, // An instruction composing multiple others
- kUnimplemented // Encoding used when an instruction isn't yet implemented.
+ kJmp, kJcc, kCall, // Branch instruction kinds.
+ kPcRel, // Operation with displacement that is PC relative
+ kMacro, // An instruction composing multiple others
+ kUnimplemented // Encoding used when an instruction isn't yet implemented.
};
/* Struct used to define the EncodingMap positions for each X86 opcode */
@@ -489,16 +489,16 @@
X86EncodingKind kind; // Used to discriminate in the union below
int flags;
struct {
- uint8_t prefix1; // non-zero => a prefix byte
- uint8_t prefix2; // non-zero => a second prefix byte
- uint8_t opcode; // 1 byte opcode
- uint8_t extra_opcode1; // possible extra opcode byte
- uint8_t extra_opcode2; // possible second extra opcode byte
- // 3bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
- // encoding kind
- uint8_t modrm_opcode;
- uint8_t ax_opcode; // non-zero => shorter encoding for AX as a destination
- uint8_t immediate_bytes; // number of bytes of immediate
+ uint8_t prefix1; // non-zero => a prefix byte
+ uint8_t prefix2; // non-zero => a second prefix byte
+ uint8_t opcode; // 1 byte opcode
+ uint8_t extra_opcode1; // possible extra opcode byte
+ uint8_t extra_opcode2; // possible second extra opcode byte
+ // 3bit opcode that gets encoded in the register bits of the modrm byte, use determined by the
+ // encoding kind
+ uint8_t modrm_opcode;
+ uint8_t ax_opcode; // non-zero => shorter encoding for AX as a destination
+ uint8_t immediate_bytes; // number of bytes of immediate
} skeleton;
const char *name;
const char* fmt;
@@ -512,32 +512,32 @@
/* Bit flags describing the behavior of each native opcode */
enum X86OpFeatureFlags {
- kIsBranch = 0,
- kRegDef0,
- kRegDef1,
- kRegDefSP,
- kRegDefList0,
- kRegDefList1,
- kRegUse0,
- kRegUse1,
- kRegUse2,
- kRegUse3,
- kRegUseSP,
- kRegUseList0,
- kRegUseList1,
- kNoOperand,
- kIsUnaryOp,
- kIsBinaryOp,
- kIsTertiaryOp,
- kIsQuadOp,
- kIsQuinOp,
- kIsSextupleOp,
- kIsIT,
- kSetsCCodes,
- kUsesCCodes,
- kMemLoad,
- kMemStore,
- kPCRelFixup,
+ kIsBranch = 0,
+ kRegDef0,
+ kRegDef1,
+ kRegDefSP,
+ kRegDefList0,
+ kRegDefList1,
+ kRegUse0,
+ kRegUse1,
+ kRegUse2,
+ kRegUse3,
+ kRegUseSP,
+ kRegUseList0,
+ kRegUseList1,
+ kNoOperand,
+ kIsUnaryOp,
+ kIsBinaryOp,
+ kIsTertiaryOp,
+ kIsQuadOp,
+ kIsQuinOp,
+ kIsSextupleOp,
+ kIsIT,
+ kSetsCCodes,
+ kUsesCCodes,
+ kMemLoad,
+ kMemStore,
+ kPCRelFixup,
// FIXME: add NEEDS_FIXUP to instruction attributes
};
@@ -591,7 +591,7 @@
/* Keys for target-specific scheduling and other optimization hints */
enum X86TargetOptHints {
- kMaxHoistDistance,
+ kMaxHoistDistance,
};
/* Offsets of high and low halves of a 64bit value */
diff --git a/src/compiler/codegen/x86/X86RallocUtil.cc b/src/compiler/codegen/x86/X86RallocUtil.cc
index a85cb8a..58ad25a 100644
--- a/src/compiler/codegen/x86/X86RallocUtil.cc
+++ b/src/compiler/codegen/x86/X86RallocUtil.cc
@@ -41,64 +41,60 @@
*/
void oatMarkPreservedSingle(CompilationUnit* cUnit, int vReg, int reg)
{
- UNIMPLEMENTED(WARNING) << "oatMarkPreservedSingle";
+ UNIMPLEMENTED(WARNING) << "oatMarkPreservedSingle";
#if 0
- LOG(FATAL) << "No support yet for promoted FP regs";
+ LOG(FATAL) << "No support yet for promoted FP regs";
#endif
}
void oatFlushRegWide(CompilationUnit* cUnit, int reg1, int reg2)
{
- RegisterInfo* info1 = oatGetRegInfo(cUnit, reg1);
- RegisterInfo* info2 = oatGetRegInfo(cUnit, reg2);
- DCHECK(info1 && info2 && info1->pair && info2->pair &&
- (info1->partner == info2->reg) &&
- (info2->partner == info1->reg));
- if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
- if (!(info1->isTemp && info2->isTemp)) {
- /* Should not happen. If it does, there's a problem in evalLoc */
- LOG(FATAL) << "Long half-temp, half-promoted";
- }
-
- info1->dirty = false;
- info2->dirty = false;
- if (SRegToVReg(cUnit, info2->sReg) <
- SRegToVReg(cUnit, info1->sReg))
- info1 = info2;
- int vReg = SRegToVReg(cUnit, info1->sReg);
- oatFlushRegWideImpl(cUnit, rSP,
- oatVRegOffset(cUnit, vReg),
- info1->reg, info1->partner);
+ RegisterInfo* info1 = oatGetRegInfo(cUnit, reg1);
+ RegisterInfo* info2 = oatGetRegInfo(cUnit, reg2);
+ DCHECK(info1 && info2 && info1->pair && info2->pair &&
+ (info1->partner == info2->reg) &&
+ (info2->partner == info1->reg));
+ if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+ if (!(info1->isTemp && info2->isTemp)) {
+ /* Should not happen. If it does, there's a problem in evalLoc */
+ LOG(FATAL) << "Long half-temp, half-promoted";
}
+
+ info1->dirty = false;
+ info2->dirty = false;
+ if (SRegToVReg(cUnit, info2->sReg) < SRegToVReg(cUnit, info1->sReg))
+ info1 = info2;
+ int vReg = SRegToVReg(cUnit, info1->sReg);
+ oatFlushRegWideImpl(cUnit, rSP, oatVRegOffset(cUnit, vReg),
+ info1->reg, info1->partner);
+ }
}
void oatFlushReg(CompilationUnit* cUnit, int reg)
{
- RegisterInfo* info = oatGetRegInfo(cUnit, reg);
- if (info->live && info->dirty) {
- info->dirty = false;
- int vReg = SRegToVReg(cUnit, info->sReg);
- oatFlushRegImpl(cUnit, rSP,
- oatVRegOffset(cUnit, vReg),
- reg, kWord);
- }
+ RegisterInfo* info = oatGetRegInfo(cUnit, reg);
+ if (info->live && info->dirty) {
+ info->dirty = false;
+ int vReg = SRegToVReg(cUnit, info->sReg);
+ oatFlushRegImpl(cUnit, rSP, oatVRegOffset(cUnit, vReg), reg, kWord);
+ }
}
/* Give access to the target-dependent FP register encoding to common code */
bool oatIsFpReg(int reg) {
- return FPREG(reg);
+ return FPREG(reg);
}
uint32_t oatFpRegMask() {
- return FP_REG_MASK;
+ return FP_REG_MASK;
}
/* Clobber all regs that might be used by an external C call */
extern void oatClobberCalleeSave(CompilationUnit *cUnit)
{
- oatClobber(cUnit, rAX);
- oatClobber(cUnit, rCX);
- oatClobber(cUnit, rDX);
+ oatClobber(cUnit, rAX);
+ oatClobber(cUnit, rCX);
+ oatClobber(cUnit, rDX);
}
extern RegLocation oatGetReturnWideAlt(CompilationUnit* cUnit) {
@@ -115,41 +111,41 @@
extern RegLocation oatGetReturnAlt(CompilationUnit* cUnit)
{
- RegLocation res = LOC_C_RETURN;
- res.lowReg = rDX;
- oatClobber(cUnit, rDX);
- oatMarkInUse(cUnit, rDX);
- return res;
+ RegLocation res = LOC_C_RETURN;
+ res.lowReg = rDX;
+ oatClobber(cUnit, rDX);
+ oatMarkInUse(cUnit, rDX);
+ return res;
}
extern RegisterInfo* oatGetRegInfo(CompilationUnit* cUnit, int reg)
{
- return FPREG(reg) ? &cUnit->regPool->FPRegs[reg & FP_REG_MASK]
- : &cUnit->regPool->coreRegs[reg];
+ return FPREG(reg) ? &cUnit->regPool->FPRegs[reg & FP_REG_MASK]
+ : &cUnit->regPool->coreRegs[reg];
}
/* To be used when explicitly managing register use */
extern void oatLockCallTemps(CompilationUnit* cUnit)
{
- oatLockTemp(cUnit, rARG0);
- oatLockTemp(cUnit, rARG1);
- oatLockTemp(cUnit, rARG2);
- oatLockTemp(cUnit, rARG3);
+ oatLockTemp(cUnit, rARG0);
+ oatLockTemp(cUnit, rARG1);
+ oatLockTemp(cUnit, rARG2);
+ oatLockTemp(cUnit, rARG3);
}
/* To be used when explicitly managing register use */
extern void oatFreeCallTemps(CompilationUnit* cUnit)
{
- oatFreeTemp(cUnit, rARG0);
- oatFreeTemp(cUnit, rARG1);
- oatFreeTemp(cUnit, rARG2);
- oatFreeTemp(cUnit, rARG3);
+ oatFreeTemp(cUnit, rARG0);
+ oatFreeTemp(cUnit, rARG1);
+ oatFreeTemp(cUnit, rARG2);
+ oatFreeTemp(cUnit, rARG3);
}
/* Convert an instruction to a NOP */
void oatNopLIR( LIR* lir)
{
- ((LIR*)lir)->flags.isNop = true;
+ ((LIR*)lir)->flags.isNop = true;
}
} // namespace art
diff --git a/src/compiler/codegen/x86/x86/ArchVariant.cc b/src/compiler/codegen/x86/x86/ArchVariant.cc
index 2bb84d7..4b70202 100644
--- a/src/compiler/codegen/x86/x86/ArchVariant.cc
+++ b/src/compiler/codegen/x86/x86/ArchVariant.cc
@@ -27,33 +27,33 @@
*/
InstructionSet oatInstructionSet()
{
- return kX86;
+ return kX86;
}
/* Architecture-specific initializations and checks go here */
bool oatArchVariantInit(void)
{
- return true;
+ return true;
}
int dvmCompilerTargetOptHint(int key)
{
- int res;
- switch (key) {
- case kMaxHoistDistance:
- res = 2;
- break;
- default:
- LOG(FATAL) << "Unknown target optimization hint key: " << key;
- }
- return res;
+ int res;
+ switch (key) {
+ case kMaxHoistDistance:
+ res = 2;
+ break;
+ default:
+ LOG(FATAL) << "Unknown target optimization hint key: " << key;
+ }
+ return res;
}
void oatGenMemBarrier(CompilationUnit *cUnit, int /* barrierKind */)
{
#if ANDROID_SMP != 0
- // TODO: optimize fences
- newLIR0(cUnit, kX86Mfence);
+ // TODO: optimize fences
+ newLIR0(cUnit, kX86Mfence);
#endif
}