Quick compiler source reorganization - part 1
A step towards cleanup of the quick compiler source. In this
CL we rename all files to ART standards, combine some of the
old target-specific files that may have made sense in the
JIT, but no longer do. Also removed some codegen/<target>/
subdirectories, combined and deleted some existing files.
Still quite a bit of work to do in cleaning up header files,
getting some better consistency in what codegen functions
go where. That will happen in later CLs.
No logic changes in this CL - just renaming and moving stuff around.
Change-Id: Ic172cd3b76d4c670f8e4d5fdd4a3e967db3f4c1e
diff --git a/src/compiler/codegen/x86/ArchFactory.cc b/src/compiler/codegen/x86/ArchFactory.cc
deleted file mode 100644
index 0ed6848..0000000
--- a/src/compiler/codegen/x86/ArchFactory.cc
+++ /dev/null
@@ -1,282 +0,0 @@
-/*
- * Copyright (C) 2011 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains x86-specific codegen factory support. */
-
-namespace art {
-
-bool genAddLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
-{
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
- // Compute (r1:r0) = (r1:r0) + (r2:r3)
- opRegReg(cUnit, kOpAdd, r0, r2); // r0 = r0 + r2
- opRegReg(cUnit, kOpAdc, r1, r3); // r1 = r1 + r3 + CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
- INVALID_SREG, INVALID_SREG};
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
-}
-
-bool genSubLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
-{
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
- // Compute (r1:r0) = (r1:r0) + (r2:r3)
- opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
- opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
- INVALID_SREG, INVALID_SREG};
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
-}
-
-bool genAndLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
-{
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
- // Compute (r1:r0) = (r1:r0) + (r2:r3)
- opRegReg(cUnit, kOpAnd, r0, r2); // r0 = r0 - r2
- opRegReg(cUnit, kOpAnd, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
- INVALID_SREG, INVALID_SREG};
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
-}
-
-bool genOrLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
-{
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
- // Compute (r1:r0) = (r1:r0) + (r2:r3)
- opRegReg(cUnit, kOpOr, r0, r2); // r0 = r0 - r2
- opRegReg(cUnit, kOpOr, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
- INVALID_SREG, INVALID_SREG};
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
-}
-
-bool genXorLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
-{
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
- // Compute (r1:r0) = (r1:r0) + (r2:r3)
- opRegReg(cUnit, kOpXor, r0, r2); // r0 = r0 - r2
- opRegReg(cUnit, kOpXor, r1, r3); // r1 = r1 - r3 - CF
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
- INVALID_SREG, INVALID_SREG};
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
-}
-
-bool genNegLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc)
-{
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- loadValueDirectWideFixed(cUnit, rlSrc, r0, r1);
- // Compute (r1:r0) = -(r1:r0)
- opRegReg(cUnit, kOpNeg, r0, r0); // r0 = -r0
- opRegImm(cUnit, kOpAdc, r1, 0); // r1 = r1 + CF
- opRegReg(cUnit, kOpNeg, r1, r1); // r1 = -r1
- RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
- INVALID_SREG, INVALID_SREG};
- storeValueWide(cUnit, rlDest, rlResult);
- return false;
-}
-
-void spillCoreRegs(CompilationUnit* cUnit) {
- if (cUnit->numCoreSpills == 0) {
- return;
- }
- // Spill mask not including fake return address register
- uint32_t mask = cUnit->coreSpillMask & ~(1 << rRET);
- int offset = cUnit->frameSize - (4 * cUnit->numCoreSpills);
- for (int reg = 0; mask; mask >>= 1, reg++) {
- if (mask & 0x1) {
- storeWordDisp(cUnit, rX86_SP, offset, reg);
- offset += 4;
- }
- }
-}
-
-void unSpillCoreRegs(CompilationUnit* cUnit) {
- if (cUnit->numCoreSpills == 0) {
- return;
- }
- // Spill mask not including fake return address register
- uint32_t mask = cUnit->coreSpillMask & ~(1 << rRET);
- int offset = cUnit->frameSize - (4 * cUnit->numCoreSpills);
- for (int reg = 0; mask; mask >>= 1, reg++) {
- if (mask & 0x1) {
- loadWordDisp(cUnit, rX86_SP, offset, reg);
- offset += 4;
- }
- }
-}
-
-void opRegThreadMem(CompilationUnit* cUnit, OpKind op, int rDest, int threadOffset) {
- X86OpCode opcode = kX86Bkpt;
- switch (op) {
- case kOpCmp: opcode = kX86Cmp32RT; break;
- default:
- LOG(FATAL) << "Bad opcode: " << op;
- break;
- }
- newLIR2(cUnit, opcode, rDest, threadOffset);
-}
-
-void genEntrySequence(CompilationUnit* cUnit, RegLocation* argLocs,
- RegLocation rlMethod)
-{
- /*
- * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live. Let the register
- * allocation mechanism know so it doesn't try to use any of them when
- * expanding the frame or flushing. This leaves the utility
- * code with no spare temps.
- */
- oatLockTemp(cUnit, rX86_ARG0);
- oatLockTemp(cUnit, rX86_ARG1);
- oatLockTemp(cUnit, rX86_ARG2);
-
- /* Build frame, return address already on stack */
- opRegImm(cUnit, kOpSub, rX86_SP, cUnit->frameSize - 4);
-
- /*
- * We can safely skip the stack overflow check if we're
- * a leaf *and* our frame size < fudge factor.
- */
- bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
- ((size_t)cUnit->frameSize <
- Thread::kStackOverflowReservedBytes));
- newLIR0(cUnit, kPseudoMethodEntry);
- /* Spill core callee saves */
- spillCoreRegs(cUnit);
- /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
- DCHECK_EQ(cUnit->numFPSpills, 0);
- if (!skipOverflowCheck) {
- // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
- LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
- opRegThreadMem(cUnit, kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
- opCondBranch(cUnit, kCondUlt, tgt);
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- }
-
- flushIns(cUnit, argLocs, rlMethod);
-
- oatFreeTemp(cUnit, rX86_ARG0);
- oatFreeTemp(cUnit, rX86_ARG1);
- oatFreeTemp(cUnit, rX86_ARG2);
-}
-
-void genExitSequence(CompilationUnit* cUnit) {
- /*
- * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
- * allocated by the register utilities as temps.
- */
- oatLockTemp(cUnit, rX86_RET0);
- oatLockTemp(cUnit, rX86_RET1);
-
- newLIR0(cUnit, kPseudoMethodExit);
- unSpillCoreRegs(cUnit);
- /* Remove frame except for return address */
- opRegImm(cUnit, kOpAdd, rX86_SP, cUnit->frameSize - 4);
- newLIR0(cUnit, kX86Ret);
-}
-
-/*
- * Nop any unconditional branches that go to the next instruction.
- * Note: new redundant branches may be inserted later, and we'll
- * use a check in final instruction assembly to nop those out.
- */
-void removeRedundantBranches(CompilationUnit* cUnit) {
- LIR* thisLIR;
-
- for (thisLIR = (LIR*) cUnit->firstLIRInsn;
- thisLIR != (LIR*) cUnit->lastLIRInsn;
- thisLIR = NEXT_LIR(thisLIR)) {
-
- /* Branch to the next instruction */
- if (thisLIR->opcode == kX86Jmp8 || thisLIR->opcode == kX86Jmp32) {
- LIR* nextLIR = thisLIR;
-
- while (true) {
- nextLIR = NEXT_LIR(nextLIR);
-
- /*
- * Is the branch target the next instruction?
- */
- if (nextLIR == (LIR*) thisLIR->target) {
- thisLIR->flags.isNop = true;
- break;
- }
-
- /*
- * Found real useful stuff between the branch and the target.
- * Need to explicitly check the lastLIRInsn here because it
- * might be the last real instruction.
- */
- if (!isPseudoOpcode(nextLIR->opcode) ||
- (nextLIR = (LIR*) cUnit->lastLIRInsn))
- break;
- }
- }
- }
-}
-
-
-/* Common initialization routine for an architecture family */
-bool oatArchInit() {
- int i;
-
- for (i = 0; i < kX86Last; i++) {
- if (EncodingMap[i].opcode != i) {
- LOG(FATAL) << "Encoding order for " << EncodingMap[i].name
- << " is wrong: expecting " << i << ", seeing "
- << (int)EncodingMap[i].opcode;
- }
- }
-
- return oatArchVariantInit();
-}
-
-// Not used in x86
-int loadHelper(CompilationUnit* cUnit, int offset)
-{
- LOG(FATAL) << "Unexpected use of loadHelper in x86";
- return INVALID_REG;
-}
-
-
-
-} // namespace art
diff --git a/src/compiler/codegen/x86/ArchUtility.cc b/src/compiler/codegen/x86/ArchUtility.cc
deleted file mode 100644
index 32dd811..0000000
--- a/src/compiler/codegen/x86/ArchUtility.cc
+++ /dev/null
@@ -1,304 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "../../CompilerInternals.h"
-#include "X86LIR.h"
-#include "../Ralloc.h"
-
-#include <string>
-
-namespace art {
-
-RegLocation locCReturn()
-{
- RegLocation res = X86_LOC_C_RETURN;
- return res;
-}
-
-RegLocation locCReturnWide()
-{
- RegLocation res = X86_LOC_C_RETURN_WIDE;
- return res;
-}
-
-RegLocation locCReturnFloat()
-{
- RegLocation res = X86_LOC_C_RETURN_FLOAT;
- return res;
-}
-
-RegLocation locCReturnDouble()
-{
- RegLocation res = X86_LOC_C_RETURN_DOUBLE;
- return res;
-}
-
-// Return a target-dependent special register.
-int targetReg(SpecialTargetRegister reg) {
- int res = INVALID_REG;
- switch (reg) {
- case kSelf: res = rX86_SELF; break;
- case kSuspend: res = rX86_SUSPEND; break;
- case kLr: res = rX86_LR; break;
- case kPc: res = rX86_PC; break;
- case kSp: res = rX86_SP; break;
- case kArg0: res = rX86_ARG0; break;
- case kArg1: res = rX86_ARG1; break;
- case kArg2: res = rX86_ARG2; break;
- case kArg3: res = rX86_ARG3; break;
- case kFArg0: res = rX86_FARG0; break;
- case kFArg1: res = rX86_FARG1; break;
- case kFArg2: res = rX86_FARG2; break;
- case kFArg3: res = rX86_FARG3; break;
- case kRet0: res = rX86_RET0; break;
- case kRet1: res = rX86_RET1; break;
- case kInvokeTgt: res = rX86_INVOKE_TGT; break;
- case kCount: res = rX86_COUNT; break;
- }
- return res;
-}
-
-// Create a double from a pair of singles.
-int s2d(int lowReg, int highReg)
-{
- return X86_S2D(lowReg, highReg);
-}
-
-// Is reg a single or double?
-bool fpReg(int reg)
-{
- return X86_FPREG(reg);
-}
-
-// Is reg a single?
-bool singleReg(int reg)
-{
- return X86_SINGLEREG(reg);
-}
-
-// Is reg a double?
-bool doubleReg(int reg)
-{
- return X86_DOUBLEREG(reg);
-}
-
-// Return mask to strip off fp reg flags and bias.
-uint32_t fpRegMask()
-{
- return X86_FP_REG_MASK;
-}
-
-// True if both regs single, both core or both double.
-bool sameRegType(int reg1, int reg2)
-{
- return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
-}
-
-/*
- * Decode the register id.
- */
-u8 getRegMaskCommon(CompilationUnit* cUnit, int reg)
-{
- u8 seed;
- int shift;
- int regId;
-
- regId = reg & 0xf;
- /* Double registers in x86 are just a single FP register */
- seed = 1;
- /* FP register starts at bit position 16 */
- shift = X86_FPREG(reg) ? kX86FPReg0 : 0;
- /* Expand the double register id into single offset */
- shift += regId;
- return (seed << shift);
-}
-
-uint64_t getPCUseDefEncoding()
-{
- /*
- * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be
- * able to clean up some of the x86/Arm_Mips differences
- */
- LOG(FATAL) << "Unexpected call to getPCUseDefEncoding for x86";
- return 0ULL;
-}
-
-void setupTargetResourceMasks(CompilationUnit* cUnit, LIR* lir)
-{
- DCHECK_EQ(cUnit->instructionSet, kX86);
-
- // X86-specific resource map setup here.
- uint64_t flags = EncodingMap[lir->opcode].flags;
-
- if (flags & REG_USE_SP) {
- lir->useMask |= ENCODE_X86_REG_SP;
- }
-
- if (flags & REG_DEF_SP) {
- lir->defMask |= ENCODE_X86_REG_SP;
- }
-
- if (flags & REG_DEFA) {
- oatSetupRegMask(cUnit, &lir->defMask, rAX);
- }
-
- if (flags & REG_DEFD) {
- oatSetupRegMask(cUnit, &lir->defMask, rDX);
- }
- if (flags & REG_USEA) {
- oatSetupRegMask(cUnit, &lir->useMask, rAX);
- }
-
- if (flags & REG_USEC) {
- oatSetupRegMask(cUnit, &lir->useMask, rCX);
- }
-
- if (flags & REG_USED) {
- oatSetupRegMask(cUnit, &lir->useMask, rDX);
- }
-}
-
-/* For dumping instructions */
-static const char* x86RegName[] = {
- "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
-};
-
-static const char* x86CondName[] = {
- "O",
- "NO",
- "B/NAE/C",
- "NB/AE/NC",
- "Z/EQ",
- "NZ/NE",
- "BE/NA",
- "NBE/A",
- "S",
- "NS",
- "P/PE",
- "NP/PO",
- "L/NGE",
- "NL/GE",
- "LE/NG",
- "NLE/G"
-};
-
-/*
- * Interpret a format string and build a string no longer than size
- * See format key in Assemble.cc.
- */
-std::string buildInsnString(const char *fmt, LIR *lir, unsigned char* baseAddr) {
- std::string buf;
- size_t i = 0;
- size_t fmt_len = strlen(fmt);
- while (i < fmt_len) {
- if (fmt[i] != '!') {
- buf += fmt[i];
- i++;
- } else {
- i++;
- DCHECK_LT(i, fmt_len);
- char operand_number_ch = fmt[i];
- i++;
- if (operand_number_ch == '!') {
- buf += "!";
- } else {
- int operand_number = operand_number_ch - '0';
- DCHECK_LT(operand_number, 6); // Expect upto 6 LIR operands.
- DCHECK_LT(i, fmt_len);
- int operand = lir->operands[operand_number];
- switch (fmt[i]) {
- case 'c':
- DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName));
- buf += x86CondName[operand];
- break;
- case 'd':
- buf += StringPrintf("%d", operand);
- break;
- case 'p': {
- SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(operand);
- buf += StringPrintf("0x%08x", tabRec->offset);
- break;
- }
- case 'r':
- if (X86_FPREG(operand) || X86_DOUBLEREG(operand)) {
- int fp_reg = operand & X86_FP_REG_MASK;
- buf += StringPrintf("xmm%d", fp_reg);
- } else {
- DCHECK_LT(static_cast<size_t>(operand), sizeof(x86RegName));
- buf += x86RegName[operand];
- }
- break;
- case 't':
- buf += StringPrintf("0x%08x (L%p)",
- reinterpret_cast<uint32_t>(baseAddr)
- + lir->offset + operand, lir->target);
- break;
- default:
- buf += StringPrintf("DecodeError '%c'", fmt[i]);
- break;
- }
- i++;
- }
- }
- }
- return buf;
-}
-
-void oatDumpResourceMask(LIR *lir, u8 mask, const char *prefix)
-{
- char buf[256];
- buf[0] = 0;
- LIR *x86LIR = (LIR *) lir;
-
- if (mask == ENCODE_ALL) {
- strcpy(buf, "all");
- } else {
- char num[8];
- int i;
-
- for (i = 0; i < kX86RegEnd; i++) {
- if (mask & (1ULL << i)) {
- sprintf(num, "%d ", i);
- strcat(buf, num);
- }
- }
-
- if (mask & ENCODE_CCODE) {
- strcat(buf, "cc ");
- }
- /* Memory bits */
- if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
- sprintf(buf + strlen(buf), "dr%d%s", x86LIR->aliasInfo & 0xffff,
- (x86LIR->aliasInfo & 0x80000000) ? "(+1)" : "");
- }
- if (mask & ENCODE_LITERAL) {
- strcat(buf, "lit ");
- }
-
- if (mask & ENCODE_HEAP_REF) {
- strcat(buf, "heap ");
- }
- if (mask & ENCODE_MUST_NOT_ALIAS) {
- strcat(buf, "noalias ");
- }
- }
- if (buf[0]) {
- LOG(INFO) << prefix << ": " << buf;
- }
-}
-
-} // namespace art
diff --git a/src/compiler/codegen/x86/X86/Gen.cc b/src/compiler/codegen/x86/X86/Gen.cc
deleted file mode 100644
index 37cd725..0000000
--- a/src/compiler/codegen/x86/X86/Gen.cc
+++ /dev/null
@@ -1,551 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This file contains codegen for the X86 ISA */
-
-namespace art {
-
-void genSpecialCase(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
- SpecialCaseHandler specialCase)
-{
- // TODO
-}
-
-/*
- * Perform register memory operation.
- */
-LIR* genRegMemCheck(CompilationUnit* cUnit, ConditionCode cCode,
- int reg1, int base, int offset, ThrowKind kind)
-{
- LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind,
- cUnit->currentDalvikOffset, reg1, base, offset);
- opRegMem(cUnit, kOpCmp, reg1, base, offset);
- LIR* branch = opCondBranch(cUnit, cCode, tgt);
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- return branch;
-}
-
-/*
- * The sparse table in the literal pool is an array of <key,displacement>
- * pairs.
- */
-BasicBlock *findBlock(CompilationUnit* cUnit, unsigned int codeOffset,
- bool split, bool create, BasicBlock** immedPredBlockP);
-void genSparseSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
-{
- const u2* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- if (cUnit->printMe) {
- dumpSparseSwitchTable(table);
- }
- int entries = table[1];
- int* keys = (int*)&table[2];
- int* targets = &keys[entries];
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- for (int i = 0; i < entries; i++) {
- int key = keys[i];
- BasicBlock* case_block = findBlock(cUnit,
- cUnit->currentDalvikOffset + targets[i],
- false, false, NULL);
- LIR* labelList = cUnit->blockLabelList;
- opCmpImmBranch(cUnit, kCondEq, rlSrc.lowReg, key,
- &labelList[case_block->id]);
- }
-}
-
-/*
- * Code pattern will look something like:
- *
- * mov rVal, ..
- * call 0
- * pop rStartOfMethod
- * sub rStartOfMethod, ..
- * mov rKeyReg, rVal
- * sub rKeyReg, lowKey
- * cmp rKeyReg, size-1 ; bound check
- * ja done
- * mov rDisp, [rStartOfMethod + rKeyReg * 4 + tableOffset]
- * add rStartOfMethod, rDisp
- * jmp rStartOfMethod
- * done:
- */
-void genPackedSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
-{
- const u2* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- if (cUnit->printMe) {
- dumpPackedSwitchTable(table);
- }
- // Add the table to the list - we'll process it later
- SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
- true, kAllocData);
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
- int size = table[1];
- tabRec->targets = (LIR* *)oatNew(cUnit, size * sizeof(LIR*), true,
- kAllocLIR);
- oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
-
- // Get the switch value
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- int startOfMethodReg = oatAllocTemp(cUnit);
- // Materialize a pointer to the switch table
- //newLIR0(cUnit, kX86Bkpt);
- newLIR1(cUnit, kX86StartOfMethod, startOfMethodReg);
- int lowKey = s4FromSwitchData(&table[2]);
- int keyReg;
- // Remove the bias, if necessary
- if (lowKey == 0) {
- keyReg = rlSrc.lowReg;
- } else {
- keyReg = oatAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpSub, keyReg, rlSrc.lowReg, lowKey);
- }
- // Bounds check - if < 0 or >= size continue following switch
- opRegImm(cUnit, kOpCmp, keyReg, size-1);
- LIR* branchOver = opCondBranch(cUnit, kCondHi, NULL);
-
- // Load the displacement from the switch table
- int dispReg = oatAllocTemp(cUnit);
- newLIR5(cUnit, kX86PcRelLoadRA, dispReg, startOfMethodReg, keyReg, 2,
- (intptr_t)tabRec);
- // Add displacement to start of method
- opRegReg(cUnit, kOpAdd, startOfMethodReg, dispReg);
- // ..and go!
- LIR* switchBranch = newLIR1(cUnit, kX86JmpR, startOfMethodReg);
- tabRec->anchor = switchBranch;
-
- /* branchOver target here */
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = (LIR*)target;
-}
-
-void callRuntimeHelperRegReg(CompilationUnit* cUnit, int helperOffset,
- int arg0, int arg1, bool safepointPC);
-/*
- * Array data table format:
- * ushort ident = 0x0300 magic value
- * ushort width width of each element in the table
- * uint size number of elements in the table
- * ubyte data[size*width] table of data values (may contain a single-byte
- * padding at the end)
- *
- * Total size is 4+(width * size + 1)/2 16-bit code units.
- */
-void genFillArrayData(CompilationUnit* cUnit, uint32_t tableOffset,
- RegLocation rlSrc)
-{
- const u2* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
- // Add the table to the list - we'll process it later
- FillArrayData *tabRec = (FillArrayData *)oatNew(cUnit, sizeof(FillArrayData),
- true, kAllocData);
- tabRec->table = table;
- tabRec->vaddr = cUnit->currentDalvikOffset;
- u2 width = tabRec->table[1];
- u4 size = tabRec->table[2] | (((u4)tabRec->table[3]) << 16);
- tabRec->size = (size * width) + 8;
-
- oatInsertGrowableList(cUnit, &cUnit->fillArrayData, (intptr_t)tabRec);
-
- // Making a call - use explicit registers
- oatFlushAllRegs(cUnit); /* Everything to home location */
- loadValueDirectFixed(cUnit, rlSrc, rX86_ARG0);
- // Materialize a pointer to the fill data image
- newLIR1(cUnit, kX86StartOfMethod, rX86_ARG2);
- newLIR2(cUnit, kX86PcRelAdr, rX86_ARG1, (intptr_t)tabRec);
- newLIR2(cUnit, kX86Add32RR, rX86_ARG1, rX86_ARG2);
- callRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0, rX86_ARG1,
- true);
-}
-
-void genNegFloat(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
-{
- RegLocation rlResult;
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegRegImm(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, 0x80000000);
- storeValue(cUnit, rlDest, rlResult);
-}
-
-void genNegDouble(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
-{
- RegLocation rlResult;
- rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
- rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg, 0x80000000);
- opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
- storeValueWide(cUnit, rlDest, rlResult);
-}
-
-LIR* genNullCheck(CompilationUnit* cUnit, int sReg, int mReg, int optFlags);
-void callRuntimeHelperReg(CompilationUnit* cUnit, int helperOffset, int arg0, bool safepointPC);
-
-void genMonitorEnter(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
-{
- oatFlushAllRegs(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, rCX); // Get obj
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rCX, optFlags);
- // If lock is unheld, try to grab it quickly with compare and exchange
- // TODO: copy and clear hash state?
- newLIR2(cUnit, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
- newLIR2(cUnit, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
- newLIR2(cUnit, kX86Xor32RR, rAX, rAX);
- newLIR3(cUnit, kX86LockCmpxchgMR, rCX, Object::MonitorOffset().Int32Value(), rDX);
- LIR* branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondEq);
- // If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
- callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
- branch->target = newLIR0(cUnit, kPseudoTargetLabel);
-}
-
-void genMonitorExit(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
-{
- oatFlushAllRegs(cUnit);
- loadValueDirectFixed(cUnit, rlSrc, rAX); // Get obj
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- genNullCheck(cUnit, rlSrc.sRegLow, rAX, optFlags);
- // If lock is held by the current thread, clear it to quickly release it
- // TODO: clear hash state?
- newLIR2(cUnit, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
- newLIR2(cUnit, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
- newLIR3(cUnit, kX86Mov32RM, rCX, rAX, Object::MonitorOffset().Int32Value());
- opRegReg(cUnit, kOpSub, rCX, rDX);
- LIR* branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondNe);
- newLIR3(cUnit, kX86Mov32MR, rAX, Object::MonitorOffset().Int32Value(), rCX);
- LIR* branch2 = newLIR1(cUnit, kX86Jmp8, 0);
- branch->target = newLIR0(cUnit, kPseudoTargetLabel);
- // Otherwise, go the expensive route - UnlockObjectFromCode(obj);
- callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
- branch2->target = newLIR0(cUnit, kPseudoTargetLabel);
-}
-
-/*
- * Compare two 64-bit values
- * x = y return 0
- * x < y return -1
- * x > y return 1
- *
- * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
- * sgt t1, x.hi, y.hi; # (y.hi > x.hi) ? 1:0
- * subu res, t0, t1 # res = -1:1:0 for [ < > = ]
- * bnez res, finish
- * sltu t0, x.lo, y.lo
- * sgtu r1, x.lo, y.lo
- * subu res, t0, t1
- * finish:
- *
- */
-void genCmpLong(CompilationUnit* cUnit, RegLocation rlDest,
- RegLocation rlSrc1, RegLocation rlSrc2)
-{
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
- // Compute (r1:r0) = (r1:r0) - (r3:r2)
- opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
- opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
- newLIR2(cUnit, kX86Set8R, r2, kX86CondL); // r2 = (r1:r0) < (r3:r2) ? 1 : 0
- newLIR2(cUnit, kX86Movzx8RR, r2, r2);
- opReg(cUnit, kOpNeg, r2); // r2 = -r2
- opRegReg(cUnit, kOpOr, r0, r1); // r0 = high | low - sets ZF
- newLIR2(cUnit, kX86Set8R, r0, kX86CondNz); // r0 = (r1:r0) != (r3:r2) ? 1 : 0
- newLIR2(cUnit, kX86Movzx8RR, r0, r0);
- opRegReg(cUnit, kOpOr, r0, r2); // r0 = r0 | r2
- RegLocation rlResult = locCReturn();
- storeValue(cUnit, rlDest, rlResult);
-}
-
-X86ConditionCode oatX86ConditionEncoding(ConditionCode cond) {
- switch (cond) {
- case kCondEq: return kX86CondEq;
- case kCondNe: return kX86CondNe;
- case kCondCs: return kX86CondC;
- case kCondCc: return kX86CondNc;
- case kCondMi: return kX86CondS;
- case kCondPl: return kX86CondNs;
- case kCondVs: return kX86CondO;
- case kCondVc: return kX86CondNo;
- case kCondHi: return kX86CondA;
- case kCondLs: return kX86CondBe;
- case kCondGe: return kX86CondGe;
- case kCondLt: return kX86CondL;
- case kCondGt: return kX86CondG;
- case kCondLe: return kX86CondLe;
- case kCondAl:
- case kCondNv: LOG(FATAL) << "Should not reach here";
- }
- return kX86CondO;
-}
-
-LIR* opCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1,
- int src2, LIR* target)
-{
- newLIR2(cUnit, kX86Cmp32RR, src1, src2);
- X86ConditionCode cc = oatX86ConditionEncoding(cond);
- LIR* branch = newLIR2(cUnit, kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
- cc);
- branch->target = target;
- return branch;
-}
-
-LIR* opCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg,
- int checkValue, LIR* target)
-{
- if ((checkValue == 0) && (cond == kCondEq || cond == kCondNe)) {
- // TODO: when checkValue == 0 and reg is rCX, use the jcxz/nz opcode
- newLIR2(cUnit, kX86Test32RR, reg, reg);
- } else {
- newLIR2(cUnit, IS_SIMM8(checkValue) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, checkValue);
- }
- X86ConditionCode cc = oatX86ConditionEncoding(cond);
- LIR* branch = newLIR2(cUnit, kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
- branch->target = target;
- return branch;
-}
-
-LIR* opRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
-{
- if (X86_FPREG(rDest) || X86_FPREG(rSrc))
- return fpRegCopy(cUnit, rDest, rSrc);
- LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, kX86Mov32RR,
- rDest, rSrc);
- if (rDest == rSrc) {
- res->flags.isNop = true;
- }
- return res;
-}
-
-LIR* opRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
-{
- LIR *res = opRegCopyNoInsert(cUnit, rDest, rSrc);
- oatAppendLIR(cUnit, res);
- return res;
-}
-
-void opRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
- int srcLo, int srcHi)
-{
- bool destFP = X86_FPREG(destLo) && X86_FPREG(destHi);
- bool srcFP = X86_FPREG(srcLo) && X86_FPREG(srcHi);
- assert(X86_FPREG(srcLo) == X86_FPREG(srcHi));
- assert(X86_FPREG(destLo) == X86_FPREG(destHi));
- if (destFP) {
- if (srcFP) {
- opRegCopy(cUnit, s2d(destLo, destHi), s2d(srcLo, srcHi));
- } else {
- // TODO: Prevent this from happening in the code. The result is often
- // unused or could have been loaded more easily from memory.
- newLIR2(cUnit, kX86MovdxrRR, destLo, srcLo);
- newLIR2(cUnit, kX86MovdxrRR, destHi, srcHi);
- newLIR2(cUnit, kX86PsllqRI, destHi, 32);
- newLIR2(cUnit, kX86OrpsRR, destLo, destHi);
- }
- } else {
- if (srcFP) {
- newLIR2(cUnit, kX86MovdrxRR, destLo, srcLo);
- newLIR2(cUnit, kX86PsrlqRI, srcLo, 32);
- newLIR2(cUnit, kX86MovdrxRR, destHi, srcLo);
- } else {
- // Handle overlap
- if (srcHi == destLo) {
- opRegCopy(cUnit, destHi, srcHi);
- opRegCopy(cUnit, destLo, srcLo);
- } else {
- opRegCopy(cUnit, destLo, srcLo);
- opRegCopy(cUnit, destHi, srcHi);
- }
- }
- }
-}
-
-void genFusedLongCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir) {
- LIR* labelList = cUnit->blockLabelList;
- LIR* taken = &labelList[bb->taken->id];
- RegLocation rlSrc1 = oatGetSrcWide(cUnit, mir, 0);
- RegLocation rlSrc2 = oatGetSrcWide(cUnit, mir, 2);
- oatFlushAllRegs(cUnit);
- oatLockCallTemps(cUnit); // Prepare for explicit register usage
- loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
- loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
- ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
- // Swap operands and condition code to prevent use of zero flag.
- if (ccode == kCondLe || ccode == kCondGt) {
- // Compute (r3:r2) = (r3:r2) - (r1:r0)
- opRegReg(cUnit, kOpSub, r2, r0); // r2 = r2 - r0
- opRegReg(cUnit, kOpSbc, r3, r1); // r3 = r3 - r1 - CF
- } else {
- // Compute (r1:r0) = (r1:r0) - (r3:r2)
- opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
- opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
- }
- switch (ccode) {
- case kCondEq:
- case kCondNe:
- opRegReg(cUnit, kOpOr, r0, r1); // r0 = r0 | r1
- break;
- case kCondLe:
- ccode = kCondGe;
- break;
- case kCondGt:
- ccode = kCondLt;
- break;
- case kCondLt:
- case kCondGe:
- break;
- default:
- LOG(FATAL) << "Unexpected ccode: " << (int)ccode;
- }
- opCondBranch(cUnit, ccode, taken);
-}
-RegLocation genDivRemLit(CompilationUnit* cUnit, RegLocation rlDest, int regLo, int lit, bool isDiv)
-{
- LOG(FATAL) << "Unexpected use of genDivRemLit for x86";
- return rlDest;
-}
-
-RegLocation genDivRem(CompilationUnit* cUnit, RegLocation rlDest, int regLo, int regHi, bool isDiv)
-{
- LOG(FATAL) << "Unexpected use of genDivRem for x86";
- return rlDest;
-}
-
-/*
- * Mark garbage collection card. Skip if the value we're storing is null.
- */
-void markGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg)
-{
- int regCardBase = oatAllocTemp(cUnit);
- int regCardNo = oatAllocTemp(cUnit);
- LIR* branchOver = opCmpImmBranch(cUnit, kCondEq, valReg, 0, NULL);
- newLIR2(cUnit, kX86Mov32RT, regCardBase, Thread::CardTableOffset().Int32Value());
- opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, CardTable::kCardShift);
- storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
- kUnsignedByte);
- LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
- branchOver->target = (LIR*)target;
- oatFreeTemp(cUnit, regCardBase);
- oatFreeTemp(cUnit, regCardNo);
-}
-
-bool genInlinedMinMaxInt(CompilationUnit *cUnit, CallInfo* info, bool isMin)
-{
- DCHECK_EQ(cUnit->instructionSet, kX86);
- RegLocation rlSrc1 = info->args[0];
- RegLocation rlSrc2 = info->args[1];
- rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
- RegLocation rlDest = inlineTarget(cUnit, info);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
- DCHECK_EQ(cUnit->instructionSet, kX86);
- LIR* branch = newLIR2(cUnit, kX86Jcc8, 0, isMin ? kX86CondG : kX86CondL);
- opRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc1.lowReg);
- LIR* branch2 = newLIR1(cUnit, kX86Jmp8, 0);
- branch->target = newLIR0(cUnit, kPseudoTargetLabel);
- opRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc2.lowReg);
- branch2->target = newLIR0(cUnit, kPseudoTargetLabel);
- storeValue(cUnit, rlDest, rlResult);
- return true;
-}
-
-void opLea(CompilationUnit* cUnit, int rBase, int reg1, int reg2, int scale, int offset)
-{
- newLIR5(cUnit, kX86Lea32RA, rBase, reg1, reg2, scale, offset);
-}
-
-void opTlsCmp(CompilationUnit* cUnit, int offset, int val)
-{
- newLIR2(cUnit, kX86Cmp16TI8, offset, val);
-}
-
-bool genInlinedCas32(CompilationUnit* cUnit, CallInfo* info, bool need_write_barrier) {
- DCHECK_NE(cUnit->instructionSet, kThumb2);
- return false;
-}
-
-bool genInlinedSqrt(CompilationUnit* cUnit, CallInfo* info) {
- DCHECK_NE(cUnit->instructionSet, kThumb2);
- return false;
-}
-
-LIR* opPcRelLoad(CompilationUnit* cUnit, int reg, LIR* target) {
- LOG(FATAL) << "Unexpected use of opPcRelLoad for x86";
- return NULL;
-}
-
-LIR* opVldm(CompilationUnit* cUnit, int rBase, int count)
-{
- LOG(FATAL) << "Unexpected use of opVldm for x86";
- return NULL;
-}
-
-LIR* opVstm(CompilationUnit* cUnit, int rBase, int count)
-{
- LOG(FATAL) << "Unexpected use of opVstm for x86";
- return NULL;
-}
-
-void genMultiplyByTwoBitMultiplier(CompilationUnit* cUnit, RegLocation rlSrc,
- RegLocation rlResult, int lit,
- int firstBit, int secondBit)
-{
- int tReg = oatAllocTemp(cUnit);
- opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, secondBit - firstBit);
- opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, tReg);
- oatFreeTemp(cUnit, tReg);
- if (firstBit != 0) {
- opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
- }
-}
-
-void genDivZeroCheck(CompilationUnit* cUnit, int regLo, int regHi)
-{
- int tReg = oatAllocTemp(cUnit);
- opRegRegReg(cUnit, kOpOr, tReg, regLo, regHi);
- genImmedCheck(cUnit, kCondEq, tReg, 0, kThrowDivZero);
- oatFreeTemp(cUnit, tReg);
-}
-
-// Test suspend flag, return target of taken suspend branch
-LIR* opTestSuspend(CompilationUnit* cUnit, LIR* target)
-{
- opTlsCmp(cUnit, Thread::ThreadFlagsOffset().Int32Value(), 0);
- return opCondBranch(cUnit, (target == NULL) ? kCondNe : kCondEq, target);
-}
-
-// Decrement register and branch on condition
-LIR* opDecAndBranch(CompilationUnit* cUnit, ConditionCode cCode, int reg, LIR* target)
-{
- opRegImm(cUnit, kOpSub, reg, 1);
- return opCmpImmBranch(cUnit, cCode, reg, 0, target);
-}
-
-bool smallLiteralDivide(CompilationUnit* cUnit, Instruction::Code dalvikOpcode,
- RegLocation rlSrc, RegLocation rlDest, int lit)
-{
- LOG(FATAL) << "Unexpected use of smallLiteralDive in x86";
- return false;
-}
-
-LIR* opIT(CompilationUnit* cUnit, ArmConditionCode cond, const char* guide)
-{
- LOG(FATAL) << "Unexpected use of opIT in x86";
- return NULL;
-}
-
-} // namespace art
diff --git a/src/compiler/codegen/x86/X86/Ralloc.cc b/src/compiler/codegen/x86/X86/Ralloc.cc
deleted file mode 100644
index ef72e52..0000000
--- a/src/compiler/codegen/x86/X86/Ralloc.cc
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-namespace art {
-
-/* This file contains codegen for the X86 ISA */
-
-/*
- * Alloc a pair of core registers, or a double. Low reg in low byte,
- * high reg in next byte.
- */
-int oatAllocTypedTempPair(CompilationUnit *cUnit, bool fpHint,
- int regClass)
-{
- int highReg;
- int lowReg;
- int res = 0;
-
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
- lowReg = oatAllocTempDouble(cUnit);
- highReg = lowReg + 1;
- res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
- return res;
- }
-
- lowReg = oatAllocTemp(cUnit);
- highReg = oatAllocTemp(cUnit);
- res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
- return res;
-}
-
-int oatAllocTypedTemp(CompilationUnit *cUnit, bool fpHint, int regClass) {
- if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
- return oatAllocTempFloat(cUnit);
- }
- return oatAllocTemp(cUnit);
-}
-
-void oatInitializeRegAlloc(CompilationUnit* cUnit) {
- int numRegs = sizeof(coreRegs)/sizeof(*coreRegs);
- int numReserved = sizeof(reservedRegs)/sizeof(*reservedRegs);
- int numTemps = sizeof(coreTemps)/sizeof(*coreTemps);
- int numFPRegs = sizeof(fpRegs)/sizeof(*fpRegs);
- int numFPTemps = sizeof(fpTemps)/sizeof(*fpTemps);
- RegisterPool *pool = (RegisterPool *)oatNew(cUnit, sizeof(*pool), true,
- kAllocRegAlloc);
- cUnit->regPool = pool;
- pool->numCoreRegs = numRegs;
- pool->coreRegs = (RegisterInfo *)
- oatNew(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs), true,
- kAllocRegAlloc);
- pool->numFPRegs = numFPRegs;
- pool->FPRegs = (RegisterInfo *)
- oatNew(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true,
- kAllocRegAlloc);
- oatInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
- oatInitPool(pool->FPRegs, fpRegs, pool->numFPRegs);
- // Keep special registers from being allocated
- for (int i = 0; i < numReserved; i++) {
- oatMarkInUse(cUnit, reservedRegs[i]);
- }
- // Mark temp regs - all others not in use can be used for promotion
- for (int i = 0; i < numTemps; i++) {
- oatMarkTemp(cUnit, coreTemps[i]);
- }
- for (int i = 0; i < numFPTemps; i++) {
- oatMarkTemp(cUnit, fpTemps[i]);
- }
- // Construct the alias map.
- cUnit->phiAliasMap = (int*)oatNew(cUnit, cUnit->numSSARegs *
- sizeof(cUnit->phiAliasMap[0]), false,
- kAllocDFInfo);
- for (int i = 0; i < cUnit->numSSARegs; i++) {
- cUnit->phiAliasMap[i] = i;
- }
- for (MIR* phi = cUnit->phiList; phi; phi = phi->meta.phiNext) {
- int defReg = phi->ssaRep->defs[0];
- for (int i = 0; i < phi->ssaRep->numUses; i++) {
- for (int j = 0; j < cUnit->numSSARegs; j++) {
- if (cUnit->phiAliasMap[j] == phi->ssaRep->uses[i]) {
- cUnit->phiAliasMap[j] = defReg;
- }
- }
- }
- }
-}
-
-void freeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
- RegLocation rlFree)
-{
- if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
- (rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
- // No overlap, free both
- oatFreeTemp(cUnit, rlFree.lowReg);
- oatFreeTemp(cUnit, rlFree.highReg);
- }
-}
-
-
-} // namespace art
diff --git a/src/compiler/codegen/x86/X86RallocUtil.cc b/src/compiler/codegen/x86/X86RallocUtil.cc
deleted file mode 100644
index caf4e08..0000000
--- a/src/compiler/codegen/x86/X86RallocUtil.cc
+++ /dev/null
@@ -1,151 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/*
- * This file contains X86-specific register allocation support.
- */
-
-#include "../../CompilerUtility.h"
-#include "../../CompilerIR.h"
-#include "../..//Dataflow.h"
-#include "X86LIR.h"
-#include "Codegen.h"
-#include "../Ralloc.h"
-
-namespace art {
-
-void oatAdjustSpillMask(CompilationUnit* cUnit) {
- // Adjustment for LR spilling, x86 has no LR so nothing to do here
- cUnit->coreSpillMask |= (1 << rRET);
- cUnit->numCoreSpills++;
-}
-
-/*
- * Mark a callee-save fp register as promoted. Note that
- * vpush/vpop uses contiguous register lists so we must
- * include any holes in the mask. Associate holes with
- * Dalvik register INVALID_VREG (0xFFFFU).
- */
-void oatMarkPreservedSingle(CompilationUnit* cUnit, int vReg, int reg)
-{
- UNIMPLEMENTED(WARNING) << "oatMarkPreservedSingle";
-#if 0
- LOG(FATAL) << "No support yet for promoted FP regs";
-#endif
-}
-
-void oatFlushRegWide(CompilationUnit* cUnit, int reg1, int reg2)
-{
- RegisterInfo* info1 = oatGetRegInfo(cUnit, reg1);
- RegisterInfo* info2 = oatGetRegInfo(cUnit, reg2);
- DCHECK(info1 && info2 && info1->pair && info2->pair &&
- (info1->partner == info2->reg) &&
- (info2->partner == info1->reg));
- if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
- if (!(info1->isTemp && info2->isTemp)) {
- /* Should not happen. If it does, there's a problem in evalLoc */
- LOG(FATAL) << "Long half-temp, half-promoted";
- }
-
- info1->dirty = false;
- info2->dirty = false;
- if (SRegToVReg(cUnit, info2->sReg) < SRegToVReg(cUnit, info1->sReg))
- info1 = info2;
- int vReg = SRegToVReg(cUnit, info1->sReg);
- oatFlushRegWideImpl(cUnit, rX86_SP, oatVRegOffset(cUnit, vReg),
- info1->reg, info1->partner);
- }
-}
-
-void oatFlushReg(CompilationUnit* cUnit, int reg)
-{
- RegisterInfo* info = oatGetRegInfo(cUnit, reg);
- if (info->live && info->dirty) {
- info->dirty = false;
- int vReg = SRegToVReg(cUnit, info->sReg);
- oatFlushRegImpl(cUnit, rX86_SP, oatVRegOffset(cUnit, vReg), reg, kWord);
- }
-}
-
-/* Give access to the target-dependent FP register encoding to common code */
-bool oatIsFpReg(int reg) {
- return X86_FPREG(reg);
-}
-
-uint32_t oatFpRegMask() {
- return X86_FP_REG_MASK;
-}
-
-/* Clobber all regs that might be used by an external C call */
-extern void oatClobberCalleeSave(CompilationUnit *cUnit)
-{
- oatClobber(cUnit, rAX);
- oatClobber(cUnit, rCX);
- oatClobber(cUnit, rDX);
-}
-
-extern RegLocation oatGetReturnWideAlt(CompilationUnit* cUnit) {
- RegLocation res = locCReturnWide();
- CHECK(res.lowReg == rAX);
- CHECK(res.highReg == rDX);
- oatClobber(cUnit, rAX);
- oatClobber(cUnit, rDX);
- oatMarkInUse(cUnit, rAX);
- oatMarkInUse(cUnit, rDX);
- oatMarkPair(cUnit, res.lowReg, res.highReg);
- return res;
-}
-
-extern RegLocation oatGetReturnAlt(CompilationUnit* cUnit)
-{
- RegLocation res = locCReturn();
- res.lowReg = rDX;
- oatClobber(cUnit, rDX);
- oatMarkInUse(cUnit, rDX);
- return res;
-}
-
-extern RegisterInfo* oatGetRegInfo(CompilationUnit* cUnit, int reg)
-{
- return X86_FPREG(reg) ? &cUnit->regPool->FPRegs[reg & X86_FP_REG_MASK]
- : &cUnit->regPool->coreRegs[reg];
-}
-
-/* To be used when explicitly managing register use */
-extern void oatLockCallTemps(CompilationUnit* cUnit)
-{
- oatLockTemp(cUnit, rX86_ARG0);
- oatLockTemp(cUnit, rX86_ARG1);
- oatLockTemp(cUnit, rX86_ARG2);
- oatLockTemp(cUnit, rX86_ARG3);
-}
-
-/* To be used when explicitly managing register use */
-extern void oatFreeCallTemps(CompilationUnit* cUnit)
-{
- oatFreeTemp(cUnit, rX86_ARG0);
- oatFreeTemp(cUnit, rX86_ARG1);
- oatFreeTemp(cUnit, rX86_ARG2);
- oatFreeTemp(cUnit, rX86_ARG3);
-}
-
-/* Convert an instruction to a NOP */
-void oatNopLIR( LIR* lir)
-{
- ((LIR*)lir)->flags.isNop = true;
-}
-
-} // namespace art
diff --git a/src/compiler/codegen/x86/Assemble.cc b/src/compiler/codegen/x86/assemble_x86.cc
similarity index 99%
rename from src/compiler/codegen/x86/Assemble.cc
rename to src/compiler/codegen/x86/assemble_x86.cc
index a5388e8..79ed075 100644
--- a/src/compiler/codegen/x86/Assemble.cc
+++ b/src/compiler/codegen/x86/assemble_x86.cc
@@ -14,10 +14,10 @@
* limitations under the License.
*/
-#include "../../Dalvik.h"
-#include "../../CompilerInternals.h"
-#include "X86LIR.h"
-#include "Codegen.h"
+#include "../../dalvik.h"
+#include "../../compiler_internals.h"
+#include "x86_lir.h"
+#include "codegen.h"
namespace art {
diff --git a/src/compiler/codegen/x86/backend_x86.cc b/src/compiler/codegen/x86/backend_x86.cc
new file mode 100644
index 0000000..6abfb23
--- /dev/null
+++ b/src/compiler/codegen/x86/backend_x86.cc
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define _CODEGEN_C
+
+#include "../../dalvik.h"
+#include "../../compiler_internals.h"
+#include "x86_lir.h"
+#include "../ralloc.h"
+#include "codegen.h"
+
+/* Common codegen utility code */
+#include "../codegen_util.cc"
+
+#include "utility_x86.cc"
+#include "../codegen_factory.cc"
+#include "../gen_common.cc"
+#include "../gen_invoke.cc"
+#include "call_x86.cc"
+#include "fp_x86.cc"
+#include "int_x86.cc"
+
+/* Bitcode conversion */
+#include "../method_bitcode.cc"
+
+/* MIR2LIR dispatcher and architectural independent codegen routines */
+#include "../method_codegen_driver.cc"
+
+/* Target-independent local optimizations */
+#include "../local_optimizations.cc"
diff --git a/src/compiler/codegen/x86/call_x86.cc b/src/compiler/codegen/x86/call_x86.cc
new file mode 100644
index 0000000..0cd9b2d
--- /dev/null
+++ b/src/compiler/codegen/x86/call_x86.cc
@@ -0,0 +1,276 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the X86 ISA */
+
+namespace art {
+
+void genSpecialCase(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
+ SpecialCaseHandler specialCase)
+{
+ // TODO
+}
+
+/*
+ * The sparse table in the literal pool is an array of <key,displacement>
+ * pairs.
+ */
+BasicBlock *findBlock(CompilationUnit* cUnit, unsigned int codeOffset,
+ bool split, bool create, BasicBlock** immedPredBlockP);
+void genSparseSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
+ RegLocation rlSrc)
+{
+ const u2* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
+ if (cUnit->printMe) {
+ dumpSparseSwitchTable(table);
+ }
+ int entries = table[1];
+ int* keys = (int*)&table[2];
+ int* targets = &keys[entries];
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ for (int i = 0; i < entries; i++) {
+ int key = keys[i];
+ BasicBlock* case_block = findBlock(cUnit,
+ cUnit->currentDalvikOffset + targets[i],
+ false, false, NULL);
+ LIR* labelList = cUnit->blockLabelList;
+ opCmpImmBranch(cUnit, kCondEq, rlSrc.lowReg, key,
+ &labelList[case_block->id]);
+ }
+}
+
+/*
+ * Code pattern will look something like:
+ *
+ * mov rVal, ..
+ * call 0
+ * pop rStartOfMethod
+ * sub rStartOfMethod, ..
+ * mov rKeyReg, rVal
+ * sub rKeyReg, lowKey
+ * cmp rKeyReg, size-1 ; bound check
+ * ja done
+ * mov rDisp, [rStartOfMethod + rKeyReg * 4 + tableOffset]
+ * add rStartOfMethod, rDisp
+ * jmp rStartOfMethod
+ * done:
+ */
+void genPackedSwitch(CompilationUnit* cUnit, uint32_t tableOffset,
+ RegLocation rlSrc)
+{
+ const u2* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
+ if (cUnit->printMe) {
+ dumpPackedSwitchTable(table);
+ }
+ // Add the table to the list - we'll process it later
+ SwitchTable *tabRec = (SwitchTable *)oatNew(cUnit, sizeof(SwitchTable),
+ true, kAllocData);
+ tabRec->table = table;
+ tabRec->vaddr = cUnit->currentDalvikOffset;
+ int size = table[1];
+ tabRec->targets = (LIR* *)oatNew(cUnit, size * sizeof(LIR*), true,
+ kAllocLIR);
+ oatInsertGrowableList(cUnit, &cUnit->switchTables, (intptr_t)tabRec);
+
+ // Get the switch value
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ int startOfMethodReg = oatAllocTemp(cUnit);
+ // Materialize a pointer to the switch table
+ //newLIR0(cUnit, kX86Bkpt);
+ newLIR1(cUnit, kX86StartOfMethod, startOfMethodReg);
+ int lowKey = s4FromSwitchData(&table[2]);
+ int keyReg;
+ // Remove the bias, if necessary
+ if (lowKey == 0) {
+ keyReg = rlSrc.lowReg;
+ } else {
+ keyReg = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpSub, keyReg, rlSrc.lowReg, lowKey);
+ }
+ // Bounds check - if < 0 or >= size continue following switch
+ opRegImm(cUnit, kOpCmp, keyReg, size-1);
+ LIR* branchOver = opCondBranch(cUnit, kCondHi, NULL);
+
+ // Load the displacement from the switch table
+ int dispReg = oatAllocTemp(cUnit);
+ newLIR5(cUnit, kX86PcRelLoadRA, dispReg, startOfMethodReg, keyReg, 2,
+ (intptr_t)tabRec);
+ // Add displacement to start of method
+ opRegReg(cUnit, kOpAdd, startOfMethodReg, dispReg);
+ // ..and go!
+ LIR* switchBranch = newLIR1(cUnit, kX86JmpR, startOfMethodReg);
+ tabRec->anchor = switchBranch;
+
+ /* branchOver target here */
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branchOver->target = (LIR*)target;
+}
+
+void callRuntimeHelperRegReg(CompilationUnit* cUnit, int helperOffset,
+ int arg0, int arg1, bool safepointPC);
+/*
+ * Array data table format:
+ * ushort ident = 0x0300 magic value
+ * ushort width width of each element in the table
+ * uint size number of elements in the table
+ * ubyte data[size*width] table of data values (may contain a single-byte
+ * padding at the end)
+ *
+ * Total size is 4+(width * size + 1)/2 16-bit code units.
+ */
+void genFillArrayData(CompilationUnit* cUnit, uint32_t tableOffset,
+ RegLocation rlSrc)
+{
+ const u2* table = cUnit->insns + cUnit->currentDalvikOffset + tableOffset;
+ // Add the table to the list - we'll process it later
+ FillArrayData *tabRec = (FillArrayData *)oatNew(cUnit, sizeof(FillArrayData),
+ true, kAllocData);
+ tabRec->table = table;
+ tabRec->vaddr = cUnit->currentDalvikOffset;
+ u2 width = tabRec->table[1];
+ u4 size = tabRec->table[2] | (((u4)tabRec->table[3]) << 16);
+ tabRec->size = (size * width) + 8;
+
+ oatInsertGrowableList(cUnit, &cUnit->fillArrayData, (intptr_t)tabRec);
+
+ // Making a call - use explicit registers
+ oatFlushAllRegs(cUnit); /* Everything to home location */
+ loadValueDirectFixed(cUnit, rlSrc, rX86_ARG0);
+ // Materialize a pointer to the fill data image
+ newLIR1(cUnit, kX86StartOfMethod, rX86_ARG2);
+ newLIR2(cUnit, kX86PcRelAdr, rX86_ARG1, (intptr_t)tabRec);
+ newLIR2(cUnit, kX86Add32RR, rX86_ARG1, rX86_ARG2);
+ callRuntimeHelperRegReg(cUnit, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0, rX86_ARG1,
+ true);
+}
+
+void genMonitorEnter(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
+{
+ oatFlushAllRegs(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rCX); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, rCX, optFlags);
+ // If lock is unheld, try to grab it quickly with compare and exchange
+ // TODO: copy and clear hash state?
+ newLIR2(cUnit, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+ newLIR2(cUnit, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+ newLIR2(cUnit, kX86Xor32RR, rAX, rAX);
+ newLIR3(cUnit, kX86LockCmpxchgMR, rCX, Object::MonitorOffset().Int32Value(), rDX);
+ LIR* branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondEq);
+ // If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
+ callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+ branch->target = newLIR0(cUnit, kPseudoTargetLabel);
+}
+
+void genMonitorExit(CompilationUnit* cUnit, int optFlags, RegLocation rlSrc)
+{
+ oatFlushAllRegs(cUnit);
+ loadValueDirectFixed(cUnit, rlSrc, rAX); // Get obj
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ genNullCheck(cUnit, rlSrc.sRegLow, rAX, optFlags);
+ // If lock is held by the current thread, clear it to quickly release it
+ // TODO: clear hash state?
+ newLIR2(cUnit, kX86Mov32RT, rDX, Thread::ThinLockIdOffset().Int32Value());
+ newLIR2(cUnit, kX86Sal32RI, rDX, LW_LOCK_OWNER_SHIFT);
+ newLIR3(cUnit, kX86Mov32RM, rCX, rAX, Object::MonitorOffset().Int32Value());
+ opRegReg(cUnit, kOpSub, rCX, rDX);
+ LIR* branch = newLIR2(cUnit, kX86Jcc8, 0, kX86CondNe);
+ newLIR3(cUnit, kX86Mov32MR, rAX, Object::MonitorOffset().Int32Value(), rCX);
+ LIR* branch2 = newLIR1(cUnit, kX86Jmp8, 0);
+ branch->target = newLIR0(cUnit, kPseudoTargetLabel);
+ // Otherwise, go the expensive route - UnlockObjectFromCode(obj);
+ callRuntimeHelperReg(cUnit, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+ branch2->target = newLIR0(cUnit, kPseudoTargetLabel);
+}
+
+/*
+ * Mark garbage collection card. Skip if the value we're storing is null.
+ */
+void markGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg)
+{
+ int regCardBase = oatAllocTemp(cUnit);
+ int regCardNo = oatAllocTemp(cUnit);
+ LIR* branchOver = opCmpImmBranch(cUnit, kCondEq, valReg, 0, NULL);
+ newLIR2(cUnit, kX86Mov32RT, regCardBase, Thread::CardTableOffset().Int32Value());
+ opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, CardTable::kCardShift);
+ storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
+ kUnsignedByte);
+ LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
+ branchOver->target = (LIR*)target;
+ oatFreeTemp(cUnit, regCardBase);
+ oatFreeTemp(cUnit, regCardNo);
+}
+
+void genEntrySequence(CompilationUnit* cUnit, RegLocation* argLocs,
+ RegLocation rlMethod)
+{
+ /*
+ * On entry, rX86_ARG0, rX86_ARG1, rX86_ARG2 are live. Let the register
+ * allocation mechanism know so it doesn't try to use any of them when
+ * expanding the frame or flushing. This leaves the utility
+ * code with no spare temps.
+ */
+ oatLockTemp(cUnit, rX86_ARG0);
+ oatLockTemp(cUnit, rX86_ARG1);
+ oatLockTemp(cUnit, rX86_ARG2);
+
+ /* Build frame, return address already on stack */
+ opRegImm(cUnit, kOpSub, rX86_SP, cUnit->frameSize - 4);
+
+ /*
+ * We can safely skip the stack overflow check if we're
+ * a leaf *and* our frame size < fudge factor.
+ */
+ bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
+ ((size_t)cUnit->frameSize <
+ Thread::kStackOverflowReservedBytes));
+ newLIR0(cUnit, kPseudoMethodEntry);
+ /* Spill core callee saves */
+ spillCoreRegs(cUnit);
+ /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
+ DCHECK_EQ(cUnit->numFPSpills, 0);
+ if (!skipOverflowCheck) {
+ // cmp rX86_SP, fs:[stack_end_]; jcc throw_launchpad
+ LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kThrowStackOverflow, 0, 0, 0, 0);
+ opRegThreadMem(cUnit, kOpCmp, rX86_SP, Thread::StackEndOffset().Int32Value());
+ opCondBranch(cUnit, kCondUlt, tgt);
+ // Remember branch target - will process later
+ oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
+ }
+
+ flushIns(cUnit, argLocs, rlMethod);
+
+ oatFreeTemp(cUnit, rX86_ARG0);
+ oatFreeTemp(cUnit, rX86_ARG1);
+ oatFreeTemp(cUnit, rX86_ARG2);
+}
+
+void genExitSequence(CompilationUnit* cUnit) {
+ /*
+ * In the exit path, rX86_RET0/rX86_RET1 are live - make sure they aren't
+ * allocated by the register utilities as temps.
+ */
+ oatLockTemp(cUnit, rX86_RET0);
+ oatLockTemp(cUnit, rX86_RET1);
+
+ newLIR0(cUnit, kPseudoMethodExit);
+ unSpillCoreRegs(cUnit);
+ /* Remove frame except for return address */
+ opRegImm(cUnit, kOpAdd, rX86_SP, cUnit->frameSize - 4);
+ newLIR0(cUnit, kX86Ret);
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/Codegen.h b/src/compiler/codegen/x86/codegen.h
similarity index 99%
rename from src/compiler/codegen/x86/Codegen.h
rename to src/compiler/codegen/x86/codegen.h
index 3755e50..c95fa67 100644
--- a/src/compiler/codegen/x86/Codegen.h
+++ b/src/compiler/codegen/x86/codegen.h
@@ -16,7 +16,7 @@
/* This file contains register alloction support */
-#include "../../CompilerIR.h"
+#include "../../compiler_ir.h"
namespace art {
diff --git a/src/compiler/codegen/x86/FP/X86FP.cc b/src/compiler/codegen/x86/fp_x86.cc
similarity index 92%
rename from src/compiler/codegen/x86/FP/X86FP.cc
rename to src/compiler/codegen/x86/fp_x86.cc
index 5e97a50..0a08ab0 100644
--- a/src/compiler/codegen/x86/FP/X86FP.cc
+++ b/src/compiler/codegen/x86/fp_x86.cc
@@ -331,4 +331,30 @@
opCondBranch(cUnit, ccode, taken);
}
+void genNegFloat(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
+{
+ RegLocation rlResult;
+ rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegImm(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, 0x80000000);
+ storeValue(cUnit, rlDest, rlResult);
+}
+
+void genNegDouble(CompilationUnit *cUnit, RegLocation rlDest, RegLocation rlSrc)
+{
+ RegLocation rlResult;
+ rlSrc = loadValueWide(cUnit, rlSrc, kCoreReg);
+ rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegRegImm(cUnit, kOpAdd, rlResult.highReg, rlSrc.highReg, 0x80000000);
+ opRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
+ storeValueWide(cUnit, rlDest, rlResult);
+}
+
+bool genInlinedSqrt(CompilationUnit* cUnit, CallInfo* info) {
+ DCHECK_NE(cUnit->instructionSet, kThumb2);
+ return false;
+}
+
+
+
} // namespace art
diff --git a/src/compiler/codegen/x86/int_x86.cc b/src/compiler/codegen/x86/int_x86.cc
new file mode 100644
index 0000000..1673b55
--- /dev/null
+++ b/src/compiler/codegen/x86/int_x86.cc
@@ -0,0 +1,434 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This file contains codegen for the X86 ISA */
+
+namespace art {
+
+/*
+ * Perform register memory operation.
+ */
+LIR* genRegMemCheck(CompilationUnit* cUnit, ConditionCode cCode,
+ int reg1, int base, int offset, ThrowKind kind)
+{
+ LIR* tgt = rawLIR(cUnit, 0, kPseudoThrowTarget, kind,
+ cUnit->currentDalvikOffset, reg1, base, offset);
+ opRegMem(cUnit, kOpCmp, reg1, base, offset);
+ LIR* branch = opCondBranch(cUnit, cCode, tgt);
+ // Remember branch target - will process later
+ oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
+ return branch;
+}
+
+/*
+ * Compare two 64-bit values
+ * x = y return 0
+ * x < y return -1
+ * x > y return 1
+ *
+ * slt t0, x.hi, y.hi; # (x.hi < y.hi) ? 1:0
+ * sgt t1, x.hi, y.hi; # (y.hi > x.hi) ? 1:0
+ * subu res, t0, t1 # res = -1:1:0 for [ < > = ]
+ * bnez res, finish
+ * sltu t0, x.lo, y.lo
+ * sgtu r1, x.lo, y.lo
+ * subu res, t0, t1
+ * finish:
+ *
+ */
+void genCmpLong(CompilationUnit* cUnit, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
+ loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ // Compute (r1:r0) = (r1:r0) - (r3:r2)
+ opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
+ opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ newLIR2(cUnit, kX86Set8R, r2, kX86CondL); // r2 = (r1:r0) < (r3:r2) ? 1 : 0
+ newLIR2(cUnit, kX86Movzx8RR, r2, r2);
+ opReg(cUnit, kOpNeg, r2); // r2 = -r2
+ opRegReg(cUnit, kOpOr, r0, r1); // r0 = high | low - sets ZF
+ newLIR2(cUnit, kX86Set8R, r0, kX86CondNz); // r0 = (r1:r0) != (r3:r2) ? 1 : 0
+ newLIR2(cUnit, kX86Movzx8RR, r0, r0);
+ opRegReg(cUnit, kOpOr, r0, r2); // r0 = r0 | r2
+ RegLocation rlResult = locCReturn();
+ storeValue(cUnit, rlDest, rlResult);
+}
+
+X86ConditionCode oatX86ConditionEncoding(ConditionCode cond) {
+ switch (cond) {
+ case kCondEq: return kX86CondEq;
+ case kCondNe: return kX86CondNe;
+ case kCondCs: return kX86CondC;
+ case kCondCc: return kX86CondNc;
+ case kCondMi: return kX86CondS;
+ case kCondPl: return kX86CondNs;
+ case kCondVs: return kX86CondO;
+ case kCondVc: return kX86CondNo;
+ case kCondHi: return kX86CondA;
+ case kCondLs: return kX86CondBe;
+ case kCondGe: return kX86CondGe;
+ case kCondLt: return kX86CondL;
+ case kCondGt: return kX86CondG;
+ case kCondLe: return kX86CondLe;
+ case kCondAl:
+ case kCondNv: LOG(FATAL) << "Should not reach here";
+ }
+ return kX86CondO;
+}
+
+LIR* opCmpBranch(CompilationUnit* cUnit, ConditionCode cond, int src1,
+ int src2, LIR* target)
+{
+ newLIR2(cUnit, kX86Cmp32RR, src1, src2);
+ X86ConditionCode cc = oatX86ConditionEncoding(cond);
+ LIR* branch = newLIR2(cUnit, kX86Jcc8, 0 /* lir operand for Jcc offset */ ,
+ cc);
+ branch->target = target;
+ return branch;
+}
+
+LIR* opCmpImmBranch(CompilationUnit* cUnit, ConditionCode cond, int reg,
+ int checkValue, LIR* target)
+{
+ if ((checkValue == 0) && (cond == kCondEq || cond == kCondNe)) {
+ // TODO: when checkValue == 0 and reg is rCX, use the jcxz/nz opcode
+ newLIR2(cUnit, kX86Test32RR, reg, reg);
+ } else {
+ newLIR2(cUnit, IS_SIMM8(checkValue) ? kX86Cmp32RI8 : kX86Cmp32RI, reg, checkValue);
+ }
+ X86ConditionCode cc = oatX86ConditionEncoding(cond);
+ LIR* branch = newLIR2(cUnit, kX86Jcc8, 0 /* lir operand for Jcc offset */ , cc);
+ branch->target = target;
+ return branch;
+}
+
+LIR* opRegCopyNoInsert(CompilationUnit *cUnit, int rDest, int rSrc)
+{
+ if (X86_FPREG(rDest) || X86_FPREG(rSrc))
+ return fpRegCopy(cUnit, rDest, rSrc);
+ LIR* res = rawLIR(cUnit, cUnit->currentDalvikOffset, kX86Mov32RR,
+ rDest, rSrc);
+ if (rDest == rSrc) {
+ res->flags.isNop = true;
+ }
+ return res;
+}
+
+LIR* opRegCopy(CompilationUnit *cUnit, int rDest, int rSrc)
+{
+ LIR *res = opRegCopyNoInsert(cUnit, rDest, rSrc);
+ oatAppendLIR(cUnit, res);
+ return res;
+}
+
+void opRegCopyWide(CompilationUnit *cUnit, int destLo, int destHi,
+ int srcLo, int srcHi)
+{
+ bool destFP = X86_FPREG(destLo) && X86_FPREG(destHi);
+ bool srcFP = X86_FPREG(srcLo) && X86_FPREG(srcHi);
+ assert(X86_FPREG(srcLo) == X86_FPREG(srcHi));
+ assert(X86_FPREG(destLo) == X86_FPREG(destHi));
+ if (destFP) {
+ if (srcFP) {
+ opRegCopy(cUnit, s2d(destLo, destHi), s2d(srcLo, srcHi));
+ } else {
+ // TODO: Prevent this from happening in the code. The result is often
+ // unused or could have been loaded more easily from memory.
+ newLIR2(cUnit, kX86MovdxrRR, destLo, srcLo);
+ newLIR2(cUnit, kX86MovdxrRR, destHi, srcHi);
+ newLIR2(cUnit, kX86PsllqRI, destHi, 32);
+ newLIR2(cUnit, kX86OrpsRR, destLo, destHi);
+ }
+ } else {
+ if (srcFP) {
+ newLIR2(cUnit, kX86MovdrxRR, destLo, srcLo);
+ newLIR2(cUnit, kX86PsrlqRI, srcLo, 32);
+ newLIR2(cUnit, kX86MovdrxRR, destHi, srcLo);
+ } else {
+ // Handle overlap
+ if (srcHi == destLo) {
+ opRegCopy(cUnit, destHi, srcHi);
+ opRegCopy(cUnit, destLo, srcLo);
+ } else {
+ opRegCopy(cUnit, destLo, srcLo);
+ opRegCopy(cUnit, destHi, srcHi);
+ }
+ }
+ }
+}
+
+void genFusedLongCmpBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir) {
+ LIR* labelList = cUnit->blockLabelList;
+ LIR* taken = &labelList[bb->taken->id];
+ RegLocation rlSrc1 = oatGetSrcWide(cUnit, mir, 0);
+ RegLocation rlSrc2 = oatGetSrcWide(cUnit, mir, 2);
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
+ loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ ConditionCode ccode = static_cast<ConditionCode>(mir->dalvikInsn.arg[0]);
+ // Swap operands and condition code to prevent use of zero flag.
+ if (ccode == kCondLe || ccode == kCondGt) {
+ // Compute (r3:r2) = (r3:r2) - (r1:r0)
+ opRegReg(cUnit, kOpSub, r2, r0); // r2 = r2 - r0
+ opRegReg(cUnit, kOpSbc, r3, r1); // r3 = r3 - r1 - CF
+ } else {
+ // Compute (r1:r0) = (r1:r0) - (r3:r2)
+ opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
+ opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ }
+ switch (ccode) {
+ case kCondEq:
+ case kCondNe:
+ opRegReg(cUnit, kOpOr, r0, r1); // r0 = r0 | r1
+ break;
+ case kCondLe:
+ ccode = kCondGe;
+ break;
+ case kCondGt:
+ ccode = kCondLt;
+ break;
+ case kCondLt:
+ case kCondGe:
+ break;
+ default:
+ LOG(FATAL) << "Unexpected ccode: " << (int)ccode;
+ }
+ opCondBranch(cUnit, ccode, taken);
+}
+RegLocation genDivRemLit(CompilationUnit* cUnit, RegLocation rlDest, int regLo, int lit, bool isDiv)
+{
+ LOG(FATAL) << "Unexpected use of genDivRemLit for x86";
+ return rlDest;
+}
+
+RegLocation genDivRem(CompilationUnit* cUnit, RegLocation rlDest, int regLo, int regHi, bool isDiv)
+{
+ LOG(FATAL) << "Unexpected use of genDivRem for x86";
+ return rlDest;
+}
+
+bool genInlinedMinMaxInt(CompilationUnit *cUnit, CallInfo* info, bool isMin)
+{
+ DCHECK_EQ(cUnit->instructionSet, kX86);
+ RegLocation rlSrc1 = info->args[0];
+ RegLocation rlSrc2 = info->args[1];
+ rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
+ rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
+ RegLocation rlDest = inlineTarget(cUnit, info);
+ RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
+ opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
+ DCHECK_EQ(cUnit->instructionSet, kX86);
+ LIR* branch = newLIR2(cUnit, kX86Jcc8, 0, isMin ? kX86CondG : kX86CondL);
+ opRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc1.lowReg);
+ LIR* branch2 = newLIR1(cUnit, kX86Jmp8, 0);
+ branch->target = newLIR0(cUnit, kPseudoTargetLabel);
+ opRegReg(cUnit, kOpMov, rlResult.lowReg, rlSrc2.lowReg);
+ branch2->target = newLIR0(cUnit, kPseudoTargetLabel);
+ storeValue(cUnit, rlDest, rlResult);
+ return true;
+}
+
+void opLea(CompilationUnit* cUnit, int rBase, int reg1, int reg2, int scale, int offset)
+{
+ newLIR5(cUnit, kX86Lea32RA, rBase, reg1, reg2, scale, offset);
+}
+
+void opTlsCmp(CompilationUnit* cUnit, int offset, int val)
+{
+ newLIR2(cUnit, kX86Cmp16TI8, offset, val);
+}
+
+bool genInlinedCas32(CompilationUnit* cUnit, CallInfo* info, bool need_write_barrier) {
+ DCHECK_NE(cUnit->instructionSet, kThumb2);
+ return false;
+}
+
+LIR* opPcRelLoad(CompilationUnit* cUnit, int reg, LIR* target) {
+ LOG(FATAL) << "Unexpected use of opPcRelLoad for x86";
+ return NULL;
+}
+
+LIR* opVldm(CompilationUnit* cUnit, int rBase, int count)
+{
+ LOG(FATAL) << "Unexpected use of opVldm for x86";
+ return NULL;
+}
+
+LIR* opVstm(CompilationUnit* cUnit, int rBase, int count)
+{
+ LOG(FATAL) << "Unexpected use of opVstm for x86";
+ return NULL;
+}
+
+void genMultiplyByTwoBitMultiplier(CompilationUnit* cUnit, RegLocation rlSrc,
+ RegLocation rlResult, int lit,
+ int firstBit, int secondBit)
+{
+ int tReg = oatAllocTemp(cUnit);
+ opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, secondBit - firstBit);
+ opRegRegReg(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, tReg);
+ oatFreeTemp(cUnit, tReg);
+ if (firstBit != 0) {
+ opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
+ }
+}
+
+void genDivZeroCheck(CompilationUnit* cUnit, int regLo, int regHi)
+{
+ int tReg = oatAllocTemp(cUnit);
+ opRegRegReg(cUnit, kOpOr, tReg, regLo, regHi);
+ genImmedCheck(cUnit, kCondEq, tReg, 0, kThrowDivZero);
+ oatFreeTemp(cUnit, tReg);
+}
+
+// Test suspend flag, return target of taken suspend branch
+LIR* opTestSuspend(CompilationUnit* cUnit, LIR* target)
+{
+ opTlsCmp(cUnit, Thread::ThreadFlagsOffset().Int32Value(), 0);
+ return opCondBranch(cUnit, (target == NULL) ? kCondNe : kCondEq, target);
+}
+
+// Decrement register and branch on condition
+LIR* opDecAndBranch(CompilationUnit* cUnit, ConditionCode cCode, int reg, LIR* target)
+{
+ opRegImm(cUnit, kOpSub, reg, 1);
+ return opCmpImmBranch(cUnit, cCode, reg, 0, target);
+}
+
+bool smallLiteralDivide(CompilationUnit* cUnit, Instruction::Code dalvikOpcode,
+ RegLocation rlSrc, RegLocation rlDest, int lit)
+{
+ LOG(FATAL) << "Unexpected use of smallLiteralDivide in x86"; // message previously misspelled the function name ("smallLiteralDive")
+ return false;
+}
+
+LIR* opIT(CompilationUnit* cUnit, ArmConditionCode cond, const char* guide)
+{
+ LOG(FATAL) << "Unexpected use of opIT in x86";
+ return NULL;
+}
+bool genAddLong(CompilationUnit* cUnit, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
+ loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ // Compute (r1:r0) = (r1:r0) + (r3:r2)
+ opRegReg(cUnit, kOpAdd, r0, r2); // r0 = r0 + r2
+ opRegReg(cUnit, kOpAdc, r1, r3); // r1 = r1 + r3 + CF
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
+}
+
+bool genSubLong(CompilationUnit* cUnit, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
+ loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ // Compute (r1:r0) = (r1:r0) - (r3:r2)
+ opRegReg(cUnit, kOpSub, r0, r2); // r0 = r0 - r2
+ opRegReg(cUnit, kOpSbc, r1, r3); // r1 = r1 - r3 - CF
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
+}
+
+bool genAndLong(CompilationUnit* cUnit, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
+ loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ // Compute (r1:r0) = (r1:r0) & (r3:r2)
+ opRegReg(cUnit, kOpAnd, r0, r2); // r0 = r0 & r2
+ opRegReg(cUnit, kOpAnd, r1, r3); // r1 = r1 & r3
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
+}
+
+bool genOrLong(CompilationUnit* cUnit, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
+ loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ // Compute (r1:r0) = (r1:r0) | (r3:r2)
+ opRegReg(cUnit, kOpOr, r0, r2); // r0 = r0 | r2
+ opRegReg(cUnit, kOpOr, r1, r3); // r1 = r1 | r3
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
+}
+
+bool genXorLong(CompilationUnit* cUnit, RegLocation rlDest,
+ RegLocation rlSrc1, RegLocation rlSrc2)
+{
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ loadValueDirectWideFixed(cUnit, rlSrc1, r0, r1);
+ loadValueDirectWideFixed(cUnit, rlSrc2, r2, r3);
+ // Compute (r1:r0) = (r1:r0) ^ (r3:r2)
+ opRegReg(cUnit, kOpXor, r0, r2); // r0 = r0 ^ r2
+ opRegReg(cUnit, kOpXor, r1, r3); // r1 = r1 ^ r3
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
+}
+
+bool genNegLong(CompilationUnit* cUnit, RegLocation rlDest,
+ RegLocation rlSrc)
+{
+ oatFlushAllRegs(cUnit);
+ oatLockCallTemps(cUnit); // Prepare for explicit register usage
+ loadValueDirectWideFixed(cUnit, rlSrc, r0, r1);
+ // Compute (r1:r0) = -(r1:r0)
+ opRegReg(cUnit, kOpNeg, r0, r0); // r0 = -r0
+ opRegImm(cUnit, kOpAdc, r1, 0); // r1 = r1 + CF
+ opRegReg(cUnit, kOpNeg, r1, r1); // r1 = -r1
+ RegLocation rlResult = {kLocPhysReg, 1, 0, 0, 0, 0, 0, 0, 1, r0, r1,
+ INVALID_SREG, INVALID_SREG};
+ storeValueWide(cUnit, rlDest, rlResult);
+ return false;
+}
+
+void opRegThreadMem(CompilationUnit* cUnit, OpKind op, int rDest, int threadOffset) {
+ X86OpCode opcode = kX86Bkpt;
+ switch (op) {
+ case kOpCmp: opcode = kX86Cmp32RT; break;
+ default:
+ LOG(FATAL) << "Bad opcode: " << op;
+ break;
+ }
+ newLIR2(cUnit, opcode, rDest, threadOffset);
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/target_x86.cc b/src/compiler/codegen/x86/target_x86.cc
new file mode 100644
index 0000000..a211c2f
--- /dev/null
+++ b/src/compiler/codegen/x86/target_x86.cc
@@ -0,0 +1,664 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "../../compiler_internals.h"
+#include "x86_lir.h"
+#include "../ralloc.h"
+
+#include <string>
+
+namespace art {
+
+//FIXME: restore "static" when usage uncovered
+/*static*/ int coreRegs[] = {
+ rAX, rCX, rDX, rBX, rX86_SP, rBP, rSI, rDI, // comma required: the REX list below must not fuse with rDI
+#ifdef TARGET_REX_SUPPORT
+ r8, r9, r10, r11, r12, r13, r14, r15 // was bare "15" — a literal int, not the r15 register constant
+#endif
+};
+/*static*/ int reservedRegs[] = {rX86_SP};
+/*static*/ int coreTemps[] = {rAX, rCX, rDX, rBX};
+/*static*/ int fpRegs[] = {
+ fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+#ifdef TARGET_REX_SUPPORT
+ fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+#endif
+};
+/*static*/ int fpTemps[] = {
+ fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
+#ifdef TARGET_REX_SUPPORT
+ fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
+#endif
+};
+
+RegLocation locCReturn()
+{
+ RegLocation res = X86_LOC_C_RETURN;
+ return res;
+}
+
+RegLocation locCReturnWide()
+{
+ RegLocation res = X86_LOC_C_RETURN_WIDE;
+ return res;
+}
+
+RegLocation locCReturnFloat()
+{
+ RegLocation res = X86_LOC_C_RETURN_FLOAT;
+ return res;
+}
+
+RegLocation locCReturnDouble()
+{
+ RegLocation res = X86_LOC_C_RETURN_DOUBLE;
+ return res;
+}
+
+// Return a target-dependent special register.
+int targetReg(SpecialTargetRegister reg) {
+ int res = INVALID_REG;
+ switch (reg) {
+ case kSelf: res = rX86_SELF; break;
+ case kSuspend: res = rX86_SUSPEND; break;
+ case kLr: res = rX86_LR; break;
+ case kPc: res = rX86_PC; break;
+ case kSp: res = rX86_SP; break;
+ case kArg0: res = rX86_ARG0; break;
+ case kArg1: res = rX86_ARG1; break;
+ case kArg2: res = rX86_ARG2; break;
+ case kArg3: res = rX86_ARG3; break;
+ case kFArg0: res = rX86_FARG0; break;
+ case kFArg1: res = rX86_FARG1; break;
+ case kFArg2: res = rX86_FARG2; break;
+ case kFArg3: res = rX86_FARG3; break;
+ case kRet0: res = rX86_RET0; break;
+ case kRet1: res = rX86_RET1; break;
+ case kInvokeTgt: res = rX86_INVOKE_TGT; break;
+ case kCount: res = rX86_COUNT; break;
+ }
+ return res;
+}
+
+// Create a double from a pair of singles.
+int s2d(int lowReg, int highReg)
+{
+ return X86_S2D(lowReg, highReg);
+}
+
+// Is reg a single or double?
+bool fpReg(int reg)
+{
+ return X86_FPREG(reg);
+}
+
+// Is reg a single?
+bool singleReg(int reg)
+{
+ return X86_SINGLEREG(reg);
+}
+
+// Is reg a double?
+bool doubleReg(int reg)
+{
+ return X86_DOUBLEREG(reg);
+}
+
+// Return mask to strip off fp reg flags and bias.
+uint32_t fpRegMask()
+{
+ return X86_FP_REG_MASK;
+}
+
+// True if both regs single, both core or both double.
+bool sameRegType(int reg1, int reg2)
+{
+ return (X86_REGTYPE(reg1) == X86_REGTYPE(reg2));
+}
+
+/*
+ * Decode the register id.
+ */
+u8 getRegMaskCommon(CompilationUnit* cUnit, int reg)
+{
+ u8 seed;
+ int shift;
+ int regId;
+
+ regId = reg & 0xf;
+ /* Double registers in x86 are just a single FP register */
+ seed = 1;
+ /* FP register starts at bit position 16 */
+ shift = X86_FPREG(reg) ? kX86FPReg0 : 0;
+ /* Expand the double register id into single offset */
+ shift += regId;
+ return (seed << shift);
+}
+
+uint64_t getPCUseDefEncoding()
+{
+ /*
+ * FIXME: might make sense to use a virtual resource encoding bit for pc. Might be
+ * able to clean up some of the x86/Arm_Mips differences
+ */
+ LOG(FATAL) << "Unexpected call to getPCUseDefEncoding for x86";
+ return 0ULL;
+}
+
+void setupTargetResourceMasks(CompilationUnit* cUnit, LIR* lir)
+{
+ DCHECK_EQ(cUnit->instructionSet, kX86);
+
+ // X86-specific resource map setup here.
+ uint64_t flags = EncodingMap[lir->opcode].flags;
+
+ if (flags & REG_USE_SP) {
+ lir->useMask |= ENCODE_X86_REG_SP;
+ }
+
+ if (flags & REG_DEF_SP) {
+ lir->defMask |= ENCODE_X86_REG_SP;
+ }
+
+ if (flags & REG_DEFA) {
+ oatSetupRegMask(cUnit, &lir->defMask, rAX);
+ }
+
+ if (flags & REG_DEFD) {
+ oatSetupRegMask(cUnit, &lir->defMask, rDX);
+ }
+ if (flags & REG_USEA) {
+ oatSetupRegMask(cUnit, &lir->useMask, rAX);
+ }
+
+ if (flags & REG_USEC) {
+ oatSetupRegMask(cUnit, &lir->useMask, rCX);
+ }
+
+ if (flags & REG_USED) {
+ oatSetupRegMask(cUnit, &lir->useMask, rDX);
+ }
+}
+
+/* For dumping instructions */
+static const char* x86RegName[] = {
+ "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+};
+
+static const char* x86CondName[] = {
+ "O",
+ "NO",
+ "B/NAE/C",
+ "NB/AE/NC",
+ "Z/EQ",
+ "NZ/NE",
+ "BE/NA",
+ "NBE/A",
+ "S",
+ "NS",
+ "P/PE",
+ "NP/PO",
+ "L/NGE",
+ "NL/GE",
+ "LE/NG",
+ "NLE/G"
+};
+
+/*
+ * Interpret a format string and build a string no longer than size
+ * See format key in Assemble.cc.
+ */
+/*
+ * Interpret a format string and build a string no longer than size
+ * See format key in Assemble.cc.
+ */
+std::string buildInsnString(const char *fmt, LIR *lir, unsigned char* baseAddr) {
+ std::string buf;
+ size_t i = 0;
+ size_t fmt_len = strlen(fmt);
+ while (i < fmt_len) {
+ if (fmt[i] != '!') {
+ buf += fmt[i];
+ i++;
+ } else {
+ i++;
+ DCHECK_LT(i, fmt_len);
+ char operand_number_ch = fmt[i];
+ i++;
+ if (operand_number_ch == '!') {
+ buf += "!";
+ } else {
+ int operand_number = operand_number_ch - '0';
+ DCHECK_LT(operand_number, 6); // Expect up to 6 LIR operands.
+ DCHECK_LT(i, fmt_len);
+ int operand = lir->operands[operand_number];
+ switch (fmt[i]) {
+ case 'c':
+ DCHECK_LT(static_cast<size_t>(operand), sizeof(x86CondName) / sizeof(x86CondName[0])); // element count, not byte size
+ buf += x86CondName[operand];
+ break;
+ case 'd':
+ buf += StringPrintf("%d", operand);
+ break;
+ case 'p': {
+ SwitchTable *tabRec = reinterpret_cast<SwitchTable*>(operand);
+ buf += StringPrintf("0x%08x", tabRec->offset);
+ break;
+ }
+ case 'r':
+ if (X86_FPREG(operand) || X86_DOUBLEREG(operand)) {
+ int fp_reg = operand & X86_FP_REG_MASK;
+ buf += StringPrintf("xmm%d", fp_reg);
+ } else {
+ DCHECK_LT(static_cast<size_t>(operand), sizeof(x86RegName) / sizeof(x86RegName[0])); // element count, not byte size
+ buf += x86RegName[operand];
+ }
+ break;
+ case 't':
+ buf += StringPrintf("0x%08x (L%p)",
+ static_cast<uint32_t>(reinterpret_cast<uintptr_t>(baseAddr) // pointer->uint32_t cast is ill-formed on 64-bit hosts
+ + lir->offset + operand), lir->target);
+ break;
+ default:
+ buf += StringPrintf("DecodeError '%c'", fmt[i]);
+ break;
+ }
+ i++;
+ }
+ }
+ }
+ return buf;
+}
+
+void oatDumpResourceMask(LIR *lir, u8 mask, const char *prefix)
+{
+ char buf[256];
+ buf[0] = 0;
+ LIR *x86LIR = (LIR *) lir;
+
+ if (mask == ENCODE_ALL) {
+ strcpy(buf, "all");
+ } else {
+ char num[8];
+ int i;
+
+ for (i = 0; i < kX86RegEnd; i++) {
+ if (mask & (1ULL << i)) {
+ sprintf(num, "%d ", i);
+ strcat(buf, num);
+ }
+ }
+
+ if (mask & ENCODE_CCODE) {
+ strcat(buf, "cc ");
+ }
+ /* Memory bits */
+ if (x86LIR && (mask & ENCODE_DALVIK_REG)) {
+ sprintf(buf + strlen(buf), "dr%d%s", x86LIR->aliasInfo & 0xffff,
+ (x86LIR->aliasInfo & 0x80000000) ? "(+1)" : "");
+ }
+ if (mask & ENCODE_LITERAL) {
+ strcat(buf, "lit ");
+ }
+
+ if (mask & ENCODE_HEAP_REF) {
+ strcat(buf, "heap ");
+ }
+ if (mask & ENCODE_MUST_NOT_ALIAS) {
+ strcat(buf, "noalias ");
+ }
+ }
+ if (buf[0]) {
+ LOG(INFO) << prefix << ": " << buf;
+ }
+}
+void oatAdjustSpillMask(CompilationUnit* cUnit) {
+ // Adjustment for LR spilling, x86 has no LR so nothing to do here
+ cUnit->coreSpillMask |= (1 << rRET);
+ cUnit->numCoreSpills++;
+}
+
+/*
+ * Mark a callee-save fp register as promoted. Note that
+ * vpush/vpop uses contiguous register lists so we must
+ * include any holes in the mask. Associate holes with
+ * Dalvik register INVALID_VREG (0xFFFFU).
+ */
+void oatMarkPreservedSingle(CompilationUnit* cUnit, int vReg, int reg)
+{
+ UNIMPLEMENTED(WARNING) << "oatMarkPreservedSingle";
+#if 0
+ LOG(FATAL) << "No support yet for promoted FP regs";
+#endif
+}
+
+void oatFlushRegWide(CompilationUnit* cUnit, int reg1, int reg2)
+{
+ RegisterInfo* info1 = oatGetRegInfo(cUnit, reg1);
+ RegisterInfo* info2 = oatGetRegInfo(cUnit, reg2);
+ DCHECK(info1 && info2 && info1->pair && info2->pair &&
+ (info1->partner == info2->reg) &&
+ (info2->partner == info1->reg));
+ if ((info1->live && info1->dirty) || (info2->live && info2->dirty)) {
+ if (!(info1->isTemp && info2->isTemp)) {
+ /* Should not happen. If it does, there's a problem in evalLoc */
+ LOG(FATAL) << "Long half-temp, half-promoted";
+ }
+
+ info1->dirty = false;
+ info2->dirty = false;
+ if (SRegToVReg(cUnit, info2->sReg) < SRegToVReg(cUnit, info1->sReg))
+ info1 = info2;
+ int vReg = SRegToVReg(cUnit, info1->sReg);
+ oatFlushRegWideImpl(cUnit, rX86_SP, oatVRegOffset(cUnit, vReg),
+ info1->reg, info1->partner);
+ }
+}
+
+void oatFlushReg(CompilationUnit* cUnit, int reg)
+{
+ RegisterInfo* info = oatGetRegInfo(cUnit, reg);
+ if (info->live && info->dirty) {
+ info->dirty = false;
+ int vReg = SRegToVReg(cUnit, info->sReg);
+ oatFlushRegImpl(cUnit, rX86_SP, oatVRegOffset(cUnit, vReg), reg, kWord);
+ }
+}
+
+/* Give access to the target-dependent FP register encoding to common code */
+bool oatIsFpReg(int reg) {
+ return X86_FPREG(reg);
+}
+
+uint32_t oatFpRegMask() {
+ return X86_FP_REG_MASK;
+}
+
+/* Clobber all regs that might be used by an external C call */
+extern void oatClobberCalleeSave(CompilationUnit *cUnit)
+{
+ oatClobber(cUnit, rAX);
+ oatClobber(cUnit, rCX);
+ oatClobber(cUnit, rDX);
+}
+
+extern RegLocation oatGetReturnWideAlt(CompilationUnit* cUnit) {
+ RegLocation res = locCReturnWide();
+ CHECK(res.lowReg == rAX);
+ CHECK(res.highReg == rDX);
+ oatClobber(cUnit, rAX);
+ oatClobber(cUnit, rDX);
+ oatMarkInUse(cUnit, rAX);
+ oatMarkInUse(cUnit, rDX);
+ oatMarkPair(cUnit, res.lowReg, res.highReg);
+ return res;
+}
+
+extern RegLocation oatGetReturnAlt(CompilationUnit* cUnit)
+{
+ RegLocation res = locCReturn();
+ res.lowReg = rDX;
+ oatClobber(cUnit, rDX);
+ oatMarkInUse(cUnit, rDX);
+ return res;
+}
+
+extern RegisterInfo* oatGetRegInfo(CompilationUnit* cUnit, int reg)
+{
+ return X86_FPREG(reg) ? &cUnit->regPool->FPRegs[reg & X86_FP_REG_MASK]
+ : &cUnit->regPool->coreRegs[reg];
+}
+
+/* To be used when explicitly managing register use */
+extern void oatLockCallTemps(CompilationUnit* cUnit)
+{
+ oatLockTemp(cUnit, rX86_ARG0);
+ oatLockTemp(cUnit, rX86_ARG1);
+ oatLockTemp(cUnit, rX86_ARG2);
+ oatLockTemp(cUnit, rX86_ARG3);
+}
+
+/* To be used when explicitly managing register use */
+extern void oatFreeCallTemps(CompilationUnit* cUnit)
+{
+ oatFreeTemp(cUnit, rX86_ARG0);
+ oatFreeTemp(cUnit, rX86_ARG1);
+ oatFreeTemp(cUnit, rX86_ARG2);
+ oatFreeTemp(cUnit, rX86_ARG3);
+}
+
+/* Convert an instruction to a NOP */
+void oatNopLIR( LIR* lir)
+{
+ ((LIR*)lir)->flags.isNop = true;
+}
+
+/*
+ * Determine the initial instruction set to be used for this trace.
+ * Later components may decide to change this.
+ */
+InstructionSet oatInstructionSet()
+{
+ return kX86;
+}
+
+/* Architecture-specific initializations and checks go here */
+bool oatArchVariantInit(void)
+{
+ return true;
+}
+
+int dvmCompilerTargetOptHint(int key)
+{
+ int res;
+ switch (key) {
+ case kMaxHoistDistance:
+ res = 2;
+ break;
+ default:
+ LOG(FATAL) << "Unknown target optimization hint key: " << key;
+ }
+ return res;
+}
+
+void oatGenMemBarrier(CompilationUnit *cUnit, int /* barrierKind */)
+{
+#if ANDROID_SMP != 0
+ // TODO: optimize fences
+ newLIR0(cUnit, kX86Mfence);
+#endif
+}
+/*
+ * Alloc a pair of core registers, or a double. Low reg in low byte,
+ * high reg in next byte.
+ */
+int oatAllocTypedTempPair(CompilationUnit *cUnit, bool fpHint,
+ int regClass)
+{
+ int highReg;
+ int lowReg;
+ int res = 0;
+
+ if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
+ lowReg = oatAllocTempDouble(cUnit);
+ highReg = lowReg + 1;
+ res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ return res;
+ }
+
+ lowReg = oatAllocTemp(cUnit);
+ highReg = oatAllocTemp(cUnit);
+ res = (lowReg & 0xff) | ((highReg & 0xff) << 8);
+ return res;
+}
+
+int oatAllocTypedTemp(CompilationUnit *cUnit, bool fpHint, int regClass) {
+ if (((regClass == kAnyReg) && fpHint) || (regClass == kFPReg)) {
+ return oatAllocTempFloat(cUnit);
+ }
+ return oatAllocTemp(cUnit);
+}
+
+void oatInitializeRegAlloc(CompilationUnit* cUnit) {
+ int numRegs = sizeof(coreRegs)/sizeof(*coreRegs);
+ int numReserved = sizeof(reservedRegs)/sizeof(*reservedRegs);
+ int numTemps = sizeof(coreTemps)/sizeof(*coreTemps);
+ int numFPRegs = sizeof(fpRegs)/sizeof(*fpRegs);
+ int numFPTemps = sizeof(fpTemps)/sizeof(*fpTemps);
+ RegisterPool *pool = (RegisterPool *)oatNew(cUnit, sizeof(*pool), true,
+ kAllocRegAlloc);
+ cUnit->regPool = pool;
+ pool->numCoreRegs = numRegs;
+ pool->coreRegs = (RegisterInfo *)
+ oatNew(cUnit, numRegs * sizeof(*cUnit->regPool->coreRegs), true,
+ kAllocRegAlloc);
+ pool->numFPRegs = numFPRegs;
+ pool->FPRegs = (RegisterInfo *)
+ oatNew(cUnit, numFPRegs * sizeof(*cUnit->regPool->FPRegs), true,
+ kAllocRegAlloc);
+ oatInitPool(pool->coreRegs, coreRegs, pool->numCoreRegs);
+ oatInitPool(pool->FPRegs, fpRegs, pool->numFPRegs);
+ // Keep special registers from being allocated
+ for (int i = 0; i < numReserved; i++) {
+ oatMarkInUse(cUnit, reservedRegs[i]);
+ }
+ // Mark temp regs - all others not in use can be used for promotion
+ for (int i = 0; i < numTemps; i++) {
+ oatMarkTemp(cUnit, coreTemps[i]);
+ }
+ for (int i = 0; i < numFPTemps; i++) {
+ oatMarkTemp(cUnit, fpTemps[i]);
+ }
+ // Construct the alias map.
+ cUnit->phiAliasMap = (int*)oatNew(cUnit, cUnit->numSSARegs *
+ sizeof(cUnit->phiAliasMap[0]), false,
+ kAllocDFInfo);
+ for (int i = 0; i < cUnit->numSSARegs; i++) {
+ cUnit->phiAliasMap[i] = i;
+ }
+ for (MIR* phi = cUnit->phiList; phi; phi = phi->meta.phiNext) {
+ int defReg = phi->ssaRep->defs[0];
+ for (int i = 0; i < phi->ssaRep->numUses; i++) {
+ for (int j = 0; j < cUnit->numSSARegs; j++) {
+ if (cUnit->phiAliasMap[j] == phi->ssaRep->uses[i]) {
+ cUnit->phiAliasMap[j] = defReg;
+ }
+ }
+ }
+ }
+}
+
+void freeRegLocTemps(CompilationUnit* cUnit, RegLocation rlKeep,
+ RegLocation rlFree)
+{
+ if ((rlFree.lowReg != rlKeep.lowReg) && (rlFree.lowReg != rlKeep.highReg) &&
+ (rlFree.highReg != rlKeep.lowReg) && (rlFree.highReg != rlKeep.highReg)) {
+ // No overlap, free both
+ oatFreeTemp(cUnit, rlFree.lowReg);
+ oatFreeTemp(cUnit, rlFree.highReg);
+ }
+}
+
+void spillCoreRegs(CompilationUnit* cUnit) {
+ if (cUnit->numCoreSpills == 0) {
+ return;
+ }
+ // Spill mask not including fake return address register
+ uint32_t mask = cUnit->coreSpillMask & ~(1 << rRET);
+ int offset = cUnit->frameSize - (4 * cUnit->numCoreSpills);
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ storeWordDisp(cUnit, rX86_SP, offset, reg);
+ offset += 4;
+ }
+ }
+}
+
+void unSpillCoreRegs(CompilationUnit* cUnit) {
+ if (cUnit->numCoreSpills == 0) {
+ return;
+ }
+ // Spill mask not including fake return address register
+ uint32_t mask = cUnit->coreSpillMask & ~(1 << rRET);
+ int offset = cUnit->frameSize - (4 * cUnit->numCoreSpills);
+ for (int reg = 0; mask; mask >>= 1, reg++) {
+ if (mask & 0x1) {
+ loadWordDisp(cUnit, rX86_SP, offset, reg);
+ offset += 4;
+ }
+ }
+}
+
+/*
+ * Nop any unconditional branches that go to the next instruction.
+ * Note: new redundant branches may be inserted later, and we'll
+ * use a check in final instruction assembly to nop those out.
+ */
+/*
+ * Nop any unconditional branches that go to the next instruction.
+ * Note: new redundant branches may be inserted later, and we'll
+ * use a check in final instruction assembly to nop those out.
+ */
+void removeRedundantBranches(CompilationUnit* cUnit) {
+ LIR* thisLIR;
+
+ for (thisLIR = (LIR*) cUnit->firstLIRInsn;
+ thisLIR != (LIR*) cUnit->lastLIRInsn;
+ thisLIR = NEXT_LIR(thisLIR)) {
+
+ /* Branch to the next instruction */
+ if (thisLIR->opcode == kX86Jmp8 || thisLIR->opcode == kX86Jmp32) {
+ LIR* nextLIR = thisLIR;
+
+ while (true) {
+ nextLIR = NEXT_LIR(nextLIR);
+
+ /*
+ * Is the branch target the next instruction?
+ */
+ if (nextLIR == (LIR*) thisLIR->target) {
+ thisLIR->flags.isNop = true;
+ break;
+ }
+
+ /*
+ * Found real useful stuff between the branch and the target.
+ * Need to explicitly check the lastLIRInsn here because it
+ * might be the last real instruction.
+ */
+ if (!isPseudoOpcode(nextLIR->opcode) ||
+ (nextLIR == (LIR*) cUnit->lastLIRInsn)) // was '=' (assignment) — clobbered the cursor and made the condition always true
+ break;
+ }
+ }
+ }
+}
+
+/* Common initialization routine for an architecture family */
+bool oatArchInit() {
+ int i;
+
+ for (i = 0; i < kX86Last; i++) {
+ if (EncodingMap[i].opcode != i) {
+ LOG(FATAL) << "Encoding order for " << EncodingMap[i].name
+ << " is wrong: expecting " << i << ", seeing "
+ << (int)EncodingMap[i].opcode;
+ }
+ }
+
+ return oatArchVariantInit();
+}
+
+// Not used in x86
+int loadHelper(CompilationUnit* cUnit, int offset)
+{
+ LOG(FATAL) << "Unexpected use of loadHelper in x86";
+ return INVALID_REG;
+}
+
+} // namespace art
diff --git a/src/compiler/codegen/x86/X86/Factory.cc b/src/compiler/codegen/x86/utility_x86.cc
similarity index 96%
rename from src/compiler/codegen/x86/X86/Factory.cc
rename to src/compiler/codegen/x86/utility_x86.cc
index 454b98d..418a6fe 100644
--- a/src/compiler/codegen/x86/X86/Factory.cc
+++ b/src/compiler/codegen/x86/utility_x86.cc
@@ -18,28 +18,6 @@
/* This file contains codegen for the X86 ISA */
-//FIXME: restore "static" when usage uncovered
-/*static*/ int coreRegs[] = {
- rAX, rCX, rDX, rBX, rX86_SP, rBP, rSI, rDI
-#ifdef TARGET_REX_SUPPORT
- r8, r9, r10, r11, r12, r13, r14, 15
-#endif
-};
-/*static*/ int reservedRegs[] = {rX86_SP};
-/*static*/ int coreTemps[] = {rAX, rCX, rDX, rBX};
-/*static*/ int fpRegs[] = {
- fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
-#ifdef TARGET_REX_SUPPORT
- fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
-#endif
-};
-/*static*/ int fpTemps[] = {
- fr0, fr1, fr2, fr3, fr4, fr5, fr6, fr7,
-#ifdef TARGET_REX_SUPPORT
- fr8, fr9, fr10, fr11, fr12, fr13, fr14, fr15
-#endif
-};
-
void genBarrier(CompilationUnit *cUnit);
void loadPair(CompilationUnit *cUnit, int base, int lowReg, int highReg);
LIR *loadWordDisp(CompilationUnit *cUnit, int rBase, int displacement,
diff --git a/src/compiler/codegen/x86/x86/ArchVariant.cc b/src/compiler/codegen/x86/x86/ArchVariant.cc
deleted file mode 100644
index 4b70202..0000000
--- a/src/compiler/codegen/x86/x86/ArchVariant.cc
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-namespace art {
-
-/*
- * This file is included by Codegen-x86.c, and implements architecture
- * variant-specific code.
- */
-
-/*
- * Determine the initial instruction set to be used for this trace.
- * Later components may decide to change this.
- */
-InstructionSet oatInstructionSet()
-{
- return kX86;
-}
-
-/* Architecture-specific initializations and checks go here */
-bool oatArchVariantInit(void)
-{
- return true;
-}
-
-int dvmCompilerTargetOptHint(int key)
-{
- int res;
- switch (key) {
- case kMaxHoistDistance:
- res = 2;
- break;
- default:
- LOG(FATAL) << "Unknown target optimization hint key: " << key;
- }
- return res;
-}
-
-void oatGenMemBarrier(CompilationUnit *cUnit, int /* barrierKind */)
-{
-#if ANDROID_SMP != 0
- // TODO: optimize fences
- newLIR0(cUnit, kX86Mfence);
-#endif
-}
-
-} // namespace art
diff --git a/src/compiler/codegen/x86/x86/Codegen.cc b/src/compiler/codegen/x86/x86/Codegen.cc
deleted file mode 100644
index 744a7d2..0000000
--- a/src/compiler/codegen/x86/x86/Codegen.cc
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Copyright (C) 2012 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define _CODEGEN_C
-
-#include "../../../Dalvik.h"
-#include "../../../CompilerInternals.h"
-#include "../X86LIR.h"
-#include "../../Ralloc.h"
-#include "../Codegen.h"
-
-/* X86 codegen building blocks */
-#include "../../CodegenUtil.cc"
-
-/* X86-specific factory utilities */
-#include "../X86/Factory.cc"
-/* Target independent factory utilities */
-#include "../../CodegenFactory.cc"
-/* X86-specific codegen routines */
-#include "../X86/Gen.cc"
-/* FP codegen routines */
-#include "../FP/X86FP.cc"
-/* Target independent gen routines */
-#include "../../GenCommon.cc"
-/* Shared invoke gen routines */
-#include "../../GenInvoke.cc"
-/* X86-specific factory utilities */
-#include "../ArchFactory.cc"
-
-/* X86-specific register allocation */
-#include "../X86/Ralloc.cc"
-
-/* Bitcode conversion */
-#include "../../MethodBitcode.cc"
-
-/* MIR2LIR dispatcher and architectural independent codegen routines */
-#include "../../MethodCodegenDriver.cc"
-
-/* Target-independent local optimizations */
-#include "../../LocalOptimizations.cc"
-
-/* Architecture manifest */
-#include "ArchVariant.cc"
diff --git a/src/compiler/codegen/x86/X86LIR.h b/src/compiler/codegen/x86/x86_lir.h
similarity index 99%
rename from src/compiler/codegen/x86/X86LIR.h
rename to src/compiler/codegen/x86/x86_lir.h
index bccd365..fe6d8cb 100644
--- a/src/compiler/codegen/x86/X86LIR.h
+++ b/src/compiler/codegen/x86/x86_lir.h
@@ -17,8 +17,8 @@
#ifndef ART_COMPILER_COMPILER_CODEGEN_X86_X86LIR_H_
#define ART_COMPILER_COMPILER_CODEGEN_X86_X86LIR_H_
-#include "../../Dalvik.h"
-#include "../../CompilerInternals.h"
+#include "../../dalvik.h"
+#include "../../compiler_internals.h"
namespace art {