| 1 | /* |
| 2 | * Copyright (C) 2009 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #include "Dalvik.h" |
| 18 | #include "interp/InterpDefs.h" |
| 19 | #include "libdex/OpCode.h" |
| 20 | #include "dexdump/OpCodeNames.h" |
| 21 | #include "vm/compiler/CompilerInternals.h" |
| 22 | #include "Armv5teLIR.h" |
| 23 | #include "vm/mterp/common/FindInterface.h" |
| 24 | |
| 25 | /* Create the TemplateOpcode enum */ |
| 26 | #define JIT_TEMPLATE(X) TEMPLATE_##X, |
| 27 | typedef enum { |
| 28 | #include "../../template/armv5te/TemplateOpList.h" |
| 29 | /* |
| 30 | * For example, |
| 31 | * TEMPLATE_CMP_LONG, |
| 32 | * TEMPLATE_RETURN, |
| 33 | * ... |
| 34 | */ |
| 35 | TEMPLATE_LAST_MARK, |
| 36 | } TemplateOpCode; |
| 37 | #undef JIT_TEMPLATE |
| 38 | |
| 39 | /* Array holding the entry offset of each template relative to the first one */ |
| 40 | static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK]; |
| 41 | |
| 42 | /* Track exercised opcodes */ |
| 43 | static int opcodeCoverage[256]; |
| 44 | |
| 45 | /*****************************************************************************/ |
| 46 | |
| 47 | /* |
| 48 | * The following are building blocks to construct low-level IRs with 0 - 3 |
| 49 | * operands. |
| 50 | */ |
| 51 | static Armv5teLIR *newLIR0(CompilationUnit *cUnit, Armv5teOpCode opCode) |
| 52 | { |
| 53 | Armv5teLIR *insn = dvmCompilerNew(sizeof(Armv5teLIR), true); |
| 54 | assert(isPseudoOpCode(opCode) || EncodingMap[opCode].operands == 0); |
| 55 | insn->opCode = opCode; |
| 56 | dvmCompilerAppendLIR(cUnit, (LIR *) insn); |
| 57 | return insn; |
| 58 | } |
| 59 | |
| 60 | static Armv5teLIR *newLIR1(CompilationUnit *cUnit, Armv5teOpCode opCode, |
| 61 | int dest) |
| 62 | { |
| 63 | Armv5teLIR *insn = dvmCompilerNew(sizeof(Armv5teLIR), true); |
| 64 | assert(isPseudoOpCode(opCode) || EncodingMap[opCode].operands == 1); |
| 65 | insn->opCode = opCode; |
| 66 | insn->operands[0] = dest; |
| 67 | dvmCompilerAppendLIR(cUnit, (LIR *) insn); |
| 68 | return insn; |
| 69 | } |
| 70 | |
| 71 | static Armv5teLIR *newLIR2(CompilationUnit *cUnit, Armv5teOpCode opCode, |
| 72 | int dest, int src1) |
| 73 | { |
| 74 | Armv5teLIR *insn = dvmCompilerNew(sizeof(Armv5teLIR), true); |
| 75 | assert(isPseudoOpCode(opCode) || EncodingMap[opCode].operands == 2); |
| 76 | insn->opCode = opCode; |
| 77 | insn->operands[0] = dest; |
| 78 | insn->operands[1] = src1; |
| 79 | dvmCompilerAppendLIR(cUnit, (LIR *) insn); |
| 80 | return insn; |
| 81 | } |
| 82 | |
| 83 | static Armv5teLIR *newLIR3(CompilationUnit *cUnit, Armv5teOpCode opCode, |
| 84 | int dest, int src1, int src2) |
| 85 | { |
| 86 | Armv5teLIR *insn = dvmCompilerNew(sizeof(Armv5teLIR), true); |
| 87 | assert(isPseudoOpCode(opCode) || EncodingMap[opCode].operands == 3); |
| 88 | insn->opCode = opCode; |
| 89 | insn->operands[0] = dest; |
| 90 | insn->operands[1] = src1; |
| 91 | insn->operands[2] = src2; |
| 92 | dvmCompilerAppendLIR(cUnit, (LIR *) insn); |
| 93 | return insn; |
| 94 | } |
| 95 | |
| 96 | static Armv5teLIR *newLIR23(CompilationUnit *cUnit, Armv5teOpCode opCode, |
| 97 | int srcdest, int src2) |
| 98 | { |
| 99 | assert(!isPseudoOpCode(opCode)); |
| 100 | if (EncodingMap[opCode].operands == 2) |
| 101 | return newLIR2(cUnit, opCode, srcdest, src2); |
| 102 | else |
| 103 | return newLIR3(cUnit, opCode, srcdest, srcdest, src2); |
| 104 | } |
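/*
 * Illustrative usage of the building blocks above (example only, not part of
 * the original code): to append "add r0, r1, r2" followed by "mov r3, #1" to
 * the unit's LIR list one would write roughly
 *
 *     newLIR3(cUnit, ARMV5TE_ADD_RRR, r0, r1, r2);
 *     newLIR2(cUnit, ARMV5TE_MOV_IMM, r3, 1);
 *
 * newLIR23() picks the 2- or 3-operand encoding automatically, which is handy
 * for Thumb ops that only exist in the "rd = rd op rm" form.
 */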
| 105 | |
| 106 | /*****************************************************************************/ |
| 107 | |
| 108 | /* |
| 109 | * The following are building blocks to insert constants into the pool or |
| 110 | * instruction streams. |
| 111 | */ |
| 112 | |
| 113 | /* Add a 32-bit constant either in the constant pool or mixed with code */ |
| 114 | static Armv5teLIR *addWordData(CompilationUnit *cUnit, int value, bool inPlace) |
| 115 | { |
| 116 | /* Add the constant to the literal pool */ |
| 117 | if (!inPlace) { |
| 118 | Armv5teLIR *newValue = dvmCompilerNew(sizeof(Armv5teLIR), true); |
| 119 | newValue->operands[0] = value; |
| 120 | newValue->generic.next = cUnit->wordList; |
| 121 | cUnit->wordList = (LIR *) newValue; |
| 122 | return newValue; |
| 123 | } else { |
| 124 | /* Add the constant in the middle of code stream */ |
| 125 | newLIR1(cUnit, ARMV5TE_16BIT_DATA, (value & 0xffff)); |
| 126 | newLIR1(cUnit, ARMV5TE_16BIT_DATA, (value >> 16)); |
| 127 | } |
| 128 | return NULL; |
| 129 | } |
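/*
 * Note (illustrative): addWordData(cUnit, 0x12345678, false) queues the word
 * on cUnit->wordList so it can later be emitted as a literal-pool entry,
 * while addWordData(cUnit, 0x12345678, true) emits the value inline as two
 * ARMV5TE_16BIT_DATA halfwords in the middle of the instruction stream.
 */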
| 130 | |
| 131 | /* |
| 132 | * Search the existing constants in the literal pool for an exact match or a |
| 133 | * close match within the specified delta (delta must be >= 0). |
| 134 | */ |
| 135 | static Armv5teLIR *scanLiteralPool(CompilationUnit *cUnit, int value, |
| 136 | unsigned int delta) |
| 137 | { |
| 138 | LIR *dataTarget = cUnit->wordList; |
| 139 | while (dataTarget) { |
| 140 | if (((unsigned) (value - ((Armv5teLIR *) dataTarget)->operands[0])) <= |
| 141 | delta) |
| 142 | return (Armv5teLIR *) dataTarget; |
| 143 | dataTarget = dataTarget->next; |
| 144 | } |
| 145 | return NULL; |
| 146 | } |
| 147 | |
| 148 | /* |
| 149 | * Load an immediate value using a shortcut if possible; otherwise |
| 150 | * load it from the per-translation literal pool |
| 151 | */ |
| 152 | void loadConstant(CompilationUnit *cUnit, int rDest, int value) |
| 153 | { |
| 154 | /* See if the value can be constructed cheaply */ |
| 155 | if ((value >= 0) && (value <= 255)) { |
| 156 | newLIR2(cUnit, ARMV5TE_MOV_IMM, rDest, value); |
| 157 | return; |
| 158 | } else if ((value & 0xFFFFFF00) == 0xFFFFFF00) { |
| 159 | newLIR2(cUnit, ARMV5TE_MOV_IMM, rDest, ~value); |
| 160 | newLIR2(cUnit, ARMV5TE_MVN, rDest, rDest); |
| 161 | return; |
| 162 | } |
| 163 | /* No shortcut - go ahead and use literal pool */ |
| 164 | Armv5teLIR *dataTarget = scanLiteralPool(cUnit, value, 255); |
| 165 | if (dataTarget == NULL) { |
| 166 | dataTarget = addWordData(cUnit, value, false); |
| 167 | } |
| 168 | Armv5teLIR *loadPcRel = dvmCompilerNew(sizeof(Armv5teLIR), true); |
| 169 | loadPcRel->opCode = ARMV5TE_LDR_PC_REL; |
| 170 | loadPcRel->generic.target = (LIR *) dataTarget; |
| 171 | loadPcRel->operands[0] = rDest; |
| 172 | dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel); |
| 173 | |
| 174 | /* |
| 175 | * To save space in the constant pool, we use the ADD_RI8 instruction to |
| 176 | * add up to 255 to an existing constant value. |
| 177 | */ |
| 178 | if (dataTarget->operands[0] != value) { |
| 179 | newLIR2(cUnit, ARMV5TE_ADD_RI8, rDest, value - dataTarget->operands[0]); |
| 180 | } |
| 181 | } |
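/*
 * Rough sketch of what loadConstant() emits (for illustration only):
 *
 *     loadConstant(cUnit, r0, 5)           ->  mov r0, #5
 *     loadConstant(cUnit, r0, -2)          ->  mov r0, #1 ; mvn r0, r0
 *     loadConstant(cUnit, r0, 0x12345678)  ->  ldr r0, [pc, #<pool offset>]
 *
 * In the last case the literal may be shared with a nearby constant already
 * in the pool, followed by an "add r0, #delta" (delta <= 255) fixup.
 */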
| 182 | |
| 183 | /* Export the Dalvik PC associated with an instruction to the StackSave area */ |
| 184 | static void genExportPC(CompilationUnit *cUnit, MIR *mir, int rDPC, int rAddr) |
| 185 | { |
| 186 | int offset = offsetof(StackSaveArea, xtra.currentPc); |
| 187 | loadConstant(cUnit, rDPC, (int) (cUnit->method->insns + mir->offset)); |
| 188 | newLIR2(cUnit, ARMV5TE_MOV_RR, rAddr, rFP); |
| 189 | newLIR2(cUnit, ARMV5TE_SUB_RI8, rAddr, sizeof(StackSaveArea) - offset); |
| 190 | newLIR3(cUnit, ARMV5TE_STR_RRI5, rDPC, rAddr, 0); |
| 191 | } |
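/*
 * For illustration: with rDPC = r2 and rAddr = r3 the sequence above is
 * roughly
 *
 *     ldr  r2, =dalvikPC      @ via loadConstant()
 *     mov  r3, rFP
 *     sub  r3, #(sizeof(StackSaveArea) - offsetof(StackSaveArea, xtra.currentPc))
 *     str  r2, [r3]
 *
 * i.e. the current Dalvik PC is written into the StackSaveArea that sits
 * just below the frame pointer.
 */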
| 192 | |
| 193 | /* Generate conditional branch instructions */ |
| 194 | static void genConditionalBranch(CompilationUnit *cUnit, |
| 195 | Armv5teConditionCode cond, |
| 196 | Armv5teLIR *target) |
| 197 | { |
| 198 | Armv5teLIR *branch = newLIR2(cUnit, ARMV5TE_B_COND, 0, cond); |
| 199 | branch->generic.target = (LIR *) target; |
| 200 | } |
| 201 | |
| 202 | /* Generate unconditional branch instructions */ |
| 203 | static void genUnconditionalBranch(CompilationUnit *cUnit, Armv5teLIR *target) |
| 204 | { |
| 205 | Armv5teLIR *branch = newLIR0(cUnit, ARMV5TE_B_UNCOND); |
| 206 | branch->generic.target = (LIR *) target; |
| 207 | } |
| 208 | |
| 209 | #define USE_IN_CACHE_HANDLER 1 |
| 210 | |
| 211 | /* |
| 212 | * Jump to the out-of-line handler in ARM mode to finish executing the |
| 213 | * remainder of the more complex instructions. |
| 214 | */ |
| 215 | static void genDispatchToHandler(CompilationUnit *cUnit, TemplateOpCode opCode) |
| 216 | { |
| 217 | #if USE_IN_CACHE_HANDLER |
| 218 | /* |
| 219 | * NOTE - In practice BLX only needs one operand, but since the assembler |
| 220 | * may abort itself and retry due to other out-of-range conditions we |
| 221 | * cannot really use operand[0] to store the absolute target address since |
| 222 | * it may get clobbered by the final relative offset. Therefore, |
| 223 | * we pretend BLX_1 is a two-operand instruction and store the absolute |
| 224 | * target address in operand[1] as well. |
| 225 | */ |
| 226 | newLIR2(cUnit, ARMV5TE_BLX_1, |
| 227 | (int) gDvmJit.codeCache + templateEntryOffsets[opCode], |
| 228 | (int) gDvmJit.codeCache + templateEntryOffsets[opCode]); |
| 229 | newLIR2(cUnit, ARMV5TE_BLX_2, |
| 230 | (int) gDvmJit.codeCache + templateEntryOffsets[opCode], |
| 231 | (int) gDvmJit.codeCache + templateEntryOffsets[opCode]); |
| 232 | #else |
| 233 | /* |
| 234 | * In case we want to access the statically compiled handlers for |
| 235 | * debugging purposes, define USE_IN_CACHE_HANDLER to 0 |
| 236 | */ |
| 237 | void *templatePtr; |
| 238 | |
| 239 | #define JIT_TEMPLATE(X) extern void dvmCompiler_TEMPLATE_##X(); |
| 240 | #include "../../template/armv5te/TemplateOpList.h" |
| 241 | #undef JIT_TEMPLATE |
| 242 | switch (opCode) { |
| 243 | #define JIT_TEMPLATE(X) \ |
| 244 | case TEMPLATE_##X: { templatePtr = dvmCompiler_TEMPLATE_##X; break; } |
| 245 | #include "../../template/armv5te/TemplateOpList.h" |
| 246 | #undef JIT_TEMPLATE |
| 247 | default: templatePtr = NULL; |
| 248 | } |
| 249 | loadConstant(cUnit, r7, (int) templatePtr); |
| 250 | newLIR1(cUnit, ARMV5TE_BLX_R, r7); |
| 251 | #endif |
| 252 | } |
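/*
 * Illustrative note: with USE_IN_CACHE_HANDLER the BLX_1/BLX_2 pair above
 * corresponds to the two 16-bit halves of the Thumb BLX encoding; once the
 * assembler resolves the relative offset it becomes a single call into
 * gDvmJit.codeCache + templateEntryOffsets[opCode], e.g. the out-of-line
 * TEMPLATE_RETURN handler used by genReturnCommon() below.
 */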
| 253 | |
| 254 | /* Perform the actual operation for OP_RETURN_* */ |
| 255 | static void genReturnCommon(CompilationUnit *cUnit, MIR *mir) |
| 256 | { |
| 257 | genDispatchToHandler(cUnit, TEMPLATE_RETURN); |
| 258 | #if defined(INVOKE_STATS) |
| 259 | gDvmJit.jitReturn++; |
| 260 | #endif |
| 261 | int dPC = (int) (cUnit->method->insns + mir->offset); |
| 262 | Armv5teLIR *branch = newLIR0(cUnit, ARMV5TE_B_UNCOND); |
| 263 | /* Set up the place holder to reconstruct this Dalvik PC */ |
| 264 | Armv5teLIR *pcrLabel = dvmCompilerNew(sizeof(Armv5teLIR), true); |
| 265 | pcrLabel->opCode = ARMV5TE_PSEUDO_PC_RECONSTRUCTION_CELL; |
| 266 | pcrLabel->operands[0] = dPC; |
| 267 | pcrLabel->operands[1] = mir->offset; |
| 268 | /* Insert the place holder into the growable list */ |
| 269 | dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel); |
| 270 | /* Branch to the PC reconstruction code */ |
| 271 | branch->generic.target = (LIR *) pcrLabel; |
| 272 | } |
| 273 | |
| 274 | /* |
| 275 | * Load a pair of values from rFP[vSrc..vSrc+1] into rDestLo and |
| 276 | * rDestHi |
| 277 | */ |
| 278 | static void loadValuePair(CompilationUnit *cUnit, int vSrc, int rDestLo, |
| 279 | int rDestHi) |
| 280 | { |
| 281 | /* Use reg + imm5*4 to load the values if possible */ |
| 282 | if (vSrc <= 30) { |
| 283 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, rDestLo, rFP, vSrc); |
| 284 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, rDestHi, rFP, vSrc+1); |
| 285 | } else { |
| 286 | if (vSrc <= 64) { |
| 287 | /* Sneak 4 into the base address first */ |
| 288 | newLIR3(cUnit, ARMV5TE_ADD_RRI3, rDestLo, rFP, 4); |
| 289 | newLIR2(cUnit, ARMV5TE_ADD_RI8, rDestLo, (vSrc-1)*4); |
| 290 | } else { |
| 291 | /* Offset too far from rFP */ |
| 292 | loadConstant(cUnit, rDestLo, vSrc*4); |
| 293 | newLIR3(cUnit, ARMV5TE_ADD_RRR, rDestLo, rFP, rDestLo); |
| 294 | } |
| 295 | assert(rDestLo != rDestHi); |
| 296 | newLIR2(cUnit, ARMV5TE_LDMIA, rDestLo, (1<<rDestLo) | (1<<(rDestHi))); |
| 297 | } |
| 298 | } |
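/*
 * Example (illustrative only): loadValuePair(cUnit, 40, r0, r1) falls into
 * the vSrc <= 64 path and emits roughly
 *
 *     add   r0, rFP, #4
 *     add   r0, #156          @ (40-1)*4, so r0 = rFP + 40*4
 *     ldmia r0, {r0, r1}      @ r0 = vreg 40, r1 = vreg 41
 */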
| 299 | |
| 300 | /* |
| 301 | * Store a pair of values from rSrcLo and rSrcHi into vDest and |
| 302 | * vDest+1 |
| 303 | */ |
| 304 | static void storeValuePair(CompilationUnit *cUnit, int rSrcLo, int rSrcHi, |
| 305 | int vDest, int rScratch) |
| 306 | { |
| 307 | /* Use reg + imm5*4 to store the values if possible */ |
| 308 | if (vDest <= 30) { |
| 309 | newLIR3(cUnit, ARMV5TE_STR_RRI5, rSrcLo, rFP, vDest); |
| 310 | newLIR3(cUnit, ARMV5TE_STR_RRI5, rSrcHi, rFP, vDest+1); |
| 311 | } else { |
| 312 | if (vDest <= 64) { |
| 313 | /* Sneak 4 into the base address first */ |
| 314 | newLIR3(cUnit, ARMV5TE_ADD_RRI3, rScratch, rFP, 4); |
| 315 | newLIR2(cUnit, ARMV5TE_ADD_RI8, rScratch, (vDest-1)*4); |
| 316 | } else { |
| 317 | /* Offset too far from rFP */ |
| 318 | loadConstant(cUnit, rScratch, vDest*4); |
| 319 | newLIR3(cUnit, ARMV5TE_ADD_RRR, rScratch, rFP, rScratch); |
| 320 | } |
| 321 | assert(rSrcLo != rSrcHi); |
| 322 | newLIR2(cUnit, ARMV5TE_STMIA, rScratch, (1<<rSrcLo) | (1 << (rSrcHi))); |
| 323 | } |
| 324 | } |
| 325 | |
| 326 | /* Load the address of a Dalvik register on the frame */ |
| 327 | static void loadValueAddress(CompilationUnit *cUnit, int vSrc, int rDest) |
| 328 | { |
| 329 | /* RRI3 can add up to 7 */ |
| 330 | if (vSrc <= 1) { |
| 331 | newLIR3(cUnit, ARMV5TE_ADD_RRI3, rDest, rFP, vSrc*4); |
| 332 | } else if (vSrc <= 64) { |
| 333 | /* Sneak 4 into the base address first */ |
| 334 | newLIR3(cUnit, ARMV5TE_ADD_RRI3, rDest, rFP, 4); |
| 335 | newLIR2(cUnit, ARMV5TE_ADD_RI8, rDest, (vSrc-1)*4); |
| 336 | } else { |
| 337 | loadConstant(cUnit, rDest, vSrc*4); |
| 338 | newLIR3(cUnit, ARMV5TE_ADD_RRR, rDest, rFP, rDest); |
| 339 | } |
| 340 | } |
| 341 | |
| 342 | |
| 343 | /* Load a single value from rFP[vSrc] and store it into rDest */ |
| 344 | static void loadValue(CompilationUnit *cUnit, int vSrc, int rDest) |
| 345 | { |
| 346 | /* Use reg + imm5*4 to load the value if possible */ |
| 347 | if (vSrc <= 31) { |
| 348 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, rDest, rFP, vSrc); |
| 349 | } else { |
| 350 | loadConstant(cUnit, rDest, vSrc*4); |
| 351 | newLIR3(cUnit, ARMV5TE_LDR_RRR, rDest, rFP, rDest); |
| 352 | } |
| 353 | } |
| 354 | |
| 355 | /* Store a value from rSrc to vDest */ |
| 356 | static void storeValue(CompilationUnit *cUnit, int rSrc, int vDest, |
| 357 | int rScratch) |
| 358 | { |
| 359 | /* Use reg + imm5*4 to store the value if possible */ |
| 360 | if (vDest <= 31) { |
| 361 | newLIR3(cUnit, ARMV5TE_STR_RRI5, rSrc, rFP, vDest); |
| 362 | } else { |
| 363 | loadConstant(cUnit, rScratch, vDest*4); |
| 364 | newLIR3(cUnit, ARMV5TE_STR_RRR, rSrc, rFP, rScratch); |
| 365 | } |
| 366 | } |
| 367 | |
| 368 | /* Calculate the address of rFP+vSrc*4 */ |
| 369 | static void calculateValueAddress(CompilationUnit *cUnit, int vSrc, int rDest) |
| 370 | { |
| 371 | /* Use add rd, rs, imm_3 */ |
| 372 | if (vSrc <= 1) { |
| 373 | newLIR3(cUnit, ARMV5TE_ADD_RRI3, rDest, rFP, vSrc*4); |
| 374 | } else if (vSrc <= 64) { |
| 375 | /* Use add rd, imm_8 */ |
| 376 | /* Sneak in 4 above rFP to cover one more register offset (ie v64) */ |
| 377 | newLIR3(cUnit, ARMV5TE_ADD_RRI3, rDest, rFP, 4); |
| 378 | newLIR2(cUnit, ARMV5TE_ADD_RI8, rDest, (vSrc-1)*4); |
| 379 | } else { |
| 380 | /* Load offset from the constant pool */ |
| 381 | loadConstant(cUnit, rDest, vSrc*4); |
| 382 | newLIR3(cUnit, ARMV5TE_ADD_RRR, rDest, rFP, rDest); |
| 383 | } |
| 384 | } |
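/*
 * Quick reference (illustrative): the three cases above amount to
 *
 *     vSrc <= 1  : add rDest, rFP, #vSrc*4               (3-bit immediate)
 *     vSrc <= 64 : add rDest, rFP, #4 ; add rDest, #(vSrc-1)*4
 *     otherwise  : load vSrc*4 into rDest from the pool, then
 *                  add rDest, rFP, rDest
 */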
| 385 | |
| 386 | /* |
| 387 | * Perform a binary operation on 64-bit operands and leave the results in the |
| 388 | * r0/r1 pair. |
| 389 | */ |
| 390 | static void genBinaryOpWide(CompilationUnit *cUnit, int vDest, |
| 391 | Armv5teOpCode preinst, Armv5teOpCode inst) |
| 392 | { |
| 393 | newLIR23(cUnit, preinst, r0, r2); |
| 394 | newLIR23(cUnit, inst, r1, r3); |
| 395 | storeValuePair(cUnit, r0, r1, vDest, r2); |
| 396 | } |
| 397 | |
| 398 | /* Perform a binary operation on 32-bit operands and leave the results in r0. */ |
| 399 | static void genBinaryOp(CompilationUnit *cUnit, int vDest, Armv5teOpCode inst) |
| 400 | { |
| 401 | newLIR23(cUnit, inst, r0, r1); |
| 402 | storeValue(cUnit, r0, vDest, r1); |
| 403 | } |
| 404 | |
| 405 | /* Create the PC reconstruction slot if not already done */ |
| 406 | static inline Armv5teLIR *genCheckCommon(CompilationUnit *cUnit, int dOffset, |
| 407 | Armv5teLIR *branch, |
| 408 | Armv5teLIR *pcrLabel) |
| 409 | { |
| 410 | /* Set up the place holder to reconstruct this Dalvik PC */ |
| 411 | if (pcrLabel == NULL) { |
| 412 | int dPC = (int) (cUnit->method->insns + dOffset); |
| 413 | pcrLabel = dvmCompilerNew(sizeof(Armv5teLIR), true); |
| 414 | pcrLabel->opCode = ARMV5TE_PSEUDO_PC_RECONSTRUCTION_CELL; |
| 415 | pcrLabel->operands[0] = dPC; |
| 416 | pcrLabel->operands[1] = dOffset; |
| 417 | /* Insert the place holder into the growable list */ |
| 418 | dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel); |
| 419 | } |
| 420 | /* Branch to the PC reconstruction code */ |
| 421 | branch->generic.target = (LIR *) pcrLabel; |
| 422 | return pcrLabel; |
| 423 | } |
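/*
 * Example flow (illustration, not original code): a null check on r0 at
 * Dalvik offset 0x10 produces roughly
 *
 *     cmp  r0, #0
 *     beq  <pc-reconstruction cell for insns + 0x10>
 *
 * where the cell later materializes the Dalvik PC and punts to the
 * interpreter; multiple checks against the same offset share one cell via
 * the pcrLabel argument.
 */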
| 424 | |
| 425 | /* |
| 426 | * Perform a "reg cmp imm" operation and jump to the PCR region if condition |
| 427 | * satisfies. |
| 428 | */ |
| 429 | static inline Armv5teLIR *genRegImmCheck(CompilationUnit *cUnit, |
| 430 | Armv5teConditionCode cond, int reg, |
| 431 | int checkValue, int dOffset, |
| 432 | Armv5teLIR *pcrLabel) |
| 433 | { |
| 434 | newLIR2(cUnit, ARMV5TE_CMP_RI8, reg, checkValue); |
| 435 | Armv5teLIR *branch = newLIR2(cUnit, ARMV5TE_B_COND, 0, cond); |
| 436 | return genCheckCommon(cUnit, dOffset, branch, pcrLabel); |
| 437 | } |
| 438 | |
| 439 | /* |
| 440 | * Perform a "reg cmp reg" operation and jump to the PCR region if condition |
| 441 | * satisfies. |
| 442 | */ |
| 443 | static inline Armv5teLIR *inertRegRegCheck(CompilationUnit *cUnit, |
| 444 | Armv5teConditionCode cond, |
| 445 | int reg1, int reg2, int dOffset, |
| 446 | Armv5teLIR *pcrLabel) |
| 447 | { |
| 448 | newLIR2(cUnit, ARMV5TE_CMP_RR, reg1, reg2); |
| 449 | Armv5teLIR *branch = newLIR2(cUnit, ARMV5TE_B_COND, 0, cond); |
| 450 | return genCheckCommon(cUnit, dOffset, branch, pcrLabel); |
| 451 | } |
| 452 | |
| 453 | /* Perform a null check on a register */ |
| 454 | static Armv5teLIR *genNullCheck(CompilationUnit *cUnit, int reg, int dOffset, |
| 455 | Armv5teLIR *pcrLabel) |
| 456 | { |
| 457 | return genRegImmCheck(cUnit, ARM_COND_EQ, reg, 0, dOffset, pcrLabel); |
| 458 | } |
| 459 | |
| 460 | /* Perform a bounds check using two registers */ |
| 461 | static Armv5teLIR *genBoundsCheck(CompilationUnit *cUnit, int rIndex, |
| 462 | int rBound, int dOffset, Armv5teLIR *pcrLabel) |
| 463 | { |
| 464 | return inertRegRegCheck(cUnit, ARM_COND_CS, rIndex, rBound, dOffset, |
| 465 | pcrLabel); |
| 466 | } |
| 467 | |
| 468 | /* Generate an unconditional branch to go to the interpreter */ |
| 469 | static inline Armv5teLIR *genTrap(CompilationUnit *cUnit, int dOffset, |
| 470 | Armv5teLIR *pcrLabel) |
| 471 | { |
| 472 | Armv5teLIR *branch = newLIR0(cUnit, ARMV5TE_B_UNCOND); |
| 473 | return genCheckCommon(cUnit, dOffset, branch, pcrLabel); |
| 474 | } |
| 475 | |
| 476 | /* Load a wide field from an object instance */ |
| 477 | static void genIGetWide(CompilationUnit *cUnit, MIR *mir, int fieldOffset) |
| 478 | { |
| 479 | DecodedInstruction *dInsn = &mir->dalvikInsn; |
| 480 | |
| 481 | loadValue(cUnit, dInsn->vB, r2); |
| 482 | loadConstant(cUnit, r3, fieldOffset); |
| 483 | genNullCheck(cUnit, r2, mir->offset, NULL); /* null object? */ |
| 484 | newLIR3(cUnit, ARMV5TE_ADD_RRR, r2, r2, r3); |
| 485 | newLIR2(cUnit, ARMV5TE_LDMIA, r2, (1<<r0 | 1<<r1)); |
| 486 | storeValuePair(cUnit, r0, r1, dInsn->vA, r3); |
| 487 | } |
| 488 | |
| 489 | /* Store a wide field to an object instance */ |
| 490 | static void genIPutWide(CompilationUnit *cUnit, MIR *mir, int fieldOffset) |
| 491 | { |
| 492 | DecodedInstruction *dInsn = &mir->dalvikInsn; |
| 493 | |
| 494 | loadValue(cUnit, dInsn->vB, r2); |
| 495 | loadValuePair(cUnit, dInsn->vA, r0, r1); |
| 496 | loadConstant(cUnit, r3, fieldOffset); |
| 497 | genNullCheck(cUnit, r2, mir->offset, NULL); /* null object? */ |
| 498 | newLIR3(cUnit, ARMV5TE_ADD_RRR, r2, r2, r3); |
| 499 | newLIR2(cUnit, ARMV5TE_STMIA, r2, (1<<r0 | 1<<r1)); |
| 500 | } |
| 501 | |
| 502 | /* |
| 503 | * Load a field from an object instance |
| 504 | * |
| 505 | * Inst should be one of: |
| 506 | * ARMV5TE_LDR_RRR |
| 507 | * ARMV5TE_LDRB_RRR |
| 508 | * ARMV5TE_LDRH_RRR |
| 509 | * ARMV5TE_LDRSB_RRR |
| 510 | * ARMV5TE_LDRSH_RRR |
| 511 | */ |
| 512 | static void genIGet(CompilationUnit *cUnit, MIR *mir, Armv5teOpCode inst, |
| 513 | int fieldOffset) |
| 514 | { |
| 515 | DecodedInstruction *dInsn = &mir->dalvikInsn; |
| 516 | |
| 517 | /* TUNING: write a utility routine to load via base + constant offset */ |
| 518 | loadValue(cUnit, dInsn->vB, r0); |
| 519 | loadConstant(cUnit, r1, fieldOffset); |
| 520 | genNullCheck(cUnit, r0, mir->offset, NULL); /* null object? */ |
| 521 | newLIR3(cUnit, inst, r0, r0, r1); |
| 522 | storeValue(cUnit, r0, dInsn->vA, r1); |
| 523 | } |
| 524 | |
| 525 | /* |
| 526 | * Store a field to an object instance |
| 527 | * |
| 528 | * Inst should be one of: |
| 529 | * ARMV5TE_STR_RRR |
| 530 | * ARMV5TE_STRB_RRR |
| 531 | * ARMV5TE_STRH_RRR |
| 532 | */ |
| 533 | static void genIPut(CompilationUnit *cUnit, MIR *mir, Armv5teOpCode inst, |
| 534 | int fieldOffset) |
| 535 | { |
| 536 | DecodedInstruction *dInsn = &mir->dalvikInsn; |
| 537 | |
| 538 | /* TUNING: write a utility routine to load via base + constant offset */ |
| 539 | loadValue(cUnit, dInsn->vB, r2); |
| 540 | loadConstant(cUnit, r1, fieldOffset); |
| 541 | loadValue(cUnit, dInsn->vA, r0); |
| 542 | genNullCheck(cUnit, r2, mir->offset, NULL); /* null object? */ |
| 543 | newLIR3(cUnit, inst, r0, r2, r1); |
| 544 | } |
| 545 | |
| 546 | |
| 547 | /* TODO: This should probably be done as an out-of-line instruction handler. */ |
| 548 | |
| 549 | /* |
| 550 | * Generate array load |
| 551 | * |
| 552 | * Inst should be one of: |
| 553 | * ARMV5TE_LDR_RRR |
| 554 | * ARMV5TE_LDRB_RRR |
| 555 | * ARMV5TE_LDRH_RRR |
| 556 | * ARMV5TE_LDRSB_RRR |
| 557 | * ARMV5TE_LDRSH_RRR |
| 558 | */ |
| 559 | static void genArrayGet(CompilationUnit *cUnit, MIR *mir, Armv5teOpCode inst, |
| 560 | int vArray, int vIndex, int vDest, int scale) |
| 561 | { |
| 562 | int lenOffset = offsetof(ArrayObject, length); |
| 563 | int dataOffset = offsetof(ArrayObject, contents); |
| 564 | |
| 565 | loadValue(cUnit, vArray, r2); |
| 566 | loadValue(cUnit, vIndex, r3); |
| 567 | |
| 568 | /* null object? */ |
| 569 | Armv5teLIR * pcrLabel = genNullCheck(cUnit, r2, mir->offset, NULL); |
| 570 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r2, lenOffset >> 2); /* Get len */ |
| 571 | newLIR2(cUnit, ARMV5TE_ADD_RI8, r2, dataOffset); /* r2 -> array data */ |
| 572 | genBoundsCheck(cUnit, r3, r0, mir->offset, pcrLabel); |
| 573 | if (scale) { |
| 574 | newLIR3(cUnit, ARMV5TE_LSL, r3, r3, scale); |
| 575 | } |
| 576 | if (scale==3) { |
| 577 | newLIR3(cUnit, inst, r0, r2, r3); |
| 578 | newLIR2(cUnit, ARMV5TE_ADD_RI8, r2, 4); |
| 579 | newLIR3(cUnit, inst, r1, r2, r3); |
| 580 | storeValuePair(cUnit, r0, r1, vDest, r3); |
| 581 | } else { |
| 582 | newLIR3(cUnit, inst, r0, r2, r3); |
| 583 | storeValue(cUnit, r0, vDest, r3); |
| 584 | } |
| 585 | } |
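/*
 * Illustration only: for an aget of a 32-bit element (scale == 2) the
 * sequence above is approximately
 *
 *     ldr  r2, [rFP, #vArray*4]      @ array reference
 *     ldr  r3, [rFP, #vIndex*4]      @ index
 *     cmp  r2, #0 ; beq <punt>       @ null check
 *     ldr  r0, [r2, #lenOffset]      @ array length
 *     add  r2, #dataOffset           @ r2 -> element 0
 *     cmp  r3, r0 ; bcs <punt>       @ bounds check
 *     lsl  r3, r3, #2
 *     ldr  r0, [r2, r3]
 *     str  r0, [rFP, #vDest*4]
 */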
| 586 | |
| 587 | /* TODO: This should probably be done as an out-of-line instruction handler. */ |
| 588 | |
| 589 | /* |
| 590 | * Generate array store |
| 591 | * |
| 592 | * Inst should be one of: |
| 593 | * ARMV5TE_STR_RRR |
| 594 | * ARMV5TE_STRB_RRR |
| 595 | * ARMV5TE_STRH_RRR |
| 596 | */ |
| 597 | static void genArrayPut(CompilationUnit *cUnit, MIR *mir, Armv5teOpCode inst, |
| 598 | int vArray, int vIndex, int vSrc, int scale) |
| 599 | { |
| 600 | int lenOffset = offsetof(ArrayObject, length); |
| 601 | int dataOffset = offsetof(ArrayObject, contents); |
| 602 | |
| 603 | loadValue(cUnit, vArray, r2); |
| 604 | loadValue(cUnit, vIndex, r3); |
| 605 | genNullCheck(cUnit, r2, mir->offset, NULL); /* null object? */ |
| 606 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r2, lenOffset >> 2); /* Get len */ |
| 607 | newLIR2(cUnit, ARMV5TE_ADD_RI8, r2, dataOffset); /* r2 -> array data */ |
| 608 | genBoundsCheck(cUnit, r3, r0, mir->offset, NULL); |
| 609 | /* at this point, r2 points to array, r3 is unscaled index */ |
| 610 | if (scale==3) { |
| 611 | loadValuePair(cUnit, vSrc, r0, r1); |
| 612 | } else { |
| 613 | loadValue(cUnit, vSrc, r0); |
| 614 | } |
| 615 | if (scale) { |
| 616 | newLIR3(cUnit, ARMV5TE_LSL, r3, r3, scale); |
| 617 | } |
| 618 | /* |
| 619 | * at this point, r2 points to array, r3 is scaled index, and r0[r1] is |
| 620 | * data |
| 621 | */ |
| 622 | if (scale==3) { |
| 623 | newLIR3(cUnit, inst, r0, r2, r3); |
| 624 | newLIR2(cUnit, ARMV5TE_ADD_RI8, r2, 4); |
| 625 | newLIR3(cUnit, inst, r1, r2, r3); |
| 626 | } else { |
| 627 | newLIR3(cUnit, inst, r0, r2, r3); |
| 628 | } |
| 629 | } |
| 630 | |
| 631 | static bool genShiftOpLong(CompilationUnit *cUnit, MIR *mir, int vDest, |
| 632 | int vSrc1, int vShift) |
| 633 | { |
| 634 | loadValuePair(cUnit, vSrc1, r0, r1); |
| 635 | loadValue(cUnit, vShift, r2); |
| 636 | switch (mir->dalvikInsn.opCode) { |
| 637 | case OP_SHL_LONG: |
| 638 | case OP_SHL_LONG_2ADDR: |
| 639 | genDispatchToHandler(cUnit, TEMPLATE_SHL_LONG); |
| 640 | break; |
| 641 | case OP_SHR_LONG: |
| 642 | case OP_SHR_LONG_2ADDR: |
| 643 | genDispatchToHandler(cUnit, TEMPLATE_SHR_LONG); |
| 644 | break; |
| 645 | case OP_USHR_LONG: |
| 646 | case OP_USHR_LONG_2ADDR: |
| 647 | genDispatchToHandler(cUnit, TEMPLATE_USHR_LONG); |
| 648 | break; |
| 649 | default: |
| 650 | return true; |
| 651 | } |
| 652 | storeValuePair(cUnit, r0, r1, vDest, r2); |
| 653 | return false; |
| 654 | } |
| 655 | |
| 656 | static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir, int vDest, |
| 657 | int vSrc1, int vSrc2) |
| 658 | { |
| 659 | void* funct; |
| 660 | /* TODO: use a proper include file to define these */ |
| 661 | float __aeabi_fadd(float a, float b); |
| 662 | float __aeabi_fsub(float a, float b); |
| 663 | float __aeabi_fdiv(float a, float b); |
| 664 | float __aeabi_fmul(float a, float b); |
| 665 | float fmodf(float a, float b); |
| 666 | |
| 667 | switch (mir->dalvikInsn.opCode) { |
| 668 | case OP_ADD_FLOAT_2ADDR: |
| 669 | case OP_ADD_FLOAT: |
| 670 | funct = (void*) __aeabi_fadd; |
| 671 | break; |
| 672 | case OP_SUB_FLOAT_2ADDR: |
| 673 | case OP_SUB_FLOAT: |
| 674 | funct = (void*) __aeabi_fsub; |
| 675 | break; |
| 676 | case OP_DIV_FLOAT_2ADDR: |
| 677 | case OP_DIV_FLOAT: |
| 678 | funct = (void*) __aeabi_fdiv; |
| 679 | break; |
| 680 | case OP_MUL_FLOAT_2ADDR: |
| 681 | case OP_MUL_FLOAT: |
| 682 | funct = (void*) __aeabi_fmul; |
| 683 | break; |
| 684 | case OP_REM_FLOAT_2ADDR: |
| 685 | case OP_REM_FLOAT: |
| 686 | funct = (void*) fmodf; |
| 687 | break; |
| 688 | case OP_NEG_FLOAT: { |
| 689 | loadValue(cUnit, vSrc2, r0); |
| 690 | loadConstant(cUnit, r1, 0x80000000); |
| 691 | newLIR3(cUnit, ARMV5TE_ADD_RRR, r0, r0, r1); |
| 692 | storeValue(cUnit, r0, vDest, r1); |
| 693 | return false; |
| 694 | } |
| 695 | default: |
| 696 | return true; |
| 697 | } |
| 698 | loadConstant(cUnit, r2, (int)funct); |
| 699 | loadValue(cUnit, vSrc1, r0); |
| 700 | loadValue(cUnit, vSrc2, r1); |
| 701 | newLIR1(cUnit, ARMV5TE_BLX_R, r2); |
| 702 | storeValue(cUnit, r0, vDest, r1); |
| 703 | return false; |
| 704 | } |
| 705 | |
| 706 | static bool genArithOpDouble(CompilationUnit *cUnit, MIR *mir, int vDest, |
| 707 | int vSrc1, int vSrc2) |
| 708 | { |
| 709 | void* funct; |
| 710 | /* TODO: use a proper include file to define these */ |
| 711 | double __aeabi_dadd(double a, double b); |
| 712 | double __aeabi_dsub(double a, double b); |
| 713 | double __aeabi_ddiv(double a, double b); |
| 714 | double __aeabi_dmul(double a, double b); |
| 715 | double fmod(double a, double b); |
| 716 | |
| 717 | switch (mir->dalvikInsn.opCode) { |
| 718 | case OP_ADD_DOUBLE_2ADDR: |
| 719 | case OP_ADD_DOUBLE: |
| 720 | funct = (void*) __aeabi_dadd; |
| 721 | break; |
| 722 | case OP_SUB_DOUBLE_2ADDR: |
| 723 | case OP_SUB_DOUBLE: |
| 724 | funct = (void*) __aeabi_dsub; |
| 725 | break; |
| 726 | case OP_DIV_DOUBLE_2ADDR: |
| 727 | case OP_DIV_DOUBLE: |
| 728 | funct = (void*) __aeabi_ddiv; |
| 729 | break; |
| 730 | case OP_MUL_DOUBLE_2ADDR: |
| 731 | case OP_MUL_DOUBLE: |
| 732 | funct = (void*) __aeabi_dmul; |
| 733 | break; |
| 734 | case OP_REM_DOUBLE_2ADDR: |
| 735 | case OP_REM_DOUBLE: |
| 736 | funct = (void*) fmod; |
| 737 | break; |
| 738 | case OP_NEG_DOUBLE: { |
| 739 | loadValuePair(cUnit, vSrc2, r0, r1); |
| 740 | loadConstant(cUnit, r2, 0x80000000); |
| 741 | newLIR3(cUnit, ARMV5TE_ADD_RRR, r1, r1, r2); |
| 742 | storeValuePair(cUnit, r0, r1, vDest, r2); |
| 743 | return false; |
| 744 | } |
| 745 | default: |
| 746 | return true; |
| 747 | } |
| 748 | loadConstant(cUnit, r4PC, (int)funct); |
| 749 | loadValuePair(cUnit, vSrc1, r0, r1); |
| 750 | loadValuePair(cUnit, vSrc2, r2, r3); |
| 751 | newLIR1(cUnit, ARMV5TE_BLX_R, r4PC); |
| 752 | storeValuePair(cUnit, r0, r1, vDest, r2); |
| 753 | return false; |
| 754 | } |
| 755 | |
| 756 | static bool genArithOpLong(CompilationUnit *cUnit, MIR *mir, int vDest, |
| 757 | int vSrc1, int vSrc2) |
| 758 | { |
| 759 | int firstOp = ARMV5TE_BKPT; |
| 760 | int secondOp = ARMV5TE_BKPT; |
| 761 | bool callOut = false; |
| 762 | void *callTgt; |
| 763 | int retReg = r0; |
| 764 | /* TODO - find proper .h file to declare these */ |
| 765 | long long __aeabi_ldivmod(long long op1, long long op2); |
| 766 | |
| 767 | switch (mir->dalvikInsn.opCode) { |
| 768 | case OP_NOT_LONG: |
| 769 | firstOp = ARMV5TE_MVN; |
| 770 | secondOp = ARMV5TE_MVN; |
| 771 | break; |
| 772 | case OP_ADD_LONG: |
| 773 | case OP_ADD_LONG_2ADDR: |
| 774 | firstOp = ARMV5TE_ADD_RRR; |
| 775 | secondOp = ARMV5TE_ADC; |
| 776 | break; |
| 777 | case OP_SUB_LONG: |
| 778 | case OP_SUB_LONG_2ADDR: |
| 779 | firstOp = ARMV5TE_SUB_RRR; |
| 780 | secondOp = ARMV5TE_SBC; |
| 781 | break; |
| 782 | case OP_MUL_LONG: |
| 783 | case OP_MUL_LONG_2ADDR: |
| 784 | loadValuePair(cUnit, vSrc1, r0, r1); |
| 785 | loadValuePair(cUnit, vSrc2, r2, r3); |
| 786 | genDispatchToHandler(cUnit, TEMPLATE_MUL_LONG); |
| 787 | storeValuePair(cUnit, r0, r1, vDest, r2); |
| 788 | return false; |
| 789 | break; |
| 790 | case OP_DIV_LONG: |
| 791 | case OP_DIV_LONG_2ADDR: |
| 792 | callOut = true; |
| 793 | retReg = r0; |
| 794 | callTgt = (void*)__aeabi_ldivmod; |
| 795 | break; |
| 796 | /* NOTE - result is in r2/r3 instead of r0/r1 */ |
| 797 | case OP_REM_LONG: |
| 798 | case OP_REM_LONG_2ADDR: |
| 799 | callOut = true; |
| 800 | callTgt = (void*)__aeabi_ldivmod; |
| 801 | retReg = r2; |
| 802 | break; |
| 803 | case OP_AND_LONG: |
| 804 | case OP_AND_LONG_2ADDR: |
| 805 | firstOp = ARMV5TE_AND_RR; |
| 806 | secondOp = ARMV5TE_AND_RR; |
| 807 | break; |
| 808 | case OP_OR_LONG: |
| 809 | case OP_OR_LONG_2ADDR: |
| 810 | firstOp = ARMV5TE_ORR; |
| 811 | secondOp = ARMV5TE_ORR; |
| 812 | break; |
| 813 | case OP_XOR_LONG: |
| 814 | case OP_XOR_LONG_2ADDR: |
| 815 | firstOp = ARMV5TE_EOR; |
| 816 | secondOp = ARMV5TE_EOR; |
| 817 | break; |
| 818 | case OP_NEG_LONG: |
| 819 | loadValuePair(cUnit, vSrc2, r2, r3); |
| 820 | loadConstant(cUnit, r1, 0); |
| 821 | newLIR3(cUnit, ARMV5TE_SUB_RRR, r0, r1, r2); |
| 822 | newLIR2(cUnit, ARMV5TE_SBC, r1, r3); |
| 823 | storeValuePair(cUnit, r0, r1, vDest, r2); |
| 824 | return false; |
| 825 | default: |
| 826 | LOGE("Invalid long arith op"); |
| 827 | dvmAbort(); |
| 828 | } |
| 829 | if (!callOut) { |
| 830 | loadValuePair(cUnit, vSrc1, r0, r1); |
| 831 | loadValuePair(cUnit, vSrc2, r2, r3); |
| 832 | genBinaryOpWide(cUnit, vDest, firstOp, secondOp); |
| 833 | } else { |
| 834 | loadValuePair(cUnit, vSrc2, r2, r3); |
| 835 | loadConstant(cUnit, r4PC, (int) callTgt); |
| 836 | loadValuePair(cUnit, vSrc1, r0, r1); |
| 837 | newLIR1(cUnit, ARMV5TE_BLX_R, r4PC); |
| 838 | storeValuePair(cUnit, retReg, retReg+1, vDest, r4PC); |
| 839 | } |
| 840 | return false; |
| 841 | } |
| 842 | |
| 843 | static bool genArithOpInt(CompilationUnit *cUnit, MIR *mir, int vDest, |
| 844 | int vSrc1, int vSrc2) |
| 845 | { |
| 846 | int armOp = ARMV5TE_BKPT; |
| 847 | bool callOut = false; |
| 848 | bool checkZero = false; |
| 849 | int retReg = r0; |
| 850 | void *callTgt; |
| 851 | |
| 852 | /* TODO - find proper .h file to declare these */ |
| 853 | int __aeabi_idivmod(int op1, int op2); |
| 854 | int __aeabi_idiv(int op1, int op2); |
| 855 | |
| 856 | switch (mir->dalvikInsn.opCode) { |
| 857 | case OP_NEG_INT: |
| 858 | armOp = ARMV5TE_NEG; |
| 859 | break; |
| 860 | case OP_NOT_INT: |
| 861 | armOp = ARMV5TE_MVN; |
| 862 | break; |
| 863 | case OP_ADD_INT: |
| 864 | case OP_ADD_INT_2ADDR: |
| 865 | armOp = ARMV5TE_ADD_RRR; |
| 866 | break; |
| 867 | case OP_SUB_INT: |
| 868 | case OP_SUB_INT_2ADDR: |
| 869 | armOp = ARMV5TE_SUB_RRR; |
| 870 | break; |
| 871 | case OP_MUL_INT: |
| 872 | case OP_MUL_INT_2ADDR: |
| 873 | armOp = ARMV5TE_MUL; |
| 874 | break; |
| 875 | case OP_DIV_INT: |
| 876 | case OP_DIV_INT_2ADDR: |
| 877 | callOut = true; |
| 878 | checkZero = true; |
| 879 | callTgt = (void*)__aeabi_idiv; |
| 880 | retReg = r0; |
| 881 | break; |
| 882 | /* NOTE: returns in r1 */ |
| 883 | case OP_REM_INT: |
| 884 | case OP_REM_INT_2ADDR: |
| 885 | callOut = true; |
| 886 | checkZero = true; |
| 887 | callTgt = (void*)__aeabi_idivmod; |
| 888 | retReg = r1; |
| 889 | break; |
| 890 | case OP_AND_INT: |
| 891 | case OP_AND_INT_2ADDR: |
| 892 | armOp = ARMV5TE_AND_RR; |
| 893 | break; |
| 894 | case OP_OR_INT: |
| 895 | case OP_OR_INT_2ADDR: |
| 896 | armOp = ARMV5TE_ORR; |
| 897 | break; |
| 898 | case OP_XOR_INT: |
| 899 | case OP_XOR_INT_2ADDR: |
| 900 | armOp = ARMV5TE_EOR; |
| 901 | break; |
| 902 | case OP_SHL_INT: |
| 903 | case OP_SHL_INT_2ADDR: |
| 904 | armOp = ARMV5TE_LSLV; |
| 905 | break; |
| 906 | case OP_SHR_INT: |
| 907 | case OP_SHR_INT_2ADDR: |
| 908 | armOp = ARMV5TE_ASRV; |
| 909 | break; |
| 910 | case OP_USHR_INT: |
| 911 | case OP_USHR_INT_2ADDR: |
| 912 | armOp = ARMV5TE_LSRV; |
| 913 | break; |
| 914 | default: |
| 915 | LOGE("Invalid word arith op: 0x%x(%d)", |
| 916 | mir->dalvikInsn.opCode, mir->dalvikInsn.opCode); |
| 917 | dvmAbort(); |
| 918 | } |
| 919 | if (!callOut) { |
| 920 | loadValue(cUnit, vSrc1, r0); |
| 921 | loadValue(cUnit, vSrc2, r1); |
| 922 | genBinaryOp(cUnit, vDest, armOp); |
| 923 | } else { |
| 924 | loadValue(cUnit, vSrc2, r1); |
| 925 | loadConstant(cUnit, r2, (int) callTgt); |
| 926 | loadValue(cUnit, vSrc1, r0); |
| 927 | if (checkZero) { |
| 928 | genNullCheck(cUnit, r1, mir->offset, NULL); |
| 929 | } |
| 930 | newLIR1(cUnit, ARMV5TE_BLX_R, r2); |
| 931 | storeValue(cUnit, retReg, vDest, r2); |
| 932 | } |
| 933 | return false; |
| 934 | } |
| 935 | |
| 936 | static bool genArithOp(CompilationUnit *cUnit, MIR *mir) |
| 937 | { |
| 938 | OpCode opCode = mir->dalvikInsn.opCode; |
| 939 | int vA = mir->dalvikInsn.vA; |
| 940 | int vB = mir->dalvikInsn.vB; |
| 941 | int vC = mir->dalvikInsn.vC; |
| 942 | |
| 943 | if ((opCode >= OP_ADD_LONG_2ADDR) && (opCode <= OP_XOR_LONG_2ADDR)) { |
| 944 | return genArithOpLong(cUnit,mir, vA, vA, vB); |
| 945 | } |
| 946 | if ((opCode >= OP_ADD_LONG) && (opCode <= OP_XOR_LONG)) { |
| 947 | return genArithOpLong(cUnit,mir, vA, vB, vC); |
| 948 | } |
| 949 | if ((opCode >= OP_SHL_LONG_2ADDR) && (opCode <= OP_USHR_LONG_2ADDR)) { |
| 950 | return genShiftOpLong(cUnit,mir, vA, vA, vB); |
| 951 | } |
| 952 | if ((opCode >= OP_SHL_LONG) && (opCode <= OP_USHR_LONG)) { |
| 953 | return genShiftOpLong(cUnit,mir, vA, vB, vC); |
| 954 | } |
| 955 | if ((opCode >= OP_ADD_INT_2ADDR) && (opCode <= OP_USHR_INT_2ADDR)) { |
| 956 | return genArithOpInt(cUnit,mir, vA, vA, vB); |
| 957 | } |
| 958 | if ((opCode >= OP_ADD_INT) && (opCode <= OP_USHR_INT)) { |
| 959 | return genArithOpInt(cUnit,mir, vA, vB, vC); |
| 960 | } |
| 961 | if ((opCode >= OP_ADD_FLOAT_2ADDR) && (opCode <= OP_REM_FLOAT_2ADDR)) { |
| 962 | return genArithOpFloat(cUnit,mir, vA, vA, vB); |
| 963 | } |
| 964 | if ((opCode >= OP_ADD_FLOAT) && (opCode <= OP_REM_FLOAT)) { |
| 965 | return genArithOpFloat(cUnit,mir, vA, vB, vC); |
| 966 | } |
| 967 | if ((opCode >= OP_ADD_DOUBLE_2ADDR) && (opCode <= OP_REM_DOUBLE_2ADDR)) { |
| 968 | return genArithOpDouble(cUnit,mir, vA, vA, vB); |
| 969 | } |
| 970 | if ((opCode >= OP_ADD_DOUBLE) && (opCode <= OP_REM_DOUBLE)) { |
| 971 | return genArithOpDouble(cUnit,mir, vA, vB, vC); |
| 972 | } |
| 973 | return true; |
| 974 | } |
| 975 | |
| 976 | static bool genConversion(CompilationUnit *cUnit, MIR *mir, void *funct, |
| 977 | int srcSize, int tgtSize) |
| 978 | { |
| 979 | loadConstant(cUnit, r2, (int)funct); |
| 980 | if (srcSize == 1) { |
| 981 | loadValue(cUnit, mir->dalvikInsn.vB, r0); |
| 982 | } else { |
| 983 | loadValuePair(cUnit, mir->dalvikInsn.vB, r0, r1); |
| 984 | } |
| 985 | newLIR1(cUnit, ARMV5TE_BLX_R, r2); |
| 986 | if (tgtSize == 1) { |
| 987 | storeValue(cUnit, r0, mir->dalvikInsn.vA, r1); |
| 988 | } else { |
| 989 | storeValuePair(cUnit, r0, r1, mir->dalvikInsn.vA, r2); |
| 990 | } |
| 991 | return false; |
| 992 | } |
| 993 | |
| 994 | /* Experimental example of completely inlining a native replacement */ |
| 995 | static bool genInlinedStringLength(CompilationUnit *cUnit, MIR *mir) |
| 996 | { |
| 997 | int offset = (int) &((InterpState *) NULL)->retval; |
| 998 | DecodedInstruction *dInsn = &mir->dalvikInsn; |
| 999 | assert(dInsn->vA == 1); |
| 1000 | loadValue(cUnit, dInsn->arg[0], r0); |
| 1001 | loadConstant(cUnit, r1, gDvm.offJavaLangString_count); |
| 1002 | genNullCheck(cUnit, r0, mir->offset, NULL); |
| 1003 | newLIR3(cUnit, ARMV5TE_LDR_RRR, r0, r0, r1); |
| 1004 | newLIR3(cUnit, ARMV5TE_STR_RRI5, r0, rGLUE, offset >> 2); |
| 1005 | return false; |
| 1006 | } |
| 1007 | |
| 1008 | static void genProcessArgsNoRange(CompilationUnit *cUnit, MIR *mir, |
| 1009 | DecodedInstruction *dInsn, |
| 1010 | Armv5teLIR **pcrLabel) |
| 1011 | { |
| 1012 | unsigned int i; |
| 1013 | unsigned int regMask = 0; |
| 1014 | |
| 1015 | /* Load arguments to r0..r4 */ |
| 1016 | for (i = 0; i < dInsn->vA; i++) { |
| 1017 | regMask |= 1 << i; |
| 1018 | loadValue(cUnit, dInsn->arg[i], i); |
| 1019 | } |
| 1020 | if (regMask) { |
| 1021 | /* Up to 5 args are pushed on top of FP - sizeof(StackSaveArea) */ |
| 1022 | newLIR2(cUnit, ARMV5TE_MOV_RR, r7, rFP); |
| 1023 | newLIR2(cUnit, ARMV5TE_SUB_RI8, r7, |
| 1024 | sizeof(StackSaveArea) + (dInsn->vA << 2)); |
| 1025 | /* generate null check */ |
| 1026 | if (pcrLabel) { |
| 1027 | *pcrLabel = genNullCheck(cUnit, r0, mir->offset, NULL); |
| 1028 | } |
| 1029 | newLIR2(cUnit, ARMV5TE_STMIA, r7, regMask); |
| 1030 | } |
| 1031 | } |
| 1032 | |
| 1033 | static void genProcessArgsRange(CompilationUnit *cUnit, MIR *mir, |
| 1034 | DecodedInstruction *dInsn, |
| 1035 | Armv5teLIR **pcrLabel) |
| 1036 | { |
| 1037 | int srcOffset = dInsn->vC << 2; |
| 1038 | int numArgs = dInsn->vA; |
| 1039 | int regMask; |
| 1040 | /* |
| 1041 | * r4PC : &rFP[vC] |
| 1042 | * r7: &newFP[0] |
| 1043 | */ |
| 1044 | if (srcOffset < 8) { |
| 1045 | newLIR3(cUnit, ARMV5TE_ADD_RRI3, r4PC, rFP, srcOffset); |
| 1046 | } else { |
| 1047 | loadConstant(cUnit, r4PC, srcOffset); |
| 1048 | newLIR3(cUnit, ARMV5TE_ADD_RRR, r4PC, rFP, r4PC); |
| 1049 | } |
| 1050 | /* load r0 .. r[min(numArgs,4) - 1] */ |
| 1051 | regMask = (1 << ((numArgs < 4) ? numArgs : 4)) - 1; |
| 1052 | newLIR2(cUnit, ARMV5TE_LDMIA, r4PC, regMask); |
| 1053 | |
| 1054 | if (sizeof(StackSaveArea) + (numArgs << 2) < 256) { |
| 1055 | newLIR2(cUnit, ARMV5TE_MOV_RR, r7, rFP); |
| 1056 | newLIR2(cUnit, ARMV5TE_SUB_RI8, r7, |
| 1057 | sizeof(StackSaveArea) + (numArgs << 2)); |
| 1058 | } else { |
| 1059 | loadConstant(cUnit, r7, sizeof(StackSaveArea) + (numArgs << 2)); |
| 1060 | newLIR3(cUnit, ARMV5TE_SUB_RRR, r7, rFP, r7); |
| 1061 | } |
| 1062 | |
| 1063 | /* generate null check */ |
| 1064 | if (pcrLabel) { |
| 1065 | *pcrLabel = genNullCheck(cUnit, r0, mir->offset, NULL); |
| 1066 | } |
| 1067 | |
| 1068 | /* |
| 1069 | * Handle remaining 4n arguments: |
| 1070 | * store previously loaded 4 values and load the next 4 values |
| 1071 | */ |
| 1072 | if (numArgs >= 8) { |
| 1073 | Armv5teLIR *loopLabel = NULL; |
| 1074 | /* |
| 1075 | * r0 contains "this" and it will be used later, so push it to the stack |
| 1076 | * first. Pushing r5 is just for stack alignment purposes. |
| 1077 | */ |
| 1078 | newLIR1(cUnit, ARMV5TE_PUSH, 1 << r0 | 1 << 5); |
| 1079 | /* No need to generate the loop structure if numArgs <= 11 */ |
| 1080 | if (numArgs > 11) { |
| 1081 | loadConstant(cUnit, 5, ((numArgs - 4) >> 2) << 2); |
| 1082 | loopLabel = newLIR0(cUnit, ARMV5TE_PSEUDO_TARGET_LABEL); |
| 1083 | } |
| 1084 | newLIR2(cUnit, ARMV5TE_STMIA, r7, regMask); |
| 1085 | newLIR2(cUnit, ARMV5TE_LDMIA, r4PC, regMask); |
| 1086 | /* No need to generate the loop structure if numArgs <= 11 */ |
| 1087 | if (numArgs > 11) { |
| 1088 | newLIR2(cUnit, ARMV5TE_SUB_RI8, 5, 4); |
| 1089 | genConditionalBranch(cUnit, ARM_COND_NE, loopLabel); |
| 1090 | } |
| 1091 | } |
| 1092 | |
| 1093 | /* Save the last batch of loaded values */ |
| 1094 | newLIR2(cUnit, ARMV5TE_STMIA, r7, regMask); |
| 1095 | |
| 1096 | /* Generate the loop epilogue - don't use r0 */ |
| 1097 | if ((numArgs > 4) && (numArgs % 4)) { |
| 1098 | regMask = ((1 << (numArgs & 0x3)) - 1) << 1; |
| 1099 | newLIR2(cUnit, ARMV5TE_LDMIA, r4PC, regMask); |
| 1100 | } |
| 1101 | if (numArgs >= 8) |
| 1102 | newLIR1(cUnit, ARMV5TE_POP, 1 << r0 | 1 << 5); |
| 1103 | |
| 1104 | /* Save the modulo 4 arguments */ |
| 1105 | if ((numArgs > 4) && (numArgs % 4)) { |
| 1106 | newLIR2(cUnit, ARMV5TE_STMIA, r7, regMask); |
| 1107 | } |
| 1108 | } |
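/*
 * Worked example (illustrative): for an invoke/range with numArgs == 10 and
 * vC == 4, the code above sets r4PC = &rFP[4] and r7 = &newFP[0], bulk-copies
 * two groups of four registers with ldmia/stmia (no loop is generated since
 * numArgs <= 11), and then moves the remaining 10 % 4 == 2 arguments through
 * r1/r2 so that r0 ("this") stays intact for the null check and the invoke.
 */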
| 1109 | |
| 1110 | static void genInvokeCommon(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb, |
| 1111 | Armv5teLIR *labelList, Armv5teLIR *pcrLabel, |
| 1112 | const Method *calleeMethod) |
| 1113 | { |
| 1114 | Armv5teLIR *retChainingCell = &labelList[bb->fallThrough->id]; |
| 1115 | |
| 1116 | /* r1 = &retChainingCell */ |
| 1117 | Armv5teLIR *addrRetChain = newLIR2(cUnit, ARMV5TE_ADD_PC_REL, |
| 1118 | r1, 0); |
| 1119 | /* r4PC = dalvikCallsite */ |
| 1120 | loadConstant(cUnit, r4PC, |
| 1121 | (int) (cUnit->method->insns + mir->offset)); |
| 1122 | addrRetChain->generic.target = (LIR *) retChainingCell; |
| 1123 | /* |
| 1124 | * r0 = calleeMethod (loaded upon calling genInvokeCommon) |
| 1125 | * r1 = &ChainingCell |
| 1126 | * r4PC = callsiteDPC |
| 1127 | */ |
| 1128 | if (dvmIsNativeMethod(calleeMethod)) { |
| 1129 | genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NO_OPT); |
| 1130 | #if defined(INVOKE_STATS) |
| 1131 | gDvmJit.invokeNoOpt++; |
| 1132 | #endif |
| 1133 | } else { |
| 1134 | genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_CHAIN); |
| 1135 | #if defined(INVOKE_STATS) |
| 1136 | gDvmJit.invokeChain++; |
| 1137 | #endif |
| 1138 | genUnconditionalBranch(cUnit, &labelList[bb->taken->id]); |
| 1139 | } |
| 1140 | /* Handle exceptions using the interpreter */ |
| 1141 | genTrap(cUnit, mir->offset, pcrLabel); |
| 1142 | } |
| 1143 | |
| 1144 | /* Generate a branch to go back to the interpreter */ |
| 1145 | static void genPuntToInterp(CompilationUnit *cUnit, unsigned int offset) |
| 1146 | { |
| 1147 | /* r0 = dalvik pc */ |
| 1148 | loadConstant(cUnit, r0, (int) (cUnit->method->insns + offset)); |
| 1149 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r1, rGLUE, |
| 1150 | offsetof(InterpState, jitToInterpEntries.dvmJitToInterpPunt) >> 2); |
| 1151 | newLIR1(cUnit, ARMV5TE_BLX_R, r1); |
| 1152 | } |
| 1153 | |
| 1154 | /* |
| 1155 | * Attempt to single step one instruction using the interpreter and return |
| 1156 | * to the compiled code for the next Dalvik instruction |
| 1157 | */ |
| 1158 | static void genInterpSingleStep(CompilationUnit *cUnit, MIR *mir) |
| 1159 | { |
| 1160 | int flags = dexGetInstrFlags(gDvm.instrFlags, mir->dalvikInsn.opCode); |
| 1161 | int flagsToCheck = kInstrCanBranch | kInstrCanSwitch | kInstrCanReturn | |
| 1162 | kInstrCanThrow; |
| 1163 | if ((mir->next == NULL) || (flags & flagsToCheck)) { |
| 1164 | genPuntToInterp(cUnit, mir->offset); |
| 1165 | return; |
| 1166 | } |
| 1167 | int entryAddr = offsetof(InterpState, |
| 1168 | jitToInterpEntries.dvmJitToInterpSingleStep); |
| 1169 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r2, rGLUE, entryAddr >> 2); |
| 1170 | /* r0 = dalvik pc */ |
| 1171 | loadConstant(cUnit, r0, (int) (cUnit->method->insns + mir->offset)); |
| 1172 | /* r1 = dalvik pc of following instruction */ |
| 1173 | loadConstant(cUnit, r1, (int) (cUnit->method->insns + mir->next->offset)); |
| 1174 | newLIR1(cUnit, ARMV5TE_BLX_R, r2); |
| 1175 | } |
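/*
 * Illustrative note: for a straight-line instruction this hands the
 * interpreter the current Dalvik PC in r0 and the PC of the following
 * instruction in r1, so control returns to compiled code right after the
 * single step; branching/switching/returning/throwing instructions are
 * punted wholesale via genPuntToInterp() instead.
 */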
| 1176 | |
| 1177 | |
| 1178 | /*****************************************************************************/ |
| 1179 | /* |
| 1180 | * The following are the first-level codegen routines that analyze the format |
| 1181 | * of each bytecode then either dispatch special purpose codegen routines |
| 1182 | * or produce corresponding Thumb instructions directly. |
| 1183 | */ |
| 1184 | |
| 1185 | static bool handleFmt10t_Fmt20t_Fmt30t(CompilationUnit *cUnit, MIR *mir, |
| 1186 | BasicBlock *bb, Armv5teLIR *labelList) |
| 1187 | { |
| 1188 | /* For OP_GOTO, OP_GOTO_16, and OP_GOTO_32 */ |
| 1189 | genUnconditionalBranch(cUnit, &labelList[bb->taken->id]); |
| 1190 | return false; |
| 1191 | } |
| 1192 | |
| 1193 | static bool handleFmt10x(CompilationUnit *cUnit, MIR *mir) |
| 1194 | { |
| 1195 | OpCode dalvikOpCode = mir->dalvikInsn.opCode; |
| 1196 | if (((dalvikOpCode >= OP_UNUSED_3E) && (dalvikOpCode <= OP_UNUSED_43)) || |
| 1197 | ((dalvikOpCode >= OP_UNUSED_E3) && (dalvikOpCode <= OP_UNUSED_EC))) { |
| 1198 | LOGE("Codegen: got unused opcode 0x%x\n",dalvikOpCode); |
| 1199 | return true; |
| 1200 | } |
| 1201 | switch (dalvikOpCode) { |
| 1202 | case OP_RETURN_VOID: |
| 1203 | genReturnCommon(cUnit,mir); |
| 1204 | break; |
| 1205 | case OP_UNUSED_73: |
| 1206 | case OP_UNUSED_79: |
| 1207 | case OP_UNUSED_7A: |
| 1208 | LOGE("Codegen: got unused opcode 0x%x\n",dalvikOpCode); |
| 1209 | return true; |
| 1210 | case OP_NOP: |
| 1211 | break; |
| 1212 | default: |
| 1213 | return true; |
| 1214 | } |
| 1215 | return false; |
| 1216 | } |
| 1217 | |
| 1218 | static bool handleFmt11n_Fmt31i(CompilationUnit *cUnit, MIR *mir) |
| 1219 | { |
| 1220 | switch (mir->dalvikInsn.opCode) { |
| 1221 | case OP_CONST: |
| 1222 | case OP_CONST_4: |
| 1223 | loadConstant(cUnit, r0, mir->dalvikInsn.vB); |
| 1224 | storeValue(cUnit, r0, mir->dalvikInsn.vA, r1); |
| 1225 | break; |
| 1226 | case OP_CONST_WIDE_32: |
| 1227 | loadConstant(cUnit, r0, mir->dalvikInsn.vB); |
| 1228 | newLIR3(cUnit, ARMV5TE_ASR, r1, r0, 31); |
| 1229 | storeValuePair(cUnit, r0, r1, mir->dalvikInsn.vA, r2); |
| 1230 | break; |
| 1231 | default: |
| 1232 | return true; |
| 1233 | } |
| 1234 | return false; |
| 1235 | } |
| 1236 | |
| 1237 | static bool handleFmt21h(CompilationUnit *cUnit, MIR *mir) |
| 1238 | { |
| 1239 | switch (mir->dalvikInsn.opCode) { |
| 1240 | case OP_CONST_HIGH16: |
| 1241 | loadConstant(cUnit, r0, mir->dalvikInsn.vB << 16); |
| 1242 | storeValue(cUnit, r0, mir->dalvikInsn.vA, r1); |
| 1243 | break; |
| 1244 | case OP_CONST_WIDE_HIGH16: |
| 1245 | loadConstant(cUnit, r1, mir->dalvikInsn.vB << 16); |
| 1246 | loadConstant(cUnit, r0, 0); |
| 1247 | storeValuePair(cUnit, r0, r1, mir->dalvikInsn.vA, r2); |
| 1248 | break; |
| 1249 | default: |
| 1250 | return true; |
| 1251 | } |
| 1252 | return false; |
| 1253 | } |
| 1254 | |
| 1255 | static bool handleFmt20bc(CompilationUnit *cUnit, MIR *mir) |
| 1256 | { |
| 1257 | /* For OP_THROW_VERIFICATION_ERROR */ |
| 1258 | genInterpSingleStep(cUnit, mir); |
| 1259 | return false; |
| 1260 | } |
| 1261 | |
| 1262 | static bool handleFmt21c_Fmt31c(CompilationUnit *cUnit, MIR *mir) |
| 1263 | { |
| 1264 | switch (mir->dalvikInsn.opCode) { |
| 1265 | /* |
| 1266 | * TODO: Verify that we can ignore the resolution check here because |
| 1267 | * it will have already successfully been interpreted once |
| 1268 | */ |
| 1269 | case OP_CONST_STRING_JUMBO: |
| 1270 | case OP_CONST_STRING: { |
| 1271 | void *strPtr = (void*) |
| 1272 | (cUnit->method->clazz->pDvmDex->pResStrings[mir->dalvikInsn.vB]); |
| 1273 | assert(strPtr != NULL); |
| 1274 | loadConstant(cUnit, r0, (int) strPtr ); |
| 1275 | storeValue(cUnit, r0, mir->dalvikInsn.vA, r1); |
| 1276 | break; |
| 1277 | } |
| 1278 | /* |
| 1279 | * TODO: Verify that we can ignore the resolution check here because |
| 1280 | * it will have already successfully been interpreted once |
| 1281 | */ |
| 1282 | case OP_CONST_CLASS: { |
| 1283 | void *classPtr = (void*) |
| 1284 | (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]); |
| 1285 | assert(classPtr != NULL); |
| 1286 | loadConstant(cUnit, r0, (int) classPtr ); |
| 1287 | storeValue(cUnit, r0, mir->dalvikInsn.vA, r1); |
| 1288 | break; |
| 1289 | } |
| 1290 | case OP_SGET_OBJECT: |
| 1291 | case OP_SGET_BOOLEAN: |
| 1292 | case OP_SGET_CHAR: |
| 1293 | case OP_SGET_BYTE: |
| 1294 | case OP_SGET_SHORT: |
| 1295 | case OP_SGET: { |
| 1296 | int valOffset = (int)&((struct StaticField*)NULL)->value; |
| 1297 | void *fieldPtr = (void*) |
| 1298 | (cUnit->method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]); |
| 1299 | assert(fieldPtr != NULL); |
| 1300 | loadConstant(cUnit, r0, (int) fieldPtr + valOffset); |
| 1301 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r0, 0); |
| 1302 | storeValue(cUnit, r0, mir->dalvikInsn.vA, r2); |
| 1303 | break; |
| 1304 | } |
| 1305 | case OP_SGET_WIDE: { |
| 1306 | int valOffset = (int)&((struct StaticField*)NULL)->value; |
| 1307 | void *fieldPtr = (void*) |
| 1308 | (cUnit->method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]); |
| 1309 | assert(fieldPtr != NULL); |
| 1310 | loadConstant(cUnit, r2, (int) fieldPtr + valOffset); |
| 1311 | newLIR2(cUnit, ARMV5TE_LDMIA, r2, (1<<r0 | 1<<r1)); |
| 1312 | storeValuePair(cUnit, r0, r1, mir->dalvikInsn.vA, r2); |
| 1313 | break; |
| 1314 | } |
| 1315 | case OP_SPUT_OBJECT: |
| 1316 | case OP_SPUT_BOOLEAN: |
| 1317 | case OP_SPUT_CHAR: |
| 1318 | case OP_SPUT_BYTE: |
| 1319 | case OP_SPUT_SHORT: |
| 1320 | case OP_SPUT: { |
| 1321 | int valOffset = (int)&((struct StaticField*)NULL)->value; |
| 1322 | void *fieldPtr = (void*) |
| 1323 | (cUnit->method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]); |
| 1324 | assert(fieldPtr != NULL); |
| 1325 | loadValue(cUnit, mir->dalvikInsn.vA, r0); |
| 1326 | loadConstant(cUnit, r1, (int) fieldPtr + valOffset); |
| 1327 | newLIR3(cUnit, ARMV5TE_STR_RRI5, r0, r1, 0); |
| 1328 | break; |
| 1329 | } |
| 1330 | case OP_SPUT_WIDE: { |
| 1331 | int valOffset = (int)&((struct StaticField*)NULL)->value; |
| 1332 | void *fieldPtr = (void*) |
| 1333 | (cUnit->method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]); |
| 1334 | assert(fieldPtr != NULL); |
| 1335 | loadValuePair(cUnit, mir->dalvikInsn.vA, r0, r1); |
| 1336 | loadConstant(cUnit, r2, (int) fieldPtr + valOffset); |
| 1337 | newLIR2(cUnit, ARMV5TE_STMIA, r2, (1<<r0 | 1<<r1)); |
| 1338 | break; |
| 1339 | } |
| 1340 | case OP_NEW_INSTANCE: { |
| 1341 | ClassObject *classPtr = (void*) |
| 1342 | (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]); |
| 1343 | assert(classPtr != NULL); |
| 1344 | assert(classPtr->status & CLASS_INITIALIZED); |
| 1345 | if ((classPtr->accessFlags & (ACC_INTERFACE|ACC_ABSTRACT)) != 0) { |
| 1346 | /* It's going to throw, just let the interp. deal with it. */ |
| 1347 | genInterpSingleStep(cUnit, mir); |
| 1348 | return false; |
| 1349 | } |
| 1350 | loadConstant(cUnit, r0, (int) classPtr); |
| 1351 | loadConstant(cUnit, r4PC, (int)dvmAllocObject); |
| 1352 | genExportPC(cUnit, mir, r2, r3 ); |
| 1353 | loadConstant(cUnit, r1, ALLOC_DONT_TRACK); |
| 1354 | newLIR1(cUnit, ARMV5TE_BLX_R, r4PC); |
| 1355 | /* |
| 1356 | * TODO: As coded, we'll bail and reinterpret on alloc failure. |
| 1357 | * Need a general mechanism to bail to thrown exception code. |
| 1358 | */ |
| 1359 | genNullCheck(cUnit, r0, mir->offset, NULL); |
| 1360 | storeValue(cUnit, r0, mir->dalvikInsn.vA, r1); |
| 1361 | break; |
| 1362 | } |
| 1363 | case OP_CHECK_CAST: { |
| 1364 | ClassObject *classPtr = |
| 1365 | (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vB]); |
| 1366 | loadConstant(cUnit, r1, (int) classPtr ); |
| 1367 | loadValue(cUnit, mir->dalvikInsn.vA, r0); /* Ref */ |
| 1368 | /* |
| 1369 | * TODO - in theory classPtr should be resolved by the time this |
| 1370 | * instruction makes it into a trace, but we are seeing NULL at runtime |
| 1371 | * so this check is temporarily used as a workaround. |
| 1372 | */ |
| 1373 | Armv5teLIR * pcrLabel = genNullCheck(cUnit, r1, mir->offset, NULL); |
| 1374 | newLIR2(cUnit, ARMV5TE_CMP_RI8, r0, 0); /* Null? */ |
| 1375 | Armv5teLIR *branch1 = |
| 1376 | newLIR2(cUnit, ARMV5TE_B_COND, 4, ARM_COND_EQ); |
| 1377 | /* r0 now contains object->clazz */ |
| 1378 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r0, |
| 1379 | offsetof(Object, clazz) >> 2); |
| 1380 | loadConstant(cUnit, r4PC, (int)dvmInstanceofNonTrivial); |
| 1381 | newLIR2(cUnit, ARMV5TE_CMP_RR, r0, r1); |
| 1382 | Armv5teLIR *branch2 = |
| 1383 | newLIR2(cUnit, ARMV5TE_B_COND, 2, ARM_COND_EQ); |
| 1384 | newLIR1(cUnit, ARMV5TE_BLX_R, r4PC); |
| 1385 | /* check cast failed - punt to the interpreter */ |
| 1386 | genNullCheck(cUnit, r0, mir->offset, pcrLabel); |
| 1387 | /* check cast passed - branch target here */ |
| 1388 | Armv5teLIR *target = newLIR0(cUnit, ARMV5TE_PSEUDO_TARGET_LABEL); |
| 1389 | branch1->generic.target = (LIR *)target; |
| 1390 | branch2->generic.target = (LIR *)target; |
| 1391 | break; |
| 1392 | } |
| 1393 | default: |
| 1394 | return true; |
| 1395 | } |
| 1396 | return false; |
| 1397 | } |
| 1398 | |
| 1399 | static bool handleFmt11x(CompilationUnit *cUnit, MIR *mir) |
| 1400 | { |
| 1401 | OpCode dalvikOpCode = mir->dalvikInsn.opCode; |
| 1402 | switch (dalvikOpCode) { |
| 1403 | case OP_MOVE_EXCEPTION: { |
| 1404 | int offset = offsetof(InterpState, self); |
| 1405 | int exOffset = offsetof(Thread, exception); |
| 1406 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, rGLUE, offset >> 2); |
| 1407 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r1, r0, exOffset >> 2); |
| 1408 | storeValue(cUnit, r1, mir->dalvikInsn.vA, r0); |
| 1409 | break; |
| 1410 | } |
| 1411 | case OP_MOVE_RESULT: |
| 1412 | case OP_MOVE_RESULT_OBJECT: { |
| 1413 | int offset = offsetof(InterpState, retval); |
| 1414 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, rGLUE, offset >> 2); |
| 1415 | storeValue(cUnit, r0, mir->dalvikInsn.vA, r1); |
| 1416 | break; |
| 1417 | } |
| 1418 | case OP_MOVE_RESULT_WIDE: { |
| 1419 | int offset = offsetof(InterpState, retval); |
| 1420 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, rGLUE, offset >> 2); |
| 1421 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r1, rGLUE, (offset >> 2)+1); |
| 1422 | storeValuePair(cUnit, r0, r1, mir->dalvikInsn.vA, r2); |
| 1423 | break; |
| 1424 | } |
| 1425 | case OP_RETURN_WIDE: { |
| 1426 | loadValuePair(cUnit, mir->dalvikInsn.vA, r0, r1); |
| 1427 | int offset = offsetof(InterpState, retval); |
| 1428 | newLIR3(cUnit, ARMV5TE_STR_RRI5, r0, rGLUE, offset >> 2); |
| 1429 | newLIR3(cUnit, ARMV5TE_STR_RRI5, r1, rGLUE, (offset >> 2)+1); |
| 1430 | genReturnCommon(cUnit,mir); |
| 1431 | break; |
| 1432 | } |
| 1433 | case OP_RETURN: |
| 1434 | case OP_RETURN_OBJECT: { |
| 1435 | loadValue(cUnit, mir->dalvikInsn.vA, r0); |
| 1436 | int offset = offsetof(InterpState, retval); |
| 1437 | newLIR3(cUnit, ARMV5TE_STR_RRI5, r0, rGLUE, offset >> 2); |
| 1438 | genReturnCommon(cUnit,mir); |
| 1439 | break; |
| 1440 | } |
| 1441 | /* |
| 1442 | * TODO-VERIFY: May be playing a bit fast and loose here. As coded, |
| 1443 | * a failure on lock/unlock will cause us to revert to the interpreter |
| 1444 | * to try again. This means we essentially ignore the first failure on |
| 1445 | * the assumption that the interpreter will correctly handle the 2nd. |
| 1446 | */ |
| 1447 | case OP_MONITOR_ENTER: |
| 1448 | case OP_MONITOR_EXIT: { |
| 1449 | int offset = offsetof(InterpState, self); |
| 1450 | loadValue(cUnit, mir->dalvikInsn.vA, r1); |
| 1451 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, rGLUE, offset >> 2); |
| 1452 | if (dalvikOpCode == OP_MONITOR_ENTER) { |
| 1453 | loadConstant(cUnit, r2, (int)dvmLockObject); |
| 1454 | } else { |
| 1455 | loadConstant(cUnit, r2, (int)dvmUnlockObject); |
| 1456 | } |
| 1457 | /* |
| 1458 | * TODO-VERIFY: Note that we're not doing an EXPORT_PC, as |
| 1459 | * Lock/unlock won't throw, and this code does not support |
| 1460 | * DEADLOCK_PREDICTION or MONITOR_TRACKING. Should it? |
| 1461 | */ |
| 1462 | genNullCheck(cUnit, r1, mir->offset, NULL); |
| 1463 | /* Do the call */ |
| 1464 | newLIR1(cUnit, ARMV5TE_BLX_R, r2); |
| 1465 | break; |
| 1466 | } |
| 1467 | case OP_THROW: { |
| 1468 | genInterpSingleStep(cUnit, mir); |
| 1469 | break; |
| 1470 | } |
| 1471 | default: |
| 1472 | return true; |
| 1473 | } |
| 1474 | return false; |
| 1475 | } |
| 1476 | |
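| | /* |
| | * Handle format 12x instructions (vA, vB): the 2addr arithmetic ops are |
| | * forwarded to genArithOp; the rest are moves, negation/not, primitive |
| | * conversions, and array-length. |
| | */ |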
| 1477 | static bool handleFmt12x(CompilationUnit *cUnit, MIR *mir) |
| 1478 | { |
| 1479 | OpCode opCode = mir->dalvikInsn.opCode; |
| 1480 | int vSrc1Dest = mir->dalvikInsn.vA; |
| 1481 | int vSrc2 = mir->dalvikInsn.vB; |
| 1482 | |
| 1483 | /* TODO - find the proper include file to declare these */ |
| 1484 | float __aeabi_i2f( int op1 ); |
| 1485 | int __aeabi_f2iz( float op1 ); |
| 1486 | float __aeabi_d2f( double op1 ); |
| 1487 | double __aeabi_f2d( float op1 ); |
| 1488 | double __aeabi_i2d( int op1 ); |
| 1489 | int __aeabi_d2iz( double op1 ); |
| 1490 | long __aeabi_f2lz( float op1 ); |
| 1491 | float __aeabi_l2f( long op1 ); |
| 1492 | long __aeabi_d2lz( double op1 ); |
| 1493 | double __aeabi_l2d( long op1 ); |
| 1494 | |
| 1495 | if ( (opCode >= OP_ADD_INT_2ADDR) && (opCode <= OP_REM_DOUBLE_2ADDR)) { |
| 1496 | return genArithOp( cUnit, mir ); |
| 1497 | } |
| 1498 | |
| 1499 | switch (opCode) { |
| 1500 | case OP_INT_TO_FLOAT: |
| 1501 | return genConversion(cUnit, mir, (void*)__aeabi_i2f, 1, 1); |
| 1502 | case OP_FLOAT_TO_INT: |
| 1503 | return genConversion(cUnit, mir, (void*)__aeabi_f2iz, 1, 1); |
| 1504 | case OP_DOUBLE_TO_FLOAT: |
| 1505 | return genConversion(cUnit, mir, (void*)__aeabi_d2f, 2, 1); |
| 1506 | case OP_FLOAT_TO_DOUBLE: |
| 1507 | return genConversion(cUnit, mir, (void*)__aeabi_f2d, 1, 2); |
| 1508 | case OP_INT_TO_DOUBLE: |
| 1509 | return genConversion(cUnit, mir, (void*)__aeabi_i2d, 1, 2); |
| 1510 | case OP_DOUBLE_TO_INT: |
| 1511 | return genConversion(cUnit, mir, (void*)__aeabi_d2iz, 2, 1); |
| 1512 | case OP_FLOAT_TO_LONG: |
| 1513 | return genConversion(cUnit, mir, (void*)__aeabi_f2lz, 1, 2); |
| 1514 | case OP_LONG_TO_FLOAT: |
| 1515 | return genConversion(cUnit, mir, (void*)__aeabi_l2f, 2, 1); |
| 1516 | case OP_DOUBLE_TO_LONG: |
| 1517 | return genConversion(cUnit, mir, (void*)__aeabi_d2lz, 2, 2); |
| 1518 | case OP_LONG_TO_DOUBLE: |
| 1519 | return genConversion(cUnit, mir, (void*)__aeabi_l2d, 2, 2); |
| 1520 | case OP_NEG_INT: |
| 1521 | case OP_NOT_INT: |
| 1522 | return genArithOpInt(cUnit, mir, vSrc1Dest, vSrc1Dest, vSrc2); |
| 1523 | case OP_NEG_LONG: |
| 1524 | case OP_NOT_LONG: |
| 1525 | return genArithOpLong(cUnit,mir, vSrc1Dest, vSrc1Dest, vSrc2); |
| 1526 | case OP_NEG_FLOAT: |
| 1527 | return genArithOpFloat(cUnit,mir,vSrc1Dest,vSrc1Dest,vSrc2); |
| 1528 | case OP_NEG_DOUBLE: |
| 1529 | return genArithOpDouble(cUnit,mir,vSrc1Dest,vSrc1Dest,vSrc2); |
| 1530 | case OP_MOVE_WIDE: |
| 1531 | loadValuePair(cUnit, mir->dalvikInsn.vB, r0, r1); |
| 1532 | storeValuePair(cUnit, r0, r1, mir->dalvikInsn.vA, r2); |
| 1533 | break; |
| 1534 | case OP_INT_TO_LONG: |
| 1535 | loadValue(cUnit, mir->dalvikInsn.vB, r0); |
| 1536 | newLIR3(cUnit, ARMV5TE_ASR, r1, r0, 31); |
| 1537 | storeValuePair(cUnit, r0, r1, mir->dalvikInsn.vA, r2); |
| 1538 | break; |
| 1539 | case OP_MOVE: |
| 1540 | case OP_MOVE_OBJECT: |
| 1541 | case OP_LONG_TO_INT: |
| 1542 | loadValue(cUnit, vSrc2, r0); |
| 1543 | storeValue(cUnit, r0, vSrc1Dest, r1); |
| 1544 | break; |
| 1545 | case OP_INT_TO_BYTE: |
| 1546 | loadValue(cUnit, vSrc2, r0); |
| 1547 | newLIR3(cUnit, ARMV5TE_LSL, r0, r0, 24); |
| 1548 | newLIR3(cUnit, ARMV5TE_ASR, r0, r0, 24); |
| 1549 | storeValue(cUnit, r0, vSrc1Dest, r1); |
| 1550 | break; |
| 1551 | case OP_INT_TO_SHORT: |
| 1552 | loadValue(cUnit, vSrc2, r0); |
| 1553 | newLIR3(cUnit, ARMV5TE_LSL, r0, r0, 16); |
| 1554 | newLIR3(cUnit, ARMV5TE_ASR, r0, r0, 16); |
| 1555 | storeValue(cUnit, r0, vSrc1Dest, r1); |
| 1556 | break; |
| 1557 | case OP_INT_TO_CHAR: |
| 1558 | loadValue(cUnit, vSrc2, r0); |
| 1559 | newLIR3(cUnit, ARMV5TE_LSL, r0, r0, 16); |
| 1560 | newLIR3(cUnit, ARMV5TE_LSR, r0, r0, 16); |
| 1561 | storeValue(cUnit, r0, vSrc1Dest, r1); |
| 1562 | break; |
| 1563 | case OP_ARRAY_LENGTH: { |
| 1564 | int lenOffset = offsetof(ArrayObject, length); |
| 1565 | loadValue(cUnit, vSrc2, r0); |
| 1566 | genNullCheck(cUnit, r0, mir->offset, NULL); |
| 1567 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r0, lenOffset >> 2); |
| 1568 | storeValue(cUnit, r0, vSrc1Dest, r1); |
| 1569 | break; |
| 1570 | } |
| 1571 | default: |
| 1572 | return true; |
| 1573 | } |
| 1574 | return false; |
| 1575 | } |
| 1576 | |
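| | /* Handle format 21s instructions (vAA, #+BBBB): const/16 and const-wide/16 */ |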
| 1577 | static bool handleFmt21s(CompilationUnit *cUnit, MIR *mir) |
| 1578 | { |
| 1579 | OpCode dalvikOpCode = mir->dalvikInsn.opCode; |
| 1580 | /* It takes only a few instructions to handle OP_CONST_WIDE_16 inline */ |
| 1581 | if (dalvikOpCode == OP_CONST_WIDE_16) { |
| 1582 | int rDest = mir->dalvikInsn.vA; |
| 1583 | int BBBB = mir->dalvikInsn.vB; |
| 1584 | int rLow = r0, rHigh = r1; |
| 1585 | if (BBBB == 0) { |
| 1586 | newLIR2(cUnit, ARMV5TE_MOV_IMM, rLow, 0); |
| 1587 | rHigh = rLow; |
| 1588 | } else if (BBBB > 0 && BBBB <= 255) { |
| 1589 | /* rLow = ssssBBBB */ |
| 1590 | newLIR2(cUnit, ARMV5TE_MOV_IMM, rLow, BBBB); |
| 1591 | /* rHigh = 0 */ |
| 1592 | newLIR2(cUnit, ARMV5TE_MOV_IMM, rHigh, 0); |
| 1593 | } else { |
| 1594 | loadConstant(cUnit, rLow, BBBB); |
| 1595 | /* |
| 1596 | * Arithmetic-shift-right by 32 bits (a Thumb shift immediate of 0 |
| 1597 | * encodes a shift of 32) to sign-extend into the high half [63..32] |
| 1598 | */ |
| 1599 | newLIR3(cUnit, ARMV5TE_ASR, rHigh, rLow, 0); |
| 1600 | } |
| 1601 | |
| 1602 | /* Save the long values to the specified Dalvik register pair */ |
| 1603 | /* |
| 1604 | * If rDest is no greater than 30, use two "str rd, [rFP + immed_5]" |
| 1605 | * instructions to store the results. Effective address is |
| 1606 | * rFP + immed_5 << 2. |
| 1607 | */ |
| 1608 | if (rDest < 31) { |
| 1609 | newLIR3(cUnit, ARMV5TE_STR_RRI5, rLow, rFP, rDest); |
| 1610 | newLIR3(cUnit, ARMV5TE_STR_RRI5, rHigh, rFP, rDest+1); |
| 1611 | } else { |
| 1612 | /* |
| 1613 | * Otherwise just load the frame offset from the constant pool and add |
| 1614 | * it to rFP. Then use stmia to store the results to the specified |
| 1615 | * register pair. |
| 1616 | */ |
| 1617 | /* Need to replicate the content in r0 to r1 */ |
| 1618 | if (rLow == rHigh) { |
| 1619 | newLIR3(cUnit, ARMV5TE_ADD_RRI3, rLow+1, rLow, 0); |
| 1620 | } |
| 1621 | /* load the rFP offset into r2 */ |
| 1622 | loadConstant(cUnit, r2, rDest*4); |
| 1623 | newLIR3(cUnit, ARMV5TE_ADD_RRR, r2, rFP, r2); |
| 1624 | newLIR2(cUnit, ARMV5TE_STMIA, r2, (1<<r0 | 1 << r1)); |
| 1625 | } |
| 1626 | } else if (dalvikOpCode == OP_CONST_16) { |
| 1627 | int rDest = mir->dalvikInsn.vA; |
| 1628 | int BBBB = mir->dalvikInsn.vB; |
| 1629 | if (BBBB >= 0 && BBBB <= 255) { |
| 1630 | /* r0 = BBBB */ |
| 1631 | newLIR2(cUnit, ARMV5TE_MOV_IMM, r0, BBBB); |
| 1632 | } else { |
| 1633 | loadConstant(cUnit, r0, BBBB); |
| 1634 | } |
| 1635 | |
| 1636 | /* Save the constant to the specified Dalvik register */ |
| 1637 | /* |
| 1638 | * If rDest is no greater than 31, effective address is |
| 1639 | * rFP + immed_5 << 2. |
| 1640 | */ |
| 1641 | if (rDest < 32) { |
| 1642 | newLIR3(cUnit, ARMV5TE_STR_RRI5, r0, rFP, rDest); |
| 1643 | } else { |
| 1644 | /* |
| 1645 | * Otherwise just load the frame offset into r2 and add it to rFP. |
| 1646 | * Then use a str with a zero immediate offset to store the result |
| 1647 | * to the specified Dalvik register. |
| 1648 | */ |
| 1649 | /* load the rFP offset into r2 */ |
| 1650 | loadConstant(cUnit, r2, rDest*4); |
| 1651 | newLIR3(cUnit, ARMV5TE_ADD_RRR, r2, rFP, r2); |
| 1652 | newLIR3(cUnit, ARMV5TE_STR_RRI5, r0, r2, 0); |
| 1653 | } |
| 1654 | } else { |
| 1655 | return true; |
| 1656 | } |
| 1657 | return false; |
| 1658 | } |
| 1659 | |
| 1660 | /* Compare against zero */ |
| 1661 | static bool handleFmt21t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb, |
| 1662 | Armv5teLIR *labelList) |
| 1663 | { |
| 1664 | OpCode dalvikOpCode = mir->dalvikInsn.opCode; |
| 1665 | Armv5teConditionCode cond; |
| 1666 | |
| 1667 | loadValue(cUnit, mir->dalvikInsn.vA, r0); |
| 1668 | newLIR2(cUnit, ARMV5TE_CMP_RI8, r0, 0); |
| 1669 | |
| 1670 | switch (dalvikOpCode) { |
| 1671 | case OP_IF_EQZ: |
| 1672 | cond = ARM_COND_EQ; |
| 1673 | break; |
| 1674 | case OP_IF_NEZ: |
| 1675 | cond = ARM_COND_NE; |
| 1676 | break; |
| 1677 | case OP_IF_LTZ: |
| 1678 | cond = ARM_COND_LT; |
| 1679 | break; |
| 1680 | case OP_IF_GEZ: |
| 1681 | cond = ARM_COND_GE; |
| 1682 | break; |
| 1683 | case OP_IF_GTZ: |
| 1684 | cond = ARM_COND_GT; |
| 1685 | break; |
| 1686 | case OP_IF_LEZ: |
| 1687 | cond = ARM_COND_LE; |
| 1688 | break; |
| 1689 | default: |
| 1690 | cond = 0; |
| 1691 | LOGE("Unexpected opcode (%d) for Fmt21t\n", dalvikOpCode); |
| 1692 | dvmAbort(); |
| 1693 | } |
| 1694 | genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]); |
| 1695 | /* This most likely will be optimized away in a later phase */ |
| 1696 | genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]); |
| 1697 | return false; |
| 1698 | } |
| 1699 | |
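| | /* |
| | * Handle formats 22b (vAA, vBB, #+CC) and 22s (vA, vB, #+CCCC): binary |
| | * integer ops with a literal operand. Division by a literal zero is |
| | * punted to the interpreter. |
| | */ |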
| 1700 | static bool handleFmt22b_Fmt22s(CompilationUnit *cUnit, MIR *mir) |
| 1701 | { |
| 1702 | OpCode dalvikOpCode = mir->dalvikInsn.opCode; |
| 1703 | int vSrc = mir->dalvikInsn.vB; |
| 1704 | int vDest = mir->dalvikInsn.vA; |
| 1705 | int lit = mir->dalvikInsn.vC; |
| 1706 | int armOp; |
| 1707 | |
| 1708 | /* TODO: find the proper .h file to declare these */ |
| 1709 | int __aeabi_idivmod(int op1, int op2); |
| 1710 | int __aeabi_idiv(int op1, int op2); |
| 1711 | |
| 1712 | switch (dalvikOpCode) { |
| 1713 | case OP_ADD_INT_LIT8: |
| 1714 | case OP_ADD_INT_LIT16: |
| 1715 | loadValue(cUnit, vSrc, r0); |
| 1716 | if (lit <= 255 && lit >= 0) { |
| 1717 | newLIR2(cUnit, ARMV5TE_ADD_RI8, r0, lit); |
| 1718 | storeValue(cUnit, r0, vDest, r1); |
| 1719 | } else if (lit >= -255 && lit <= 0) { |
| 1720 | /* Convert to a small constant subtraction */ |
| 1721 | newLIR2(cUnit, ARMV5TE_SUB_RI8, r0, -lit); |
| 1722 | storeValue(cUnit, r0, vDest, r1); |
| 1723 | } else { |
| 1724 | loadConstant(cUnit, r1, lit); |
| 1725 | genBinaryOp(cUnit, vDest, ARMV5TE_ADD_RRR); |
| 1726 | } |
| 1727 | break; |
| 1728 | |
| 1729 | case OP_RSUB_INT_LIT8: |
| 1730 | case OP_RSUB_INT: |
| 1731 | loadValue(cUnit, vSrc, r1); |
| 1732 | loadConstant(cUnit, r0, lit); |
| 1733 | genBinaryOp(cUnit, vDest, ARMV5TE_SUB_RRR); |
| 1734 | break; |
| 1735 | |
| 1736 | case OP_MUL_INT_LIT8: |
| 1737 | case OP_MUL_INT_LIT16: |
| 1738 | case OP_AND_INT_LIT8: |
| 1739 | case OP_AND_INT_LIT16: |
| 1740 | case OP_OR_INT_LIT8: |
| 1741 | case OP_OR_INT_LIT16: |
| 1742 | case OP_XOR_INT_LIT8: |
| 1743 | case OP_XOR_INT_LIT16: |
| 1744 | loadValue(cUnit, vSrc, r0); |
| 1745 | loadConstant(cUnit, r1, lit); |
| 1746 | switch (dalvikOpCode) { |
| 1747 | case OP_MUL_INT_LIT8: |
| 1748 | case OP_MUL_INT_LIT16: |
| 1749 | armOp = ARMV5TE_MUL; |
| 1750 | break; |
| 1751 | case OP_AND_INT_LIT8: |
| 1752 | case OP_AND_INT_LIT16: |
| 1753 | armOp = ARMV5TE_AND_RR; |
| 1754 | break; |
| 1755 | case OP_OR_INT_LIT8: |
| 1756 | case OP_OR_INT_LIT16: |
| 1757 | armOp = ARMV5TE_ORR; |
| 1758 | break; |
| 1759 | case OP_XOR_INT_LIT8: |
| 1760 | case OP_XOR_INT_LIT16: |
| 1761 | armOp = ARMV5TE_EOR; |
| 1762 | break; |
| 1763 | default: |
| 1764 | dvmAbort(); |
| 1765 | } |
| 1766 | genBinaryOp(cUnit, vDest, armOp); |
| 1767 | break; |
| 1768 | |
| 1769 | case OP_SHL_INT_LIT8: |
| 1770 | case OP_SHR_INT_LIT8: |
| 1771 | case OP_USHR_INT_LIT8: |
| 1772 | loadValue(cUnit, vSrc, r0); |
| 1773 | switch (dalvikOpCode) { |
| 1774 | case OP_SHL_INT_LIT8: |
| 1775 | armOp = ARMV5TE_LSL; |
| 1776 | break; |
| 1777 | case OP_SHR_INT_LIT8: |
| 1778 | armOp = ARMV5TE_ASR; |
| 1779 | break; |
| 1780 | case OP_USHR_INT_LIT8: |
| 1781 | armOp = ARMV5TE_LSR; |
| 1782 | break; |
| 1783 | default: dvmAbort(); |
| 1784 | } |
| 1785 | newLIR3(cUnit, armOp, r0, r0, lit); |
| 1786 | storeValue(cUnit, r0, vDest, r1); |
| 1787 | break; |
| 1788 | |
| 1789 | case OP_DIV_INT_LIT8: |
| 1790 | case OP_DIV_INT_LIT16: |
| 1791 | if (lit == 0) { |
| 1792 | /* Let the interpreter deal with div by 0 */ |
| 1793 | genInterpSingleStep(cUnit, mir); |
| 1794 | return false; |
| 1795 | } |
| 1796 | loadConstant(cUnit, r2, (int)__aeabi_idiv); |
| 1797 | loadConstant(cUnit, r1, lit); |
| 1798 | loadValue(cUnit, vSrc, r0); |
| 1799 | newLIR1(cUnit, ARMV5TE_BLX_R, r2); |
| 1800 | storeValue(cUnit, r0, vDest, r2); |
| 1801 | break; |
| 1802 | |
| 1803 | case OP_REM_INT_LIT8: |
| 1804 | case OP_REM_INT_LIT16: |
| 1805 | if (lit == 0) { |
| 1806 | /* Let the interpreter deal with div by 0 */ |
| 1807 | genInterpSingleStep(cUnit, mir); |
| 1808 | return false; |
| 1809 | } |
| 1810 | loadConstant(cUnit, r2, (int)__aeabi_idivmod); |
| 1811 | loadConstant(cUnit, r1, lit); |
| 1812 | loadValue(cUnit, vSrc, r0); |
| 1813 | newLIR1(cUnit, ARMV5TE_BLX_R, r2); |
| 1814 | storeValue(cUnit, r1, vDest, r2); |
| 1815 | break; |
| 1816 | default: |
| 1817 | return true; |
| 1818 | } |
| 1819 | return false; |
| 1820 | } |
| 1821 | |
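| | /* |
| | * Handle format 22c instructions (vA, vB, @CCCC): new-array, instance-of, |
| | * and the resolved instance field get/put variants. |
| | */ |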
| 1822 | static bool handleFmt22c(CompilationUnit *cUnit, MIR *mir) |
| 1823 | { |
| 1824 | OpCode dalvikOpCode = mir->dalvikInsn.opCode; |
| 1825 | int fieldOffset; |
| 1826 | |
| 1827 | if (dalvikOpCode >= OP_IGET && dalvikOpCode <= OP_IPUT_SHORT) { |
| 1828 | InstField *pInstField = (InstField *) |
| 1829 | cUnit->method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vC]; |
| 1831 | |
| 1832 | assert(pInstField != NULL); |
| 1833 | fieldOffset = pInstField->byteOffset; |
| 1834 | } else { |
| 1835 | /* To make the compiler happy */ |
| 1836 | fieldOffset = 0; |
| 1837 | } |
| 1838 | switch (dalvikOpCode) { |
| 1839 | /* |
| 1840 | * TODO: I may be assuming too much here. |
| 1841 | * Verify what is known at JIT time. |
| 1842 | */ |
| 1843 | case OP_NEW_ARRAY: { |
| 1844 | void *classPtr = (void*) |
| 1845 | (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vC]); |
| 1846 | assert(classPtr != NULL); |
| 1847 | loadValue(cUnit, mir->dalvikInsn.vB, r1); /* Len */ |
| 1848 | loadConstant(cUnit, r0, (int) classPtr ); |
| 1849 | loadConstant(cUnit, r4PC, (int)dvmAllocArrayByClass); |
| 1850 | Armv5teLIR *pcrLabel = |
| 1851 | genRegImmCheck(cUnit, ARM_COND_MI, r1, 0, mir->offset, NULL); |
| 1852 | genExportPC(cUnit, mir, r2, r3 ); |
| 1853 | newLIR2(cUnit, ARMV5TE_MOV_IMM,r2,ALLOC_DONT_TRACK); |
| 1854 | newLIR1(cUnit, ARMV5TE_BLX_R, r4PC); |
| 1855 | /* |
| 1856 | * TODO: As coded, we'll bail and reinterpret on alloc failure. |
| 1857 | * Need a general mechanism to bail to thrown exception code. |
| 1858 | */ |
| 1859 | genNullCheck(cUnit, r0, mir->offset, pcrLabel); |
| 1860 | storeValue(cUnit, r0, mir->dalvikInsn.vA, r1); |
| 1861 | break; |
| 1862 | } |
| 1863 | /* |
| 1864 | * TODO: I may be assuming too much here. |
| 1865 | * Verify what is known at JIT time. |
| 1866 | */ |
| 1867 | case OP_INSTANCE_OF: { |
| 1868 | ClassObject *classPtr = |
| 1869 | (cUnit->method->clazz->pDvmDex->pResClasses[mir->dalvikInsn.vC]); |
| 1870 | assert(classPtr != NULL); |
| 1871 | loadValue(cUnit, mir->dalvikInsn.vB, r1); /* Ref */ |
| 1872 | loadConstant(cUnit, r2, (int) classPtr ); |
| 1873 | loadConstant(cUnit, r0, 1); /* Assume true */ |
| 1874 | newLIR2(cUnit, ARMV5TE_CMP_RI8, r1, 0); /* Null? */ |
| 1875 | Armv5teLIR *branch1 = newLIR2(cUnit, ARMV5TE_B_COND, 4, |
| 1876 | ARM_COND_EQ); |
| 1877 | /* r1 now contains object->clazz */ |
| 1878 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r1, r1, |
| 1879 | offsetof(Object, clazz) >> 2); |
| 1880 | loadConstant(cUnit, r4PC, (int)dvmInstanceofNonTrivial); |
| 1881 | newLIR2(cUnit, ARMV5TE_CMP_RR, r1, r2); |
| 1882 | Armv5teLIR *branch2 = newLIR2(cUnit, ARMV5TE_B_COND, 2, |
| 1883 | ARM_COND_EQ); |
| 1884 | newLIR2(cUnit, ARMV5TE_MOV_RR, r0, r1); |
| 1885 | newLIR2(cUnit, ARMV5TE_MOV_RR, r1, r2); |
| 1886 | newLIR1(cUnit, ARMV5TE_BLX_R, r4PC); |
| 1887 | /* branch target here */ |
| 1888 | Armv5teLIR *target = newLIR0(cUnit, ARMV5TE_PSEUDO_TARGET_LABEL); |
| 1889 | storeValue(cUnit, r0, mir->dalvikInsn.vA, r1); |
| 1890 | branch1->generic.target = (LIR *)target; |
| 1891 | branch2->generic.target = (LIR *)target; |
| 1892 | break; |
| 1893 | } |
| 1894 | case OP_IGET_WIDE: |
| 1895 | genIGetWide(cUnit, mir, fieldOffset); |
| 1896 | break; |
| 1897 | case OP_IGET: |
| 1898 | case OP_IGET_OBJECT: |
| 1899 | genIGet(cUnit, mir, ARMV5TE_LDR_RRR, fieldOffset); |
| 1900 | break; |
| 1901 | case OP_IGET_BOOLEAN: |
| 1902 | genIGet(cUnit, mir, ARMV5TE_LDRB_RRR, fieldOffset); |
| 1903 | break; |
| 1904 | case OP_IGET_BYTE: |
| 1905 | genIGet(cUnit, mir, ARMV5TE_LDRSB_RRR, fieldOffset); |
| 1906 | break; |
| 1907 | case OP_IGET_CHAR: |
| 1908 | genIGet(cUnit, mir, ARMV5TE_LDRH_RRR, fieldOffset); |
| 1909 | break; |
| 1910 | case OP_IGET_SHORT: |
| 1911 | genIGet(cUnit, mir, ARMV5TE_LDRSH_RRR, fieldOffset); |
| 1912 | break; |
| 1913 | case OP_IPUT_WIDE: |
| 1914 | genIPutWide(cUnit, mir, fieldOffset); |
| 1915 | break; |
| 1916 | case OP_IPUT: |
| 1917 | case OP_IPUT_OBJECT: |
| 1918 | genIPut(cUnit, mir, ARMV5TE_STR_RRR, fieldOffset); |
| 1919 | break; |
| 1920 | case OP_IPUT_SHORT: |
| 1921 | case OP_IPUT_CHAR: |
| 1922 | genIPut(cUnit, mir, ARMV5TE_STRH_RRR, fieldOffset); |
| 1923 | break; |
| 1924 | case OP_IPUT_BYTE: |
| 1925 | case OP_IPUT_BOOLEAN: |
| 1926 | genIPut(cUnit, mir, ARMV5TE_STRB_RRR, fieldOffset); |
| 1927 | break; |
| 1928 | default: |
| 1929 | return true; |
| 1930 | } |
| 1931 | return false; |
| 1932 | } |
| 1933 | |
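| | /* |
| | * Handle format 22cs (quickened) instructions, where vC already holds the |
| | * byte offset of the instance field. |
| | */ |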
| 1934 | static bool handleFmt22cs(CompilationUnit *cUnit, MIR *mir) |
| 1935 | { |
| 1936 | OpCode dalvikOpCode = mir->dalvikInsn.opCode; |
| 1937 | int fieldOffset = mir->dalvikInsn.vC; |
| 1938 | switch (dalvikOpCode) { |
| 1939 | case OP_IGET_QUICK: |
| 1940 | case OP_IGET_OBJECT_QUICK: |
| 1941 | genIGet(cUnit, mir, ARMV5TE_LDR_RRR, fieldOffset); |
| 1942 | break; |
| 1943 | case OP_IPUT_QUICK: |
| 1944 | case OP_IPUT_OBJECT_QUICK: |
| 1945 | genIPut(cUnit, mir, ARMV5TE_STR_RRR, fieldOffset); |
| 1946 | break; |
| 1947 | case OP_IGET_WIDE_QUICK: |
| 1948 | genIGetWide(cUnit, mir, fieldOffset); |
| 1949 | break; |
| 1950 | case OP_IPUT_WIDE_QUICK: |
| 1951 | genIPutWide(cUnit, mir, fieldOffset); |
| 1952 | break; |
| 1953 | default: |
| 1954 | return true; |
| 1955 | } |
| 1956 | return false; |
| 1957 | |
| 1958 | } |
| 1959 | |
| 1960 | /* Compare two register operands */ |
| 1961 | static bool handleFmt22t(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb, |
| 1962 | Armv5teLIR *labelList) |
| 1963 | { |
| 1964 | OpCode dalvikOpCode = mir->dalvikInsn.opCode; |
| 1965 | Armv5teConditionCode cond; |
| 1966 | |
| 1967 | loadValue(cUnit, mir->dalvikInsn.vA, r0); |
| 1968 | loadValue(cUnit, mir->dalvikInsn.vB, r1); |
| 1969 | newLIR2(cUnit, ARMV5TE_CMP_RR, r0, r1); |
| 1970 | |
| 1971 | switch (dalvikOpCode) { |
| 1972 | case OP_IF_EQ: |
| 1973 | cond = ARM_COND_EQ; |
| 1974 | break; |
| 1975 | case OP_IF_NE: |
| 1976 | cond = ARM_COND_NE; |
| 1977 | break; |
| 1978 | case OP_IF_LT: |
| 1979 | cond = ARM_COND_LT; |
| 1980 | break; |
| 1981 | case OP_IF_GE: |
| 1982 | cond = ARM_COND_GE; |
| 1983 | break; |
| 1984 | case OP_IF_GT: |
| 1985 | cond = ARM_COND_GT; |
| 1986 | break; |
| 1987 | case OP_IF_LE: |
| 1988 | cond = ARM_COND_LE; |
| 1989 | break; |
| 1990 | default: |
| 1991 | cond = 0; |
| 1992 | LOGE("Unexpected opcode (%d) for Fmt22t\n", dalvikOpCode); |
| 1993 | dvmAbort(); |
| 1994 | } |
| 1995 | genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]); |
| 1996 | /* This most likely will be optimized away in a later phase */ |
| 1997 | genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]); |
| 1998 | return false; |
| 1999 | } |
| 2000 | |
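| | /* Handle formats 22x/32x: moves with 16-bit virtual register numbers */ |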
| 2001 | static bool handleFmt22x_Fmt32x(CompilationUnit *cUnit, MIR *mir) |
| 2002 | { |
| 2003 | OpCode opCode = mir->dalvikInsn.opCode; |
| 2004 | int vSrc1Dest = mir->dalvikInsn.vA; |
| 2005 | int vSrc2 = mir->dalvikInsn.vB; |
| 2006 | |
| 2007 | switch (opCode) { |
| 2008 | case OP_MOVE_16: |
| 2009 | case OP_MOVE_OBJECT_16: |
| 2010 | case OP_MOVE_FROM16: |
| 2011 | case OP_MOVE_OBJECT_FROM16: |
| 2012 | loadValue(cUnit, vSrc2, r0); |
| 2013 | storeValue(cUnit, r0, vSrc1Dest, r1); |
| 2014 | break; |
| 2015 | case OP_MOVE_WIDE_16: |
| 2016 | case OP_MOVE_WIDE_FROM16: |
| 2017 | loadValuePair(cUnit, vSrc2, r0, r1); |
| 2018 | storeValuePair(cUnit, r0, r1, vSrc1Dest, r2); |
| 2019 | break; |
| 2020 | default: |
| 2021 | return true; |
| 2022 | } |
| 2023 | return false; |
| 2024 | } |
| 2025 | |
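| | /* |
| | * Handle format 23x instructions (vAA, vBB, vCC): three-operand arithmetic |
| | * is forwarded to genArithOp; the rest are long/float/double compares and |
| | * array get/put. |
| | */ |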
| 2026 | static bool handleFmt23x(CompilationUnit *cUnit, MIR *mir) |
| 2027 | { |
| 2028 | OpCode opCode = mir->dalvikInsn.opCode; |
| 2029 | int vA = mir->dalvikInsn.vA; |
| 2030 | int vB = mir->dalvikInsn.vB; |
| 2031 | int vC = mir->dalvikInsn.vC; |
| 2032 | |
| 2033 | if ( (opCode >= OP_ADD_INT) && (opCode <= OP_REM_DOUBLE)) { |
| 2034 | return genArithOp( cUnit, mir ); |
| 2035 | } |
| 2036 | |
| 2037 | switch (opCode) { |
| 2038 | case OP_CMP_LONG: |
| 2039 | loadValuePair(cUnit,vB, r0, r1); |
| 2040 | loadValuePair(cUnit, vC, r2, r3); |
| 2041 | genDispatchToHandler(cUnit, TEMPLATE_CMP_LONG); |
| 2042 | storeValue(cUnit, r0, vA, r1); |
| 2043 | break; |
| 2044 | case OP_CMPL_FLOAT: |
| 2045 | loadValue(cUnit, vB, r0); |
| 2046 | loadValue(cUnit, vC, r1); |
| 2047 | genDispatchToHandler(cUnit, TEMPLATE_CMPL_FLOAT); |
| 2048 | storeValue(cUnit, r0, vA, r1); |
| 2049 | break; |
| 2050 | case OP_CMPG_FLOAT: |
| 2051 | loadValue(cUnit, vB, r0); |
| 2052 | loadValue(cUnit, vC, r1); |
| 2053 | genDispatchToHandler(cUnit, TEMPLATE_CMPG_FLOAT); |
| 2054 | storeValue(cUnit, r0, vA, r1); |
| 2055 | break; |
| 2056 | case OP_CMPL_DOUBLE: |
| 2057 | loadValueAddress(cUnit, vB, r0); |
| 2058 | loadValueAddress(cUnit, vC, r1); |
| 2059 | genDispatchToHandler(cUnit, TEMPLATE_CMPL_DOUBLE); |
| 2060 | storeValue(cUnit, r0, vA, r1); |
| 2061 | break; |
| 2062 | case OP_CMPG_DOUBLE: |
| 2063 | loadValueAddress(cUnit, vB, r0); |
| 2064 | loadValueAddress(cUnit, vC, r1); |
| 2065 | genDispatchToHandler(cUnit, TEMPLATE_CMPG_DOUBLE); |
| 2066 | storeValue(cUnit, r0, vA, r1); |
| 2067 | break; |
| 2068 | case OP_AGET_WIDE: |
| 2069 | genArrayGet(cUnit, mir, ARMV5TE_LDR_RRR, vB, vC, vA, 3); |
| 2070 | break; |
| 2071 | case OP_AGET: |
| 2072 | case OP_AGET_OBJECT: |
| 2073 | genArrayGet(cUnit, mir, ARMV5TE_LDR_RRR, vB, vC, vA, 2); |
| 2074 | break; |
| 2075 | case OP_AGET_BOOLEAN: |
| 2076 | genArrayGet(cUnit, mir, ARMV5TE_LDRB_RRR, vB, vC, vA, 0); |
| 2077 | break; |
| 2078 | case OP_AGET_BYTE: |
| 2079 | genArrayGet(cUnit, mir, ARMV5TE_LDRSB_RRR, vB, vC, vA, 0); |
| 2080 | break; |
| 2081 | case OP_AGET_CHAR: |
| 2082 | genArrayGet(cUnit, mir, ARMV5TE_LDRH_RRR, vB, vC, vA, 1); |
| 2083 | break; |
| 2084 | case OP_AGET_SHORT: |
| 2085 | genArrayGet(cUnit, mir, ARMV5TE_LDRSH_RRR, vB, vC, vA, 1); |
| 2086 | break; |
| 2087 | case OP_APUT_WIDE: |
| 2088 | genArrayPut(cUnit, mir, ARMV5TE_STR_RRR, vB, vC, vA, 3); |
| 2089 | break; |
| 2090 | case OP_APUT: |
| 2091 | case OP_APUT_OBJECT: |
| 2092 | genArrayPut(cUnit, mir, ARMV5TE_STR_RRR, vB, vC, vA, 2); |
| 2093 | break; |
| 2094 | case OP_APUT_SHORT: |
| 2095 | case OP_APUT_CHAR: |
| 2096 | genArrayPut(cUnit, mir, ARMV5TE_STRH_RRR, vB, vC, vA, 1); |
| 2097 | break; |
| 2098 | case OP_APUT_BYTE: |
| 2099 | case OP_APUT_BOOLEAN: |
| 2100 | genArrayPut(cUnit, mir, ARMV5TE_STRB_RRR, vB, vC, vA, 0); |
| 2101 | break; |
| 2102 | default: |
| 2103 | return true; |
| 2104 | } |
| 2105 | return false; |
| 2106 | } |
| 2107 | |
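| | /* Handle format 31t instructions: fill-array-data and packed/sparse-switch */ |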
| 2108 | static bool handleFmt31t(CompilationUnit *cUnit, MIR *mir) |
| 2109 | { |
| 2110 | OpCode dalvikOpCode = mir->dalvikInsn.opCode; |
| 2111 | switch (dalvikOpCode) { |
| 2112 | case OP_FILL_ARRAY_DATA: { |
| 2113 | loadConstant(cUnit, r4PC, (int)dvmInterpHandleFillArrayData); |
| 2114 | loadValue(cUnit, mir->dalvikInsn.vA, r0); |
| 2115 | loadConstant(cUnit, r1, (mir->dalvikInsn.vB << 1) + |
| 2116 | (int) (cUnit->method->insns + mir->offset)); |
| 2117 | genExportPC(cUnit, mir, r2, r3 ); |
| 2118 | newLIR1(cUnit, ARMV5TE_BLX_R, r4PC); |
| 2119 | genNullCheck(cUnit, r0, mir->offset, NULL); |
| 2120 | break; |
| 2121 | } |
| 2122 | /* |
| 2123 | * TODO |
| 2124 | * - Add a 1 to 3-entry per-location cache here to completely |
| 2125 | * bypass the dvmInterpHandle[Packed/Sparse]Switch call w/ chaining |
| 2126 | * - Use out-of-line handlers for both of these |
| 2127 | */ |
| 2128 | case OP_PACKED_SWITCH: |
| 2129 | case OP_SPARSE_SWITCH: { |
| 2130 | if (dalvikOpCode == OP_PACKED_SWITCH) { |
| 2131 | loadConstant(cUnit, r4PC, (int)dvmInterpHandlePackedSwitch); |
| 2132 | } else { |
| 2133 | loadConstant(cUnit, r4PC, (int)dvmInterpHandleSparseSwitch); |
| 2134 | } |
| 2135 | loadValue(cUnit, mir->dalvikInsn.vA, r1); |
| 2136 | loadConstant(cUnit, r0, (mir->dalvikInsn.vB << 1) + |
| 2137 | (int) (cUnit->method->insns + mir->offset)); |
| 2138 | newLIR1(cUnit, ARMV5TE_BLX_R, r4PC); |
| 2139 | loadConstant(cUnit, r1, (int)(cUnit->method->insns + mir->offset)); |
| 2140 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r2, rGLUE, |
| 2141 | offsetof(InterpState, jitToInterpEntries.dvmJitToInterpNoChain) |
| 2142 | >> 2); |
| 2143 | newLIR3(cUnit, ARMV5TE_ADD_RRR, r0, r0, r0); |
| 2144 | newLIR3(cUnit, ARMV5TE_ADD_RRR, r4PC, r0, r1); |
| 2145 | newLIR1(cUnit, ARMV5TE_BLX_R, r2); |
| 2146 | break; |
| 2147 | } |
| 2148 | default: |
| 2149 | return true; |
| 2150 | } |
| 2151 | return false; |
| 2152 | } |
| 2153 | |
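| | /* |
| | * Handle the invoke formats 35c/3rc plus filled-new-array. The resolved |
| | * callee (or its vtable slot) is materialized into r0 and dispatched |
| | * through TEMPLATE_INVOKE_METHOD_NO_OPT or genInvokeCommon. |
| | */ |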
| 2154 | static bool handleFmt35c_3rc(CompilationUnit *cUnit, MIR *mir, BasicBlock *bb, |
| 2155 | Armv5teLIR *labelList) |
| 2156 | { |
| 2157 | Armv5teLIR *retChainingCell = &labelList[bb->fallThrough->id]; |
| 2158 | Armv5teLIR *pcrLabel = NULL; |
| 2159 | |
| 2160 | DecodedInstruction *dInsn = &mir->dalvikInsn; |
| 2161 | switch (mir->dalvikInsn.opCode) { |
| 2162 | /* |
| 2163 | * calleeMethod = this->clazz->vtable[ |
| 2164 | * method->clazz->pDvmDex->pResMethods[BBBB]->methodIndex |
| 2165 | * ] |
| 2166 | */ |
| 2167 | case OP_INVOKE_VIRTUAL: |
| 2168 | case OP_INVOKE_VIRTUAL_RANGE: { |
| 2169 | int methodIndex = |
| 2170 | cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]-> |
| 2171 | methodIndex; |
| 2172 | |
| 2173 | if (mir->dalvikInsn.opCode == OP_INVOKE_VIRTUAL) |
| 2174 | genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel); |
| 2175 | else |
| 2176 | genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel); |
| 2177 | |
| 2178 | /* r0 now contains this->clazz */ |
| 2179 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r0, |
| 2180 | offsetof(Object, clazz) >> 2); |
| 2181 | /* r1 = &retChainingCell */ |
| 2182 | Armv5teLIR *addrRetChain = newLIR2(cUnit, ARMV5TE_ADD_PC_REL, |
| 2183 | r1, 0); |
| 2184 | /* r4PC = dalvikCallsite */ |
| 2185 | loadConstant(cUnit, r4PC, |
| 2186 | (int) (cUnit->method->insns + mir->offset)); |
| 2187 | |
| 2188 | /* r0 now contains this->clazz->vtable */ |
| 2189 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r0, |
| 2190 | offsetof(ClassObject, vtable) >> 2); |
| 2191 | addrRetChain->generic.target = (LIR *) retChainingCell; |
| 2192 | |
| 2193 | if (methodIndex < 32) { |
| 2194 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r0, methodIndex); |
| 2195 | } else { |
| 2196 | loadConstant(cUnit, r7, methodIndex<<2); |
| 2197 | newLIR3(cUnit, ARMV5TE_LDR_RRR, r0, r0, r7); |
| 2198 | } |
| 2199 | |
| 2200 | /* |
| 2201 | * r0 = calleeMethod, |
| 2202 | * r1 = &ChainingCell, |
| 2203 | * r4PC = callsiteDPC, |
| 2204 | */ |
| 2205 | genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NO_OPT); |
| 2206 | #if defined(INVOKE_STATS) |
| 2207 | gDvmJit.invokeNoOpt++; |
| 2208 | #endif |
| 2209 | /* Handle exceptions using the interpreter */ |
| 2210 | genTrap(cUnit, mir->offset, pcrLabel); |
| 2211 | break; |
| 2212 | } |
| 2213 | /* |
| 2214 | * calleeMethod = method->clazz->super->vtable[method->clazz->pDvmDex |
| 2215 | * ->pResMethods[BBBB]->methodIndex] |
| 2216 | */ |
| 2217 | /* TODO - not exercised in RunPerf.jar */ |
| 2218 | case OP_INVOKE_SUPER: |
| 2219 | case OP_INVOKE_SUPER_RANGE: { |
| 2220 | int mIndex = cUnit->method->clazz->pDvmDex-> |
| 2221 | pResMethods[dInsn->vB]->methodIndex; |
| 2222 | const Method *calleeMethod = |
| 2223 | cUnit->method->clazz->super->vtable[mIndex]; |
| 2224 | |
| 2225 | if (mir->dalvikInsn.opCode == OP_INVOKE_SUPER) |
| 2226 | genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel); |
| 2227 | else |
| 2228 | genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel); |
| 2229 | |
| 2230 | /* r0 = calleeMethod */ |
| 2231 | loadConstant(cUnit, r0, (int) calleeMethod); |
| 2232 | |
| 2233 | genInvokeCommon(cUnit, mir, bb, labelList, pcrLabel, |
| 2234 | calleeMethod); |
| 2235 | break; |
| 2236 | } |
| 2237 | /* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */ |
| 2238 | case OP_INVOKE_DIRECT: |
| 2239 | case OP_INVOKE_DIRECT_RANGE: { |
| 2240 | const Method *calleeMethod = |
| 2241 | cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]; |
| 2242 | |
| 2243 | if (mir->dalvikInsn.opCode == OP_INVOKE_DIRECT) |
| 2244 | genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel); |
| 2245 | else |
| 2246 | genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel); |
| 2247 | |
| 2248 | /* r0 = calleeMethod */ |
| 2249 | loadConstant(cUnit, r0, (int) calleeMethod); |
| 2250 | |
| 2251 | genInvokeCommon(cUnit, mir, bb, labelList, pcrLabel, |
| 2252 | calleeMethod); |
| 2253 | break; |
| 2254 | } |
| 2255 | /* calleeMethod = method->clazz->pDvmDex->pResMethods[BBBB] */ |
| 2256 | case OP_INVOKE_STATIC: |
| 2257 | case OP_INVOKE_STATIC_RANGE: { |
| 2258 | const Method *calleeMethod = |
| 2259 | cUnit->method->clazz->pDvmDex->pResMethods[dInsn->vB]; |
| 2260 | |
| 2261 | if (mir->dalvikInsn.opCode == OP_INVOKE_STATIC) |
| 2262 | genProcessArgsNoRange(cUnit, mir, dInsn, |
| 2263 | NULL /* no null check */); |
| 2264 | else |
| 2265 | genProcessArgsRange(cUnit, mir, dInsn, |
| 2266 | NULL /* no null check */); |
| 2267 | |
| 2268 | /* r0 = calleeMethod */ |
| 2269 | loadConstant(cUnit, r0, (int) calleeMethod); |
| 2270 | |
| 2271 | genInvokeCommon(cUnit, mir, bb, labelList, pcrLabel, |
| 2272 | calleeMethod); |
| 2273 | break; |
| 2274 | } |
| 2275 | /* |
| 2276 | * calleeMethod = dvmFindInterfaceMethodInCache(this->clazz, |
| 2277 | * BBBB, method, method->clazz->pDvmDex) |
| 2278 | */ |
| 2279 | case OP_INVOKE_INTERFACE: |
| 2280 | case OP_INVOKE_INTERFACE_RANGE: { |
| 2281 | int methodIndex = dInsn->vB; |
| 2282 | |
| 2283 | if (mir->dalvikInsn.opCode == OP_INVOKE_INTERFACE) |
| 2284 | genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel); |
| 2285 | else |
| 2286 | genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel); |
| 2287 | |
| 2288 | /* r0 now contains this->clazz */ |
| 2289 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r0, |
| 2290 | offsetof(Object, clazz) >> 2); |
| 2291 | |
| 2292 | /* r1 = BBBB */ |
| 2293 | loadConstant(cUnit, r1, dInsn->vB); |
| 2294 | |
| 2295 | /* r2 = method (caller) */ |
| 2296 | loadConstant(cUnit, r2, (int) cUnit->method); |
| 2297 | |
| 2298 | /* r3 = pDvmDex */ |
| 2299 | loadConstant(cUnit, r3, (int) cUnit->method->clazz->pDvmDex); |
| 2300 | |
| 2301 | loadConstant(cUnit, r7, |
| 2302 | (intptr_t) dvmFindInterfaceMethodInCache); |
| 2303 | newLIR1(cUnit, ARMV5TE_BLX_R, r7); |
| 2304 | |
| 2305 | /* r0 = calleeMethod (returned from dvmFindInterfaceMethodInCache) */ |
| 2306 | |
| 2307 | /* r1 = &retChainingCell */ |
| 2308 | Armv5teLIR *addrRetChain = newLIR2(cUnit, ARMV5TE_ADD_PC_REL, |
| 2309 | r1, 0); |
| 2310 | /* r4PC = dalvikCallsite */ |
| 2311 | loadConstant(cUnit, r4PC, |
| 2312 | (int) (cUnit->method->insns + mir->offset)); |
| 2313 | |
| 2314 | addrRetChain->generic.target = (LIR *) retChainingCell; |
| 2315 | /* |
| 2316 | * r0 = calleeMethod, |
| 2317 | * r1 = &ChainingCell, |
| 2318 | * r4PC = callsiteDPC, |
| 2319 | */ |
| 2320 | genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NO_OPT); |
| 2321 | #if defined(INVOKE_STATS) |
| 2322 | gDvmJit.invokeNoOpt++; |
| 2323 | #endif |
| 2324 | /* Handle exceptions using the interpreter */ |
| 2325 | genTrap(cUnit, mir->offset, pcrLabel); |
| 2326 | break; |
| 2327 | } |
| 2328 | /* NOP */ |
| 2329 | case OP_INVOKE_DIRECT_EMPTY: { |
| 2330 | return false; |
| 2331 | } |
| 2332 | case OP_FILLED_NEW_ARRAY: |
| 2333 | case OP_FILLED_NEW_ARRAY_RANGE: { |
| 2334 | /* Just let the interpreter deal with these */ |
| 2335 | genInterpSingleStep(cUnit, mir); |
| 2336 | break; |
| 2337 | } |
| 2338 | default: |
| 2339 | return true; |
| 2340 | } |
| 2341 | return false; |
| 2342 | } |
| 2343 | |
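| | /* |
| | * Handle the quickened invoke formats 35ms/3rms, where vB is already the |
| | * vtable index of the callee. |
| | */ |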
| 2344 | static bool handleFmt35ms_3rms(CompilationUnit *cUnit, MIR *mir, |
| 2345 | BasicBlock *bb, Armv5teLIR *labelList) |
| 2346 | { |
| 2347 | Armv5teLIR *retChainingCell = &labelList[bb->fallThrough->id]; |
| 2348 | Armv5teLIR *pcrLabel = NULL; |
| 2349 | |
| 2350 | DecodedInstruction *dInsn = &mir->dalvikInsn; |
| 2351 | switch (mir->dalvikInsn.opCode) { |
| 2352 | /* calleeMethod = this->clazz->vtable[BBBB] */ |
| 2353 | case OP_INVOKE_VIRTUAL_QUICK_RANGE: |
| 2354 | case OP_INVOKE_VIRTUAL_QUICK: { |
| 2355 | int methodIndex = dInsn->vB; |
| 2356 | if (mir->dalvikInsn.opCode == OP_INVOKE_VIRTUAL_QUICK) |
| 2357 | genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel); |
| 2358 | else |
| 2359 | genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel); |
| 2360 | |
| 2361 | /* r0 now contains this->clazz */ |
| 2362 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r0, |
| 2363 | offsetof(Object, clazz) >> 2); |
| 2364 | /* r1 = &retChainingCell */ |
| 2365 | Armv5teLIR *addrRetChain = newLIR2(cUnit, ARMV5TE_ADD_PC_REL, |
| 2366 | r1, 0); |
| 2367 | /* r4PC = dalvikCallsite */ |
| 2368 | loadConstant(cUnit, r4PC, |
| 2369 | (int) (cUnit->method->insns + mir->offset)); |
| 2370 | |
| 2371 | /* r0 now contains this->clazz->vtable */ |
| 2372 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r0, |
| 2373 | offsetof(ClassObject, vtable) >> 2); |
| 2374 | addrRetChain->generic.target = (LIR *) retChainingCell; |
| 2375 | |
| 2376 | if (methodIndex < 32) { |
| 2377 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, r0, methodIndex); |
| 2378 | } else { |
| 2379 | loadConstant(cUnit, r7, methodIndex<<2); |
| 2380 | newLIR3(cUnit, ARMV5TE_LDR_RRR, r0, r0, r7); |
| 2381 | } |
| 2382 | |
| 2383 | /* |
| 2384 | * r0 = calleeMethod, |
| 2385 | * r1 = &ChainingCell, |
| 2386 | * r4PC = callsiteDPC, |
| 2387 | */ |
| 2388 | genDispatchToHandler(cUnit, TEMPLATE_INVOKE_METHOD_NO_OPT); |
| 2389 | #if defined(INVOKE_STATS) |
| 2390 | gDvmJit.invokeNoOpt++; |
| 2391 | #endif |
| 2392 | break; |
| 2393 | } |
| 2394 | /* calleeMethod = method->clazz->super->vtable[BBBB] */ |
| 2395 | case OP_INVOKE_SUPER_QUICK: |
| 2396 | case OP_INVOKE_SUPER_QUICK_RANGE: { |
| 2397 | const Method *calleeMethod = |
| 2398 | cUnit->method->clazz->super->vtable[dInsn->vB]; |
| 2399 | |
| 2400 | if (mir->dalvikInsn.opCode == OP_INVOKE_SUPER_QUICK) |
| 2401 | genProcessArgsNoRange(cUnit, mir, dInsn, &pcrLabel); |
| 2402 | else |
| 2403 | genProcessArgsRange(cUnit, mir, dInsn, &pcrLabel); |
| 2404 | |
| 2405 | /* r0 = calleeMethod */ |
| 2406 | loadConstant(cUnit, r0, (int) calleeMethod); |
| 2407 | |
| 2408 | genInvokeCommon(cUnit, mir, bb, labelList, pcrLabel, |
| 2409 | calleeMethod); |
| 2410 | break; |
| 2411 | } |
| 2413 | default: |
| 2414 | return true; |
| 2415 | } |
| 2416 | /* Handle exceptions using the interpreter */ |
| 2417 | genTrap(cUnit, mir->offset, pcrLabel); |
| 2418 | return false; |
| 2419 | } |
| 2420 | |
| 2421 | /* |
| 2422 | * NOTE: We assume here that the special native inline routines |
| 2423 | * are side-effect free. By making this assumption, we can safely |
| 2424 | * re-execute the routine from the interpreter if it decides it |
| 2425 | * wants to throw an exception. We still need to EXPORT_PC(), though. |
| 2426 | */ |
| 2427 | static bool handleFmt3inline(CompilationUnit *cUnit, MIR *mir) |
| 2428 | { |
| 2429 | DecodedInstruction *dInsn = &mir->dalvikInsn; |
| 2430 | switch( mir->dalvikInsn.opCode) { |
| 2431 | case OP_EXECUTE_INLINE: { |
| 2432 | unsigned int i; |
| 2433 | const InlineOperation* inLineTable = dvmGetInlineOpsTable(); |
| 2434 | int offset = offsetof(InterpState, retval); |
| 2435 | int operation = dInsn->vB; |
| 2436 | |
| 2437 | if (!strcmp(inLineTable[operation].classDescriptor, |
| 2438 | "Ljava/lang/String;") && |
| 2439 | !strcmp(inLineTable[operation].methodName, |
| 2440 | "length") && |
| 2441 | !strcmp(inLineTable[operation].methodSignature, |
| 2442 | "()I")) { |
| 2443 | return genInlinedStringLength(cUnit,mir); |
| 2444 | } |
| 2445 | |
| 2446 | /* Materialize pointer to retval & push */ |
| 2447 | newLIR2(cUnit, ARMV5TE_MOV_RR, r4PC, rGLUE); |
| 2448 | newLIR2(cUnit, ARMV5TE_ADD_RI8, r4PC, offset); |
| 2449 | /* Push r4 and (just to take up space) r5 */ |
| 2450 | newLIR1(cUnit, ARMV5TE_PUSH, (1<<r4PC | 1<<rFP)); |
| 2451 | |
| 2452 | /* Get code pointer to inline routine */ |
| 2453 | loadConstant(cUnit, r4PC, (int)inLineTable[operation].func); |
| 2454 | |
| 2455 | /* Export PC */ |
| 2456 | genExportPC(cUnit, mir, r0, r1 ); |
| 2457 | |
| 2458 | /* Load arguments to r0 through r3 as applicable */ |
| 2459 | for (i=0; i < dInsn->vA; i++) { |
| 2460 | loadValue(cUnit, dInsn->arg[i], i); |
| 2461 | } |
| 2462 | /* Call inline routine */ |
| 2463 | newLIR1(cUnit, ARMV5TE_BLX_R, r4PC); |
| 2464 | |
| 2465 | /* Strip frame */ |
| 2466 | newLIR1(cUnit, ARMV5TE_ADD_SPI7, 2); |
| 2467 | |
| 2468 | /* Did we throw? If so, redo under the interpreter */ |
| 2469 | genNullCheck(cUnit, r0, mir->offset, NULL); |
| 2470 | |
| 2471 | break; |
| 2472 | } |
| 2473 | default: |
| 2474 | return true; |
| 2475 | } |
| 2476 | return false; |
| 2477 | } |
| 2478 | |
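| | /* |
| | * Handle format 51l (const-wide): load the 64-bit literal into r0/r1 and |
| | * store it to the destination Dalvik register pair. |
| | */ |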
| 2479 | static bool handleFmt51l(CompilationUnit *cUnit, MIR *mir) |
| 2480 | { |
| 2481 | loadConstant(cUnit, r0, mir->dalvikInsn.vB_wide & 0xFFFFFFFFUL); |
| 2482 | loadConstant(cUnit, r1, (mir->dalvikInsn.vB_wide>>32) & 0xFFFFFFFFUL); |
| 2483 | storeValuePair(cUnit, r0, r1, mir->dalvikInsn.vA, r2); |
| 2484 | return false; |
| 2485 | } |
| 2486 | |
| 2487 | /*****************************************************************************/ |
| 2488 | /* |
| 2489 | * The following are special processing routines that handle transfer of |
| 2490 | * controls between compiled code and the interpreter. Certain VM states like |
| 2491 | * Dalvik PC and special-purpose registers are reconstructed here. |
| 2492 | */ |
| 2493 | |
| 2494 | /* Chaining cell for normal-ending compiles (e.g., branches) */ |
| 2495 | static void handleGenericChainingCell(CompilationUnit *cUnit, |
| 2496 | unsigned int offset) |
| 2497 | { |
| 2498 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, rGLUE, |
| 2499 | offsetof(InterpState, jitToInterpEntries.dvmJitToInterpNormal) >> 2); |
| 2500 | newLIR1(cUnit, ARMV5TE_BLX_R, r0); |
| 2501 | addWordData(cUnit, (int) (cUnit->method->insns + offset), true); |
| 2502 | } |
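| | |
| | /* |
| | * A sketch (not the exact Thumb encoding) of what the cell above emits: |
| | * ldr r0, [rGLUE, #offsetof(InterpState, |
| | * jitToInterpEntries.dvmJitToInterpNormal)] |
| | * blx r0 |
| | * .word dalvikPC @ cUnit->method->insns + offset |
| | */ |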
| 2503 | |
| 2504 | /* |
| 2505 | * Chaining cell for instructions that immediately follow a method |
| 2506 | * invocation. |
| 2507 | */ |
| 2508 | static void handlePostInvokeChainingCell(CompilationUnit *cUnit, |
| 2509 | unsigned int offset) |
| 2510 | { |
| 2511 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, rGLUE, |
| 2512 | offsetof(InterpState, jitToInterpEntries.dvmJitToTraceSelect) >> 2); |
| 2513 | newLIR1(cUnit, ARMV5TE_BLX_R, r0); |
| 2514 | addWordData(cUnit, (int) (cUnit->method->insns + offset), true); |
| 2515 | } |
| 2516 | |
| 2517 | /* Chaining cell for monomorphic method invocations. */ |
| 2518 | static void handleInvokeChainingCell(CompilationUnit *cUnit, |
| 2519 | const Method *callee) |
| 2520 | { |
| 2521 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r0, rGLUE, |
| 2522 | offsetof(InterpState, jitToInterpEntries.dvmJitToTraceSelect) >> 2); |
| 2523 | newLIR1(cUnit, ARMV5TE_BLX_R, r0); |
| 2524 | addWordData(cUnit, (int) (callee->insns), true); |
| 2525 | } |
| 2526 | |
| 2527 | /* Load the Dalvik PC into r0 and jump to the specified target */ |
| 2528 | static void handlePCReconstruction(CompilationUnit *cUnit, |
| 2529 | Armv5teLIR *targetLabel) |
| 2530 | { |
| 2531 | Armv5teLIR **pcrLabel = |
| 2532 | (Armv5teLIR **) cUnit->pcReconstructionList.elemList; |
| 2533 | int numElems = cUnit->pcReconstructionList.numUsed; |
| 2534 | int i; |
| 2535 | for (i = 0; i < numElems; i++) { |
| 2536 | dvmCompilerAppendLIR(cUnit, (LIR *) pcrLabel[i]); |
| 2537 | /* r0 = dalvik PC */ |
| 2538 | loadConstant(cUnit, r0, pcrLabel[i]->operands[0]); |
| 2539 | genUnconditionalBranch(cUnit, targetLabel); |
| 2540 | } |
| 2541 | } |
| 2542 | |
| 2543 | /* Entry function to invoke the backend of the JIT compiler */ |
| 2544 | void dvmCompilerMIR2LIR(CompilationUnit *cUnit) |
| 2545 | { |
| 2546 | /* Used to hold the labels of each block */ |
| 2547 | Armv5teLIR *labelList = |
| 2548 | dvmCompilerNew(sizeof(Armv5teLIR) * cUnit->numBlocks, true); |
| 2549 | GrowableList chainingListByType[CHAINING_CELL_LAST]; |
| 2550 | int i; |
| 2551 | |
| 2552 | /* |
| 2553 | * Initialize the three chaining lists for generic, post-invoke, and invoke |
| 2554 | * chains. |
| 2555 | */ |
| 2556 | for (i = 0; i < CHAINING_CELL_LAST; i++) { |
| 2557 | dvmInitGrowableList(&chainingListByType[i], 2); |
| 2558 | } |
| 2559 | |
| 2560 | BasicBlock **blockList = cUnit->blockList; |
| 2561 | |
| 2562 | /* Handle the content in each basic block */ |
| 2563 | for (i = 0; i < cUnit->numBlocks; i++) { |
| 2564 | blockList[i]->visited = true; |
| 2565 | MIR *mir; |
| 2566 | |
| 2567 | labelList[i].operands[0] = blockList[i]->startOffset; |
| 2568 | |
| 2569 | if (blockList[i]->blockType >= CHAINING_CELL_LAST) { |
| 2570 | /* |
| 2571 | * Append the label pseudo LIR first. Chaining cells will be handled |
| 2572 | * separately afterwards. |
| 2573 | */ |
| 2574 | dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[i]); |
| 2575 | } |
| 2576 | |
| 2577 | if (blockList[i]->blockType == DALVIK_BYTECODE) { |
| 2578 | labelList[i].opCode = ARMV5TE_PSEUDO_NORMAL_BLOCK_LABEL; |
| 2579 | } else { |
| 2580 | switch (blockList[i]->blockType) { |
| 2581 | case CHAINING_CELL_GENERIC: |
| 2582 | labelList[i].opCode = ARMV5TE_PSEUDO_CHAINING_CELL_GENERIC; |
| 2583 | /* handle the codegen later */ |
| 2584 | dvmInsertGrowableList( |
| 2585 | &chainingListByType[CHAINING_CELL_GENERIC], (void *) i); |
| 2586 | break; |
| 2587 | case CHAINING_CELL_INVOKE: |
| 2588 | labelList[i].opCode = ARMV5TE_PSEUDO_CHAINING_CELL_INVOKE; |
| 2589 | labelList[i].operands[0] = |
| 2590 | (int) blockList[i]->containingMethod; |
| 2591 | /* handle the codegen later */ |
| 2592 | dvmInsertGrowableList( |
| 2593 | &chainingListByType[CHAINING_CELL_INVOKE], (void *) i); |
| 2594 | break; |
| 2595 | case CHAINING_CELL_POST_INVOKE: |
| 2596 | labelList[i].opCode = |
| 2597 | ARMV5TE_PSEUDO_CHAINING_CELL_POST_INVOKE; |
| 2598 | /* handle the codegen later */ |
| 2599 | dvmInsertGrowableList( |
| 2600 | &chainingListByType[CHAINING_CELL_POST_INVOKE], |
| 2601 | (void *) i); |
| 2602 | break; |
| 2603 | case PC_RECONSTRUCTION: |
| 2604 | /* Make sure exception handling block is next */ |
| 2605 | labelList[i].opCode = |
| 2606 | ARMV5TE_PSEUDO_PC_RECONSTRUCTION_BLOCK_LABEL; |
| 2607 | assert (i == cUnit->numBlocks - 2); |
| 2608 | handlePCReconstruction(cUnit, &labelList[i+1]); |
| 2609 | break; |
| 2610 | case EXCEPTION_HANDLING: |
| 2611 | labelList[i].opCode = ARMV5TE_PSEUDO_EH_BLOCK_LABEL; |
| 2612 | if (cUnit->pcReconstructionList.numUsed) { |
| 2613 | newLIR3(cUnit, ARMV5TE_LDR_RRI5, r1, rGLUE, |
| 2614 | offsetof(InterpState, |
| 2615 | jitToInterpEntries.dvmJitToInterpPunt) |
| 2616 | >> 2); |
| 2617 | newLIR1(cUnit, ARMV5TE_BLX_R, r1); |
| 2618 | } |
| 2619 | break; |
| 2620 | default: |
| 2621 | break; |
| 2622 | } |
| 2623 | continue; |
| 2624 | } |
| 2625 | for (mir = blockList[i]->firstMIRInsn; mir; mir = mir->next) { |
| 2626 | OpCode dalvikOpCode = mir->dalvikInsn.opCode; |
| 2627 | InstructionFormat dalvikFormat = |
| 2628 | dexGetInstrFormat(gDvm.instrFormat, dalvikOpCode); |
| 2629 | newLIR2(cUnit, ARMV5TE_PSEUDO_DALVIK_BYTECODE_BOUNDARY, |
| 2630 | mir->offset,dalvikOpCode); |
| 2631 | bool notHandled; |
| 2632 | /* |
| 2633 | * Debugging: screen the opcode first to see if it is in the |
| 2634 | * do[-not]-compile list |
| 2635 | */ |
| 2636 | bool singleStepMe = |
| 2637 | gDvmJit.includeSelectedOp != |
| 2638 | ((gDvmJit.opList[dalvikOpCode >> 3] & |
| 2639 | (1 << (dalvikOpCode & 0x7))) != |
| 2640 | 0); |
| 2641 | if (singleStepMe || cUnit->allSingleStep) { |
| 2642 | notHandled = false; |
| 2643 | genInterpSingleStep(cUnit, mir); |
| 2644 | } else { |
| 2645 | opcodeCoverage[dalvikOpCode]++; |
| 2646 | switch (dalvikFormat) { |
| 2647 | case kFmt10t: |
| 2648 | case kFmt20t: |
| 2649 | case kFmt30t: |
| 2650 | notHandled = handleFmt10t_Fmt20t_Fmt30t(cUnit, |
| 2651 | mir, blockList[i], labelList); |
| 2652 | break; |
| 2653 | case kFmt10x: |
| 2654 | notHandled = handleFmt10x(cUnit, mir); |
| 2655 | break; |
| 2656 | case kFmt11n: |
| 2657 | case kFmt31i: |
| 2658 | notHandled = handleFmt11n_Fmt31i(cUnit, mir); |
| 2659 | break; |
| 2660 | case kFmt11x: |
| 2661 | notHandled = handleFmt11x(cUnit, mir); |
| 2662 | break; |
| 2663 | case kFmt12x: |
| 2664 | notHandled = handleFmt12x(cUnit, mir); |
| 2665 | break; |
| 2666 | case kFmt20bc: |
| 2667 | notHandled = handleFmt20bc(cUnit, mir); |
| 2668 | break; |
| 2669 | case kFmt21c: |
| 2670 | case kFmt31c: |
| 2671 | notHandled = handleFmt21c_Fmt31c(cUnit, mir); |
| 2672 | break; |
| 2673 | case kFmt21h: |
| 2674 | notHandled = handleFmt21h(cUnit, mir); |
| 2675 | break; |
| 2676 | case kFmt21s: |
| 2677 | notHandled = handleFmt21s(cUnit, mir); |
| 2678 | break; |
| 2679 | case kFmt21t: |
| 2680 | notHandled = handleFmt21t(cUnit, mir, blockList[i], |
| 2681 | labelList); |
| 2682 | break; |
| 2683 | case kFmt22b: |
| 2684 | case kFmt22s: |
| 2685 | notHandled = handleFmt22b_Fmt22s(cUnit, mir); |
| 2686 | break; |
| 2687 | case kFmt22c: |
| 2688 | notHandled = handleFmt22c(cUnit, mir); |
| 2689 | break; |
| 2690 | case kFmt22cs: |
| 2691 | notHandled = handleFmt22cs(cUnit, mir); |
| 2692 | break; |
| 2693 | case kFmt22t: |
| 2694 | notHandled = handleFmt22t(cUnit, mir, blockList[i], |
| 2695 | labelList); |
| 2696 | break; |
| 2697 | case kFmt22x: |
| 2698 | case kFmt32x: |
| 2699 | notHandled = handleFmt22x_Fmt32x(cUnit, mir); |
| 2700 | break; |
| 2701 | case kFmt23x: |
| 2702 | notHandled = handleFmt23x(cUnit, mir); |
| 2703 | break; |
| 2704 | case kFmt31t: |
| 2705 | notHandled = handleFmt31t(cUnit, mir); |
| 2706 | break; |
| 2707 | case kFmt3rc: |
| 2708 | case kFmt35c: |
| 2709 | notHandled = handleFmt35c_3rc(cUnit, mir, blockList[i], |
| 2710 | labelList); |
| 2711 | break; |
| 2712 | case kFmt3rms: |
| 2713 | case kFmt35ms: |
| 2714 | notHandled = handleFmt35ms_3rms(cUnit, mir,blockList[i], |
| 2715 | labelList); |
| 2716 | break; |
| 2717 | case kFmt3inline: |
| 2718 | notHandled = handleFmt3inline(cUnit, mir); |
| 2719 | break; |
| 2720 | case kFmt51l: |
| 2721 | notHandled = handleFmt51l(cUnit, mir); |
| 2722 | break; |
| 2723 | default: |
| 2724 | notHandled = true; |
| 2725 | break; |
| 2726 | } |
| 2727 | } |
| 2728 | if (notHandled) { |
| 2729 | LOGE("%#06x: Opcode 0x%x (%s) / Fmt %d not handled\n", |
| 2730 | mir->offset, |
| 2731 | dalvikOpCode, getOpcodeName(dalvikOpCode), |
| 2732 | dalvikFormat); |
| 2733 | dvmAbort(); |
| 2734 | break; |
| 2735 | } else { |
| 2736 | gDvmJit.opHistogram[dalvikOpCode]++; |
| 2737 | } |
| 2738 | } |
| 2739 | } |
| 2740 | |
| 2741 | /* Handle the codegen in predefined order */ |
| 2742 | for (i = 0; i < CHAINING_CELL_LAST; i++) { |
| 2743 | size_t j; |
| 2744 | int *blockIdList = (int *) chainingListByType[i].elemList; |
| 2745 | |
| 2746 | cUnit->numChainingCells[i] = chainingListByType[i].numUsed; |
| 2747 | |
| 2748 | /* No chaining cells of this type */ |
| 2749 | if (cUnit->numChainingCells[i] == 0) |
| 2750 | continue; |
| 2751 | |
| 2752 | /* Record the first LIR for a new type of chaining cell */ |
| 2753 | cUnit->firstChainingLIR[i] = (LIR *) &labelList[blockIdList[0]]; |
| 2754 | |
| 2755 | for (j = 0; j < chainingListByType[i].numUsed; j++) { |
| 2756 | int blockId = blockIdList[j]; |
| 2757 | |
| 2758 | /* Align this chaining cell first */ |
| 2759 | newLIR0(cUnit, ARMV5TE_PSEUDO_ALIGN4); |
| 2760 | |
| 2761 | /* Insert the pseudo chaining instruction */ |
| 2762 | dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[blockId]); |
| 2763 | |
| 2764 | |
| 2765 | switch (blockList[blockId]->blockType) { |
| 2766 | case CHAINING_CELL_GENERIC: |
| 2767 | handleGenericChainingCell(cUnit, |
| 2768 | blockList[blockId]->startOffset); |
| 2769 | break; |
| 2770 | case CHAINING_CELL_INVOKE: |
| 2771 | handleInvokeChainingCell(cUnit, |
| 2772 | blockList[blockId]->containingMethod); |
| 2773 | break; |
| 2774 | case CHAINING_CELL_POST_INVOKE: |
| 2775 | handlePostInvokeChainingCell(cUnit, |
| 2776 | blockList[blockId]->startOffset); |
| 2777 | break; |
| 2778 | default: |
| 2779 | dvmAbort(); |
| 2780 | break; |
| 2781 | } |
| 2782 | } |
| 2783 | } |
| 2784 | } |
| 2785 | |
| 2786 | /* Accept the work and start compiling */ |
| 2787 | void *dvmCompilerDoWork(CompilerWorkOrder *work) |
| 2788 | { |
| 2789 | void *res; |
| 2790 | |
| 2791 | if (gDvmJit.codeCacheFull) { |
| 2792 | return NULL; |
| 2793 | } |
| 2794 | |
| 2795 | switch (work->kind) { |
| 2796 | case kWorkOrderMethod: |
| 2797 | res = dvmCompileMethod(work->info); |
| 2798 | break; |
| 2799 | case kWorkOrderTrace: |
| 2800 | res = dvmCompileTrace(work->info); |
| 2801 | break; |
| 2802 | default: |
| 2803 | res = NULL; |
| 2804 | dvmAbort(); |
| 2805 | } |
| 2806 | return res; |
| 2807 | } |
| 2808 | |
| 2809 | /* Architecture-specific initializations and checks go here */ |
| 2810 | bool dvmCompilerArchInit(void) |
| 2811 | { |
| 2812 | /* First, declare dvmCompiler_TEMPLATE_XXX for each template */ |
| 2813 | #define JIT_TEMPLATE(X) extern void dvmCompiler_TEMPLATE_##X(); |
| 2814 | #include "../../template/armv5te/TemplateOpList.h" |
| 2815 | #undef JIT_TEMPLATE |
| 2816 | |
| 2817 | int i = 0; |
| 2818 | extern void dvmCompilerTemplateStart(void); |
| 2819 | |
| 2820 | /* |
| 2821 | * Then, populate the templateEntryOffsets array with each template's |
| 2822 | * offset from the dvmCompilerTemplateStart symbol. |
| 2823 | */ |
| 2824 | #define JIT_TEMPLATE(X) templateEntryOffsets[i++] = \ |
| 2825 | (intptr_t) dvmCompiler_TEMPLATE_##X - (intptr_t) dvmCompilerTemplateStart; |
| 2826 | #include "../../template/armv5te/TemplateOpList.h" |
| 2827 | #undef JIT_TEMPLATE |
| 2828 | |
| 2829 | /* Codegen-specific assumptions */ |
| 2830 | assert(offsetof(ClassObject, vtable) < 128 && |
| 2831 | (offsetof(ClassObject, vtable) & 0x3) == 0); |
| 2832 | assert(offsetof(ArrayObject, length) < 128 && |
| 2833 | (offsetof(ArrayObject, length) & 0x3) == 0); |
| 2834 | assert(offsetof(ArrayObject, contents) < 256); |
| 2835 | |
| 2836 | /* Up to 5 args are pushed on top of FP - sizeofStackSaveArea */ |
| 2837 | assert(sizeof(StackSaveArea) < 236); |
| 2838 | |
| 2839 | /* |
| 2840 | * The effective address is calculated as "Rn + (imm5 << 2)", and there are |
| 2841 | * 5 entry points that codegen may access, so make sure that the offset of |
| 2842 | * jitToInterpEntries from the top of the struct is less than 108. |
| 2843 | */ |
| 2844 | assert(offsetof(InterpState, jitToInterpEntries) < 108); |
| 2845 | return true; |
| 2846 | } |
| 2847 | |
| 2848 | /* Architecture-specific debugging helpers go here */ |
| 2849 | void dvmCompilerArchDump(void) |
| 2850 | { |
| 2851 | /* Print compiled opcode in this VM instance */ |
| 2852 | int i, start, streak; |
| 2853 | char buf[1024]; |
| 2854 | |
| 2855 | streak = i = 0; |
| 2856 | buf[0] = 0; |
| 2857 | while (i < 256 && opcodeCoverage[i] == 0) { |
| 2858 | i++; |
| 2859 | } |
| 2860 | if (i == 256) { |
| 2861 | return; |
| 2862 | } |
| 2863 | for (start = i++, streak = 1; i < 256; i++) { |
| 2864 | if (opcodeCoverage[i]) { |
| 2865 | streak++; |
| 2866 | } else { |
| 2867 | if (streak == 1) { |
| 2868 | sprintf(buf+strlen(buf), "%x,", start); |
| 2869 | } else { |
| 2870 | sprintf(buf+strlen(buf), "%x-%x,", start, start + streak - 1); |
| 2871 | } |
| 2872 | streak = 0; |
| 2873 | while (i < 256 && opcodeCoverage[i] == 0) { |
| 2874 | i++; |
| 2875 | } |
| 2876 | if (i < 256) { |
| 2877 | streak = 1; |
| 2878 | start = i; |
| 2879 | } |
| 2880 | } |
| 2881 | } |
| 2882 | if (streak) { |
| 2883 | if (streak == 1) { |
| 2884 | sprintf(buf+strlen(buf), "%x", start); |
| 2885 | } else { |
| 2886 | sprintf(buf+strlen(buf), "%x-%x", start, start + streak - 1); |
| 2887 | } |
| 2888 | } |
| 2889 | if (strlen(buf)) { |
| 2890 | LOGD("dalvik.vm.jitop = %s", buf); |
| 2891 | } |
| 2892 | } |