blob: 7af1aa033cf3be46bfdb2fa0bbc3a2d6f26264ee [file] [log] [blame]
buzbee31a4a6f2012-02-28 15:36:15 -08001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17namespace art {
18
19/*
20 * This source files contains "gen" codegen routines that should
21 * be applicable to most targets. Only mid-level support utilities
22 * and "op" calls may be used here.
23 */
24
25#if defined(TARGET_ARM)
26LIR* genIT(CompilationUnit* cUnit, ArmConditionCode cond, const char* guide);
27#endif
28
29LIR* callRuntimeHelper(CompilationUnit* cUnit, int reg)
30{
31 oatClobberCalleeSave(cUnit);
32 return opReg(cUnit, kOpBlx, reg);
33}
34
35/*
36 * Generate an kPseudoBarrier marker to indicate the boundary of special
37 * blocks.
38 */
39void genBarrier(CompilationUnit* cUnit)
40{
41 LIR* barrier = newLIR0(cUnit, kPseudoBarrier);
42 /* Mark all resources as being clobbered */
43 barrier->defMask = -1;
44}
45
46/* Generate conditional branch instructions */
47LIR* genConditionalBranch(CompilationUnit* cUnit, ConditionCode cond,
48 LIR* target)
49{
50 LIR* branch = opCondBranch(cUnit, cond);
51 branch->target = (LIR*) target;
52 return branch;
53}
54
55/* Generate unconditional branch instructions */
56LIR* genUnconditionalBranch(CompilationUnit* cUnit, LIR* target)
57{
58 LIR* branch = opNone(cUnit, kOpUncondBr);
59 branch->target = (LIR*) target;
60 return branch;
61}
62
63LIR* genCheck(CompilationUnit* cUnit, ConditionCode cCode, MIR* mir,
64 ThrowKind kind)
65{
66 LIR* tgt = (LIR*)oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
67 tgt->opcode = kPseudoThrowTarget;
68 tgt->operands[0] = kind;
69 tgt->operands[1] = mir ? mir->offset : 0;
70 LIR* branch = genConditionalBranch(cUnit, cCode, tgt);
71 // Remember branch target - will process later
72 oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
73 return branch;
74}
75
76LIR* genImmedCheck(CompilationUnit* cUnit, ConditionCode cCode,
77 int reg, int immVal, MIR* mir, ThrowKind kind)
78{
79 LIR* tgt = (LIR*)oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
80 tgt->opcode = kPseudoThrowTarget;
81 tgt->operands[0] = kind;
82 tgt->operands[1] = mir->offset;
83 LIR* branch;
84 if (cCode == kCondAl) {
85 branch = genUnconditionalBranch(cUnit, tgt);
86 } else {
87 branch = genCmpImmBranch(cUnit, cCode, reg, immVal);
88 branch->target = (LIR*)tgt;
89 }
90 // Remember branch target - will process later
91 oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
92 return branch;
93}
94
95/* Perform null-check on a register. */
96LIR* genNullCheck(CompilationUnit* cUnit, int sReg, int mReg, MIR* mir)
97{
98 if (!(cUnit->disableOpt & (1 << kNullCheckElimination)) &&
99 mir->optimizationFlags & MIR_IGNORE_NULL_CHECK) {
100 return NULL;
101 }
102 return genImmedCheck(cUnit, kCondEq, mReg, 0, mir, kThrowNullPointer);
103}
104
105/* Perform check on two registers */
106LIR* genRegRegCheck(CompilationUnit* cUnit, ConditionCode cCode,
107 int reg1, int reg2, MIR* mir, ThrowKind kind)
108{
109 LIR* tgt = (LIR*)oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
110 tgt->opcode = kPseudoThrowTarget;
111 tgt->operands[0] = kind;
112 tgt->operands[1] = mir ? mir->offset : 0;
113 tgt->operands[2] = reg1;
114 tgt->operands[3] = reg2;
115 opRegReg(cUnit, kOpCmp, reg1, reg2);
116 LIR* branch = genConditionalBranch(cUnit, cCode, tgt);
117 // Remember branch target - will process later
118 oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
119 return branch;
120}
121
122void genCompareAndBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
123 RegLocation rlSrc1, RegLocation rlSrc2, LIR* labelList)
124{
125 ConditionCode cond;
126 rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
127 rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
128 opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
129 Opcode opcode = mir->dalvikInsn.opcode;
130 switch(opcode) {
131 case OP_IF_EQ:
132 cond = kCondEq;
133 break;
134 case OP_IF_NE:
135 cond = kCondNe;
136 break;
137 case OP_IF_LT:
138 cond = kCondLt;
139 break;
140 case OP_IF_GE:
141 cond = kCondGe;
142 break;
143 case OP_IF_GT:
144 cond = kCondGt;
145 break;
146 case OP_IF_LE:
147 cond = kCondLe;
148 break;
149 default:
150 cond = (ConditionCode)0;
151 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
152 }
153 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
154 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
155}
156
157void genCompareZeroAndBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
158 RegLocation rlSrc, LIR* labelList)
159{
160 ConditionCode cond;
161 rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
162 opRegImm(cUnit, kOpCmp, rlSrc.lowReg, 0);
163 Opcode opcode = mir->dalvikInsn.opcode;
164 switch(opcode) {
165 case OP_IF_EQZ:
166 cond = kCondEq;
167 break;
168 case OP_IF_NEZ:
169 cond = kCondNe;
170 break;
171 case OP_IF_LTZ:
172 cond = kCondLt;
173 break;
174 case OP_IF_GEZ:
175 cond = kCondGe;
176 break;
177 case OP_IF_GTZ:
178 cond = kCondGt;
179 break;
180 case OP_IF_LEZ:
181 cond = kCondLe;
182 break;
183 default:
184 cond = (ConditionCode)0;
185 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
186 }
187 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
188 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
189}
190
/*
 * int-to-long conversion: place the 32-bit value in the result's low
 * register, then sign-extend it into the high register.
 */
void genIntToLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                  RegLocation rlSrc)
{
    RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    if (rlSrc.location == kLocPhysReg) {
        // Source already lives in a physical register - plain copy.
        genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
    } else {
        loadValueDirect(cUnit, rlSrc, rlResult.lowReg);
    }
    // High word = low word arithmetically shifted right by 31,
    // replicating the sign bit across all 32 bits.
    opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
                rlResult.lowReg, 31);
    storeValueWide(cUnit, rlDest, rlResult);
}
204
205void genIntNarrowing(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
206 RegLocation rlSrc)
207{
208 rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
209 RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
210 OpKind op = kOpInvalid;
211 switch(mir->dalvikInsn.opcode) {
212 case OP_INT_TO_BYTE:
213 op = kOp2Byte;
214 break;
215 case OP_INT_TO_SHORT:
216 op = kOp2Short;
217 break;
218 case OP_INT_TO_CHAR:
219 op = kOp2Char;
220 break;
221 default:
222 LOG(ERROR) << "Bad int conversion type";
223 }
224 opRegReg(cUnit, op, rlResult.lowReg, rlSrc.lowReg);
225 storeValue(cUnit, rlDest, rlResult);
226}
227
/*
 * Let helper function take care of everything. Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                 RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);   /* Everything to home location */
    uint32_t type_idx = mir->dalvikInsn.vC;
    int rTgt;
    // Choose the helper: the access-checking variant is required when the
    // verifier could not prove this method may access type_idx.
    if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                    cUnit->dex_cache,
                                                    *cUnit->dex_file,
                                                    type_idx)) {
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pAllocArrayFromCode));
    } else {
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pAllocArrayFromCodeWithAccessCheck));
    }
    loadCurrMethodDirect(cUnit, rARG1);          // arg1 <- Method*
    loadConstant(cUnit, rARG0, type_idx);        // arg0 <- type_id
    loadValueDirectFixed(cUnit, rlSrc, rARG2);   // arg2 <- count
    callRuntimeHelper(cUnit, rTgt);
    // Helper leaves the new array reference in the return register.
    RegLocation rlResult = oatGetReturn(cUnit);
    storeValue(cUnit, rlDest, rlResult);
}
255
/*
 * Similar to genNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class. Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int elems = dInsn->vA;    // element count (vA for both range/non-range)
    int typeId = dInsn->vB;
    oatFlushAllRegs(cUnit);   /* Everything to home location */
    int rTgt;
    // Choose the helper: access-checking variant when the verifier could
    // not prove access to typeId.
    if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                    cUnit->dex_cache,
                                                    *cUnit->dex_file,
                                                    typeId)) {
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pCheckAndAllocArrayFromCode));
    } else {
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pCheckAndAllocArrayFromCodeWithAccessCheck));
    }
    loadCurrMethodDirect(cUnit, rARG1);     // arg1 <- Method*
    loadConstant(cUnit, rARG0, typeId);     // arg0 <- type_id
    loadConstant(cUnit, rARG2, elems);      // arg2 <- count
    callRuntimeHelper(cUnit, rTgt);
    /*
     * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
     * return region. Because AllocFromCode placed the new array
     * in rRET0, we'll just lock it into place. When debugger support is
     * added, it may be necessary to additionally copy all return
     * values to a home location in thread-local storage
     */
    oatLockTemp(cUnit, rRET0);

    // TODO: use the correct component size, currently all supported types
    // share array alignment with ints (see comment at head of function)
    size_t component_size = sizeof(int32_t);

    // Having a range of 0 is legal
    if (isRange && (dInsn->vA > 0)) {
        /*
         * Bit of ugliness here. We're going generate a mem copy loop
         * on the register range, but it is possible that some regs
         * in the range have been promoted. This is unlikely, but
         * before generating the copy, we'll just force a flush
         * of any regs in the source range that have been promoted to
         * home location.
         */
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation loc = oatUpdateLoc(cUnit,
                oatGetSrc(cUnit, mir, i));
            if (loc.location == kLocPhysReg) {
                // Promoted value lives in a register - spill it home first
                // so the memory-to-memory copy loop below sees it.
                storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
                              loc.lowReg, kWord);
            }
        }
        /*
         * TUNING note: generated code here could be much improved, but
         * this is an uncommon operation and isn't especially performance
         * critical.
         */
        int rSrc = oatAllocTemp(cUnit);
        int rDst = oatAllocTemp(cUnit);
        int rIdx = oatAllocTemp(cUnit);
        int rVal = rLR;  // Using a lot of temps, rLR is known free here
        // Set up source pointer
        RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
        opRegRegImm(cUnit, kOpAdd, rSrc, rSP,
                    oatSRegOffset(cUnit, rlFirst.sRegLow));
        // Set up the target pointer
        opRegRegImm(cUnit, kOpAdd, rDst, rRET0,
                    Array::DataOffset(component_size).Int32Value());
        // Set up the loop counter (known to be > 0)
        loadConstant(cUnit, rIdx, dInsn->vA - 1);
        // Generate the copy loop. Going backwards for convenience
        LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
        target->defMask = ENCODE_ALL;
        // Copy next element (scaled index, shift 2 == word size)
        loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
        storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
#if defined(TARGET_ARM)
        // Combine sub & test using sub setflags encoding here
        newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
        LIR* branch = opCondBranch(cUnit, kCondGe);
#else
        opRegImm(cUnit, kOpSub, rIdx, 1);
        LIR* branch = opCompareBranchImm(cUnit, kCondGe, rIdx, 0);
#endif
        branch->target = (LIR*)target;
    } else if (!isRange) {
        // Non-range form: store each argument register individually.
        // TUNING: interleave
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation rlArg = loadValue(cUnit,
                oatGetSrc(cUnit, mir, i), kCoreReg);
            storeBaseDisp(cUnit, rRET0,
                          Array::DataOffset(component_size).Int32Value() +
                          i * 4, rlArg.lowReg, kWord);
            // If the loadValue caused a temp to be allocated, free it
            if (oatIsTemp(cUnit, rlArg.lowReg)) {
                oatFreeTemp(cUnit, rlArg.lowReg);
            }
        }
    }
}
362
/*
 * Generate code for an sput (static field store). Three strategies:
 *  - fast path: field belongs to this method's own class, so the static
 *    storage base is simply the declaring class;
 *  - medium path: field is in another class whose initialization must be
 *    checked (and triggered via helper) at runtime;
 *  - slow path: field unresolved at compile time; call the runtime setter.
 */
void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
             bool isLongOrDouble, bool isObject)
{
    int fieldOffset;
    int ssbIndex;
    bool isVolatile;
    bool isReferrersClass;
    uint32_t fieldIdx = mir->dalvikInsn.vB;

    OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
                             *cUnit->dex_file, *cUnit->dex_cache,
                             cUnit->code_item, cUnit->method_idx,
                             cUnit->access_flags);

    // Resolve the field at compile time if possible (outputs: offset,
    // static-storage-base index, volatility, referrer's-class flag).
    bool fastPath =
        cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
                                                fieldOffset, ssbIndex,
                                                isReferrersClass, isVolatile, true);
    if (fastPath && !SLOW_FIELD_PATH) {
        DCHECK_GE(fieldOffset, 0);
        int rBase;
        int rMethod;
        if (isReferrersClass) {
            // Fast path, static storage base is this method's class
            rMethod = loadCurrMethod(cUnit);
            rBase = oatAllocTemp(cUnit);
            loadWordDisp(cUnit, rMethod,
                         Method::DeclaringClassOffset().Int32Value(), rBase);
        } else {
            // Medium path, static storage base in a different class which
            // requires checks that the other class is initialized.
            DCHECK_GE(ssbIndex, 0);
            // May do runtime call so everything to home locations.
            oatFlushAllRegs(cUnit);
            // Using fixed register to sync with possible call to runtime
            // support.
            rMethod = rARG1;
            oatLockTemp(cUnit, rMethod);
            loadCurrMethodDirect(cUnit, rMethod);
            rBase = rARG0;
            oatLockTemp(cUnit, rBase);
            loadWordDisp(cUnit, rMethod,
                Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                rBase);
            loadWordDisp(cUnit, rBase,
                Array::DataOffset(sizeof(Object*)).Int32Value() + sizeof(int32_t*) *
                ssbIndex, rBase);
            // rBase now points at appropriate static storage base (Class*)
            // or NULL if not initialized. Check for NULL and call helper if NULL.
            // TUNING: fast path should fall through
            LIR* branchOver = genCmpImmBranch(cUnit, kCondNe, rBase, 0);
            int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                                  pInitializeStaticStorage));
            loadConstant(cUnit, rARG0, ssbIndex);
            callRuntimeHelper(cUnit, rTgt);
#if defined(TARGET_MIPS)
            // For Arm, rRET0 = rARG0 = rBASE, for Mips, we need to copy
            genRegCopy(cUnit, rBase, rRET0);
#endif
            LIR* skipTarget = newLIR0(cUnit, kPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            branchOver->target = (LIR*)skipTarget;
        }
        // rBase now holds static storage base
        oatFreeTemp(cUnit, rMethod);
        if (isLongOrDouble) {
            rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
            rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
        } else {
            rlSrc = oatGetSrc(cUnit, mir, 0);
            rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
        }
        //FIXME: need to generalize the barrier call
        if (isVolatile) {
            // Store barrier before a volatile write.
            oatGenMemBarrier(cUnit, kST);
        }
        if (isLongOrDouble) {
            storeBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
                              rlSrc.highReg);
        } else {
            storeWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
        }
        if (isVolatile) {
            // Full barrier after a volatile write.
            oatGenMemBarrier(cUnit, kSY);
        }
        if (isObject) {
            // Reference store: dirty the card for the GC write barrier.
            markGCCard(cUnit, rlSrc.lowReg, rBase);
        }
        oatFreeTemp(cUnit, rBase);
    } else {
        // Slow path: call the type-appropriate runtime setter with the
        // raw field index; the runtime resolves the field itself.
        oatFlushAllRegs(cUnit);   // Everything to home locations
        int setterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pSet64Static) :
            (isObject ? OFFSETOF_MEMBER(Thread, pSetObjStatic)
                      : OFFSETOF_MEMBER(Thread, pSet32Static));
        int rTgt = loadHelper(cUnit, setterOffset);
        loadConstant(cUnit, rARG0, fieldIdx);
        if (isLongOrDouble) {
            loadValueDirectWideFixed(cUnit, rlSrc, rARG2, rARG3);
        } else {
            loadValueDirect(cUnit, rlSrc, rARG1);
        }
        callRuntimeHelper(cUnit, rTgt);
    }
}
467
/*
 * Generate code for an sget (static field load). Mirrors genSput's three
 * strategies: fast path (field in this method's own class), medium path
 * (other class, initialization checked at runtime), and slow path
 * (unresolved field, call the runtime getter).
 */
void genSget(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
             bool isLongOrDouble, bool isObject)
{
    int fieldOffset;
    int ssbIndex;
    bool isVolatile;
    bool isReferrersClass;
    uint32_t fieldIdx = mir->dalvikInsn.vB;

    OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
                             *cUnit->dex_file, *cUnit->dex_cache,
                             cUnit->code_item, cUnit->method_idx,
                             cUnit->access_flags);

    // Resolve the field at compile time if possible.
    bool fastPath =
        cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
                                                fieldOffset, ssbIndex,
                                                isReferrersClass, isVolatile,
                                                false);
    if (fastPath && !SLOW_FIELD_PATH) {
        DCHECK_GE(fieldOffset, 0);
        int rBase;
        int rMethod;
        if (isReferrersClass) {
            // Fast path, static storage base is this method's class
            rMethod = loadCurrMethod(cUnit);
            rBase = oatAllocTemp(cUnit);
            loadWordDisp(cUnit, rMethod,
                         Method::DeclaringClassOffset().Int32Value(), rBase);
        } else {
            // Medium path, static storage base in a different class which
            // requires checks that the other class is initialized
            DCHECK_GE(ssbIndex, 0);
            // May do runtime call so everything to home locations.
            oatFlushAllRegs(cUnit);
            // Using fixed register to sync with possible call to runtime
            // support
            rMethod = rARG1;
            oatLockTemp(cUnit, rMethod);
            loadCurrMethodDirect(cUnit, rMethod);
            rBase = rARG0;
            oatLockTemp(cUnit, rBase);
            loadWordDisp(cUnit, rMethod,
                Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                rBase);
            loadWordDisp(cUnit, rBase,
                         Array::DataOffset(sizeof(Object*)).Int32Value() +
                         sizeof(int32_t*) * ssbIndex,
                         rBase);
            // rBase now points at appropriate static storage base (Class*)
            // or NULL if not initialized. Check for NULL and call helper if NULL.
            // TUNING: fast path should fall through
            LIR* branchOver = genCmpImmBranch(cUnit, kCondNe, rBase, 0);
            int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                                  pInitializeStaticStorage));
            loadConstant(cUnit, rARG0, ssbIndex);
            callRuntimeHelper(cUnit, rTgt);
#if defined(TARGET_MIPS)
            // For Arm, rRET0 = rARG0 = rBASE, for Mips, we need to copy
            genRegCopy(cUnit, rBase, rRET0);
#endif
            LIR* skipTarget = newLIR0(cUnit, kPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            branchOver->target = (LIR*)skipTarget;
        }
        // rBase now holds static storage base
        oatFreeTemp(cUnit, rMethod);
        rlDest = isLongOrDouble ? oatGetDestWide(cUnit, mir, 0, 1)
                                : oatGetDest(cUnit, mir, 0);
        RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
        if (isVolatile) {
            // Full barrier before a volatile read.
            oatGenMemBarrier(cUnit, kSY);
        }
        if (isLongOrDouble) {
            loadBaseDispWide(cUnit, NULL, rBase, fieldOffset, rlResult.lowReg,
                             rlResult.highReg, INVALID_SREG);
        } else {
            loadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
        }
        oatFreeTemp(cUnit, rBase);
        if (isLongOrDouble) {
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            storeValue(cUnit, rlDest, rlResult);
        }
    } else {
        // Slow path: call the type-appropriate runtime getter with the
        // raw field index; result comes back in the return register(s).
        oatFlushAllRegs(cUnit);   // Everything to home locations
        int getterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pGet64Static) :
            (isObject ? OFFSETOF_MEMBER(Thread, pGetObjStatic)
                      : OFFSETOF_MEMBER(Thread, pGet32Static));
        int rTgt = loadHelper(cUnit, getterOffset);
        loadConstant(cUnit, rARG0, fieldIdx);
        callRuntimeHelper(cUnit, rTgt);
        if (isLongOrDouble) {
            RegLocation rlResult = oatGetReturnWide(cUnit);
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            RegLocation rlResult = oatGetReturn(cUnit);
            storeValue(cUnit, rlDest, rlResult);
        }
    }
}
570
571
572// Debugging routine - if null target, branch to DebugMe
573void genShowTarget(CompilationUnit* cUnit)
574{
575 LIR* branchOver = genCmpImmBranch(cUnit, kCondNe, rLINK, 0);
576 loadWordDisp(cUnit, rSELF,
577 OFFSETOF_MEMBER(Thread, pDebugMe), rLINK);
578 LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
579 target->defMask = -1;
580 branchOver->target = (LIR*)target;
581}
582
583void genThrowVerificationError(CompilationUnit* cUnit, MIR* mir)
584{
585 int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
586 pThrowVerificationErrorFromCode));
587 loadConstant(cUnit, rARG0, mir->dalvikInsn.vA);
588 loadConstant(cUnit, rARG1, mir->dalvikInsn.vB);
589 callRuntimeHelper(cUnit, rTgt);
590}
591
/*
 * Materialize all deferred suspend-check launchpads accumulated during
 * code generation. Each launchpad label carries its resume label in
 * operands[0] and the Dalvik offset in operands[1].
 */
void handleSuspendLaunchpads(CompilationUnit *cUnit)
{
    LIR** suspendLabel =
        (LIR **) cUnit->suspendLaunchpads.elemList;
    int numElems = cUnit->suspendLaunchpads.numUsed;

    for (int i = 0; i < numElems; i++) {
        /* TUNING: move suspend count load into helper */
        LIR* lab = suspendLabel[i];
        LIR* resumeLab = (LIR*)lab->operands[0];
        // Restore the Dalvik offset so mapping tables stay accurate.
        cUnit->currentDalvikOffset = lab->operands[1];
        oatAppendLIR(cUnit, (LIR *)lab);
        int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                              pTestSuspendFromCode));
        if (!cUnit->genDebugger) {
            // use rSUSPEND for suspend count
            loadWordDisp(cUnit, rSELF,
                         Thread::SuspendCountOffset().Int32Value(), rSUSPEND);
        }
        opReg(cUnit, kOpBlx, rTgt);
        if ( cUnit->genDebugger) {
            // use rSUSPEND for update debugger
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode),
                         rSUSPEND);
        }
        // Jump back to where the suspend check was requested.
        genUnconditionalBranch(cUnit, resumeLab);
    }
}
621
/*
 * Materialize all deferred throw launchpads. Each launchpad label carries
 * the ThrowKind in operands[0], the Dalvik offset in operands[1], and up
 * to two payload registers in operands[2]/[3]; the payload registers are
 * marshalled into the argument registers before calling the throw helper.
 */
void handleThrowLaunchpads(CompilationUnit *cUnit)
{
    LIR** throwLabel = (LIR **) cUnit->throwLaunchpads.elemList;
    int numElems = cUnit->throwLaunchpads.numUsed;
    int i;

    for (i = 0; i < numElems; i++) {
        LIR* lab = throwLabel[i];
        // Restore the Dalvik offset so mapping tables stay accurate.
        cUnit->currentDalvikOffset = lab->operands[1];
        oatAppendLIR(cUnit, (LIR *)lab);
        int funcOffset = 0;
        int v1 = lab->operands[2];
        int v2 = lab->operands[3];
        switch(lab->operands[0]) {
            case kThrowNullPointer:
                funcOffset = OFFSETOF_MEMBER(Thread, pThrowNullPointerFromCode);
                break;
            case kThrowArrayBounds:
                // Marshal (v1, v2) into (rARG0, rARG1) without clobbering
                // either value; the order of copies depends on overlap.
                // NOTE(review): compares against raw r0/r1 rather than
                // rARG0/rARG1 - verify this is correct on non-ARM targets.
                if (v2 != r0) {
                    genRegCopy(cUnit, rARG0, v1);
                    genRegCopy(cUnit, rARG1, v2);
                } else {
                    if (v1 == r1) {
                        // Both values sit in each other's target regs -
                        // rotate through a scratch register.
#if defined(TARGET_ARM)
                        int rTmp = r12;
#else
                        int rTmp = oatAllocTemp(cUnit);
#endif
                        genRegCopy(cUnit, rTmp, v1);
                        genRegCopy(cUnit, rARG1, v2);
                        genRegCopy(cUnit, rARG0, rTmp);
#if !(defined(TARGET_ARM))
                        oatFreeTemp(cUnit, rTmp);
#endif
                    } else {
                        genRegCopy(cUnit, rARG1, v2);
                        genRegCopy(cUnit, rARG0, v1);
                    }
                }
                funcOffset = OFFSETOF_MEMBER(Thread, pThrowArrayBoundsFromCode);
                break;
            case kThrowDivZero:
                funcOffset = OFFSETOF_MEMBER(Thread, pThrowDivZeroFromCode);
                break;
            case kThrowVerificationError:
                loadConstant(cUnit, rARG0, v1);
                loadConstant(cUnit, rARG1, v2);
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode);
                break;
            case kThrowNegArraySize:
                genRegCopy(cUnit, rARG0, v1);
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowNegArraySizeFromCode);
                break;
            case kThrowNoSuchMethod:
                genRegCopy(cUnit, rARG0, v1);
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowNoSuchMethodFromCode);
                break;
            case kThrowStackOverflow:
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowStackOverflowFromCode);
                // Restore stack alignment
                opRegImm(cUnit, kOpAdd, rSP,
                         (cUnit->numCoreSpills + cUnit->numFPSpills) * 4);
                break;
            default:
                LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
        }
        int rTgt = loadHelper(cUnit, funcOffset);
        callRuntimeHelper(cUnit, rTgt);
    }
}
696
/* Needed by the Assembler */
void oatSetupResourceMasks(LIR* lir)
{
    // Thin wrapper exposing the target-specific use/def resource-mask
    // computation to the assembler.
    setupResourceMasks(lir);
}
702
/*
 * Generate code for an iget (instance field load). Fast path loads the
 * field directly at a compile-time-known offset (after a null check of
 * the object); slow path calls the type-appropriate runtime getter with
 * the raw field index.
 */
void genIGet(CompilationUnit* cUnit, MIR* mir, OpSize size,
             RegLocation rlDest, RegLocation rlObj,
             bool isLongOrDouble, bool isObject)
{
    int fieldOffset;
    bool isVolatile;
    uint32_t fieldIdx = mir->dalvikInsn.vC;

    OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
                             *cUnit->dex_file, *cUnit->dex_cache,
                             cUnit->code_item, cUnit->method_idx,
                             cUnit->access_flags);

    // Try to resolve the field at compile time.
    bool fastPath = cUnit->compiler->ComputeInstanceFieldInfo(fieldIdx, &mUnit,
                    fieldOffset, isVolatile, false);

    if (fastPath && !SLOW_FIELD_PATH) {
        RegLocation rlResult;
        RegisterClass regClass = oatRegClassBySize(size);
        DCHECK_GE(fieldOffset, 0);
        rlObj = loadValue(cUnit, rlObj, kCoreReg);
        if (isLongOrDouble) {
            DCHECK(rlDest.wide);
            genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
            // Wide load goes through a computed pointer (obj + offset).
            int regPtr = oatAllocTemp(cUnit);
            opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
            rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
            loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
            if (isVolatile) {
                // Full barrier after a volatile read.
                oatGenMemBarrier(cUnit, kSY);
            }
            oatFreeTemp(cUnit, regPtr);
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
            genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
            loadBaseDisp(cUnit, mir, rlObj.lowReg, fieldOffset, rlResult.lowReg,
                         kWord, rlObj.sRegLow);
            if (isVolatile) {
                // Full barrier after a volatile read.
                oatGenMemBarrier(cUnit, kSY);
            }
            storeValue(cUnit, rlDest, rlResult);
        }
    } else {
        // Slow path: runtime getter resolves the field itself.
        int getterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pGet64Instance) :
            (isObject ? OFFSETOF_MEMBER(Thread, pGetObjInstance)
                      : OFFSETOF_MEMBER(Thread, pGet32Instance));
        int rTgt = loadHelper(cUnit, getterOffset);
        loadValueDirect(cUnit, rlObj, rARG1);
        loadConstant(cUnit, rARG0, fieldIdx);
        callRuntimeHelper(cUnit, rTgt);
        if (isLongOrDouble) {
            RegLocation rlResult = oatGetReturnWide(cUnit);
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            RegLocation rlResult = oatGetReturn(cUnit);
            storeValue(cUnit, rlDest, rlResult);
        }
    }
}
763
/*
 * Generate code for an iput (instance field store). Fast path stores to a
 * compile-time-known offset (after a null check of the object); slow path
 * calls the type-appropriate runtime setter with the raw field index.
 */
void genIPut(CompilationUnit* cUnit, MIR* mir, OpSize size, RegLocation rlSrc,
             RegLocation rlObj, bool isLongOrDouble, bool isObject)
{
    int fieldOffset;
    bool isVolatile;
    uint32_t fieldIdx = mir->dalvikInsn.vC;

    OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
                             *cUnit->dex_file, *cUnit->dex_cache,
                             cUnit->code_item, cUnit->method_idx,
                             cUnit->access_flags);

    // Try to resolve the field at compile time.
    bool fastPath = cUnit->compiler->ComputeInstanceFieldInfo(fieldIdx, &mUnit,
                    fieldOffset, isVolatile, true);
    if (fastPath && !SLOW_FIELD_PATH) {
        RegisterClass regClass = oatRegClassBySize(size);
        DCHECK_GE(fieldOffset, 0);
        rlObj = loadValue(cUnit, rlObj, kCoreReg);
        if (isLongOrDouble) {
            int regPtr;
            rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
            genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
            // Wide store goes through a computed pointer (obj + offset).
            regPtr = oatAllocTemp(cUnit);
            opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
            if (isVolatile) {
                // Store barrier before a volatile write.
                oatGenMemBarrier(cUnit, kST);
            }
            storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);
            if (isVolatile) {
                // Full barrier after a volatile write.
                oatGenMemBarrier(cUnit, kSY);
            }
            oatFreeTemp(cUnit, regPtr);
        } else {
            rlSrc = loadValue(cUnit, rlSrc, regClass);
            genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);/* null? */
            if (isVolatile) {
                // Store barrier before a volatile write.
                oatGenMemBarrier(cUnit, kST);
            }
            storeBaseDisp(cUnit, rlObj.lowReg, fieldOffset, rlSrc.lowReg, kWord);
            if (isVolatile) {
                // Full barrier after a volatile write.
                oatGenMemBarrier(cUnit, kSY);
            }
        }
    } else {
        // Slow path: runtime setter resolves the field itself.
        int setterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pSet64Instance) :
            (isObject ? OFFSETOF_MEMBER(Thread, pSetObjInstance)
                      : OFFSETOF_MEMBER(Thread, pSet32Instance));
        int rTgt = loadHelper(cUnit, setterOffset);
        loadValueDirect(cUnit, rlObj, rARG1);
        if (isLongOrDouble) {
            loadValueDirectWide(cUnit, rlSrc, rARG2, rARG3);
        } else {
            loadValueDirect(cUnit, rlSrc, rARG2);
        }
        loadConstant(cUnit, rARG0, fieldIdx);
        callRuntimeHelper(cUnit, rTgt);
    }
}
822
/*
 * Generate code for const-class: load a resolved Class* for type_idx into
 * rlDest. If access cannot be verified at compile time, call the helper
 * that resolves and access-checks. Otherwise load from the dex cache,
 * with a runtime NULL test + resolution call when the type may not yet be
 * resolved.
 */
void genConstClass(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                   RegLocation rlSrc)
{
    uint32_t type_idx = mir->dalvikInsn.vB;
    int mReg = loadCurrMethod(cUnit);
    int resReg = oatAllocTemp(cUnit);
    RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                     cUnit->dex_cache,
                                                     *cUnit->dex_file,
                                                     type_idx)) {
        // Call out to helper which resolves type and verifies access.
        // Resolved type returned in rRET0.
        int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                              pInitializeTypeAndVerifyAccessFromCode));
        genRegCopy(cUnit, rARG1, mReg);
        loadConstant(cUnit, rARG0, type_idx);
        callRuntimeHelper(cUnit, rTgt);
        // NOTE: this rlResult intentionally shadows the outer one - the
        // helper's return location replaces the pre-evaluated location.
        RegLocation rlResult = oatGetReturn(cUnit);
        storeValue(cUnit, rlDest, rlResult);
    } else {
        // We're don't need access checks, load type from dex cache
        int32_t dex_cache_offset =
            Method::DexCacheResolvedTypesOffset().Int32Value();
        loadWordDisp(cUnit, mReg, dex_cache_offset, resReg);
        int32_t offset_of_type =
            Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*)
            * type_idx);
        loadWordDisp(cUnit, resReg, offset_of_type, rlResult.lowReg);
        if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(cUnit->dex_cache,
            type_idx) || SLOW_TYPE_PATH) {
            // Slow path, at runtime test if type is null and if so initialize
            oatFlushAllRegs(cUnit);
            LIR* branch1 = genCmpImmBranch(cUnit, kCondEq, rlResult.lowReg, 0);
            // Resolved, store and hop over following code
            storeValue(cUnit, rlDest, rlResult);
            // Target (0) is patched below via branch2->target.
            LIR* branch2 = genUnconditionalBranch(cUnit,0);
            // TUNING: move slow path to end & remove unconditional branch
            LIR* target1 = newLIR0(cUnit, kPseudoTargetLabel);
            target1->defMask = ENCODE_ALL;
            // Call out to helper, which will return resolved type in r0
            int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                                  pInitializeTypeFromCode));
            genRegCopy(cUnit, rARG1, mReg);
            loadConstant(cUnit, rARG0, type_idx);
            callRuntimeHelper(cUnit, rTgt);
            // NOTE: intentional shadowing again - the helper's return value.
            RegLocation rlResult = oatGetReturn(cUnit);
            storeValue(cUnit, rlDest, rlResult);
            // Rejoin code paths
            LIR* target2 = newLIR0(cUnit, kPseudoTargetLabel);
            target2->defMask = ENCODE_ALL;
            branch1->target = (LIR*)target1;
            branch2->target = (LIR*)target2;
        } else {
            // Fast path, we're done - just store result
            storeValue(cUnit, rlDest, rlResult);
        }
    }
}
882void genConstString(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
883 RegLocation rlSrc)
884{
885 /* NOTE: Most strings should be available at compile time */
886 uint32_t string_idx = mir->dalvikInsn.vB;
887 int32_t offset_of_string = Array::DataOffset(sizeof(String*)).Int32Value() +
888 (sizeof(String*) * string_idx);
889 if (!cUnit->compiler->CanAssumeStringIsPresentInDexCache(
890 cUnit->dex_cache, string_idx) || SLOW_STRING_PATH) {
891 // slow path, resolve string if not in dex cache
892 oatFlushAllRegs(cUnit);
893 oatLockCallTemps(cUnit); // Using explicit registers
894 loadCurrMethodDirect(cUnit, rARG2);
895 loadWordDisp(cUnit, rARG2,
896 Method::DexCacheStringsOffset().Int32Value(), rARG0);
897 // Might call out to helper, which will return resolved string in rRET0
898 int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
899 pResolveStringFromCode));
900 loadWordDisp(cUnit, rRET0, offset_of_string, rARG0);
901 loadConstant(cUnit, rARG1, string_idx);
902#if defined(TARGET_ARM)
903 opRegImm(cUnit, kOpCmp, rRET0, 0); // Is resolved?
904 genBarrier(cUnit);
905 // For testing, always force through helper
906 if (!EXERCISE_SLOWEST_STRING_PATH) {
907 genIT(cUnit, kArmCondEq, "T");
908 }
909 genRegCopy(cUnit, rARG0, rARG2); // .eq
910 opReg(cUnit, kOpBlx, rTgt); // .eq, helper(Method*, string_idx)
911#else
912 LIR* branch = genCmpImmBranch(cUnit, kCondNe, 0);
913 genRegCopy(cUnit, rARG0, rARG2); // .eq
914 opReg(cUnit, kOpBlx, rTgt);
915 LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
916 target->defMask = ENCODE_ALL;
917 branch->target = target;
918#endif
919 genBarrier(cUnit);
920 storeValue(cUnit, rlDest, getRetLoc(cUnit));
921 } else {
922 int mReg = loadCurrMethod(cUnit);
923 int resReg = oatAllocTemp(cUnit);
924 RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
925 loadWordDisp(cUnit, mReg,
926 Method::DexCacheStringsOffset().Int32Value(), resReg);
927 loadWordDisp(cUnit, resReg, offset_of_string, rlResult.lowReg);
928 storeValue(cUnit, rlDest, rlResult);
929 }
930}
931
/*
 * Generate code for new-instance. Let the helper function take care of
 * everything: it will call Class::NewInstanceFromCode(type_idx, method)
 * and the new object comes back in the return location.
 */
void genNewInstance(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest)
{
    oatFlushAllRegs(cUnit);  /* Everything to home location */
    uint32_t type_idx = mir->dalvikInsn.vB;
    // alloc will always check for resolution, do we also need to verify
    // access because the verifier was unable to?
    int rTgt;
    if (cUnit->compiler->CanAccessInstantiableTypeWithoutChecks(
            cUnit->method_idx, cUnit->dex_cache, *cUnit->dex_file, type_idx)) {
        // Access proven at compile time - use the cheaper helper.
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pAllocObjectFromCode));
    } else {
        // Verifier couldn't prove access; helper re-checks at runtime.
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pAllocObjectFromCodeWithAccessCheck));
    }
    loadCurrMethodDirect(cUnit, rARG1);    // arg1 <= Method*
    loadConstant(cUnit, rARG0, type_idx);  // arg0 <- type_idx
    callRuntimeHelper(cUnit, rTgt);
    RegLocation rlResult = oatGetReturn(cUnit);
    storeValue(cUnit, rlDest, rlResult);
}
956
957void genInstanceof(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
958 RegLocation rlSrc)
959{
960 oatFlushAllRegs(cUnit);
961 // May generate a call - use explicit registers
962 oatLockCallTemps(cUnit);
963 uint32_t type_idx = mir->dalvikInsn.vC;
964 loadCurrMethodDirect(cUnit, rARG1); // r1 <= current Method*
965 int classReg = rARG2; // rARG2 will hold the Class*
966 if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
967 cUnit->dex_cache,
968 *cUnit->dex_file,
969 type_idx)) {
970 // Check we have access to type_idx and if not throw IllegalAccessError,
971 // returns Class* in r0
972 int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
973 pInitializeTypeAndVerifyAccessFromCode));
974 loadConstant(cUnit, rARG0, type_idx);
975 callRuntimeHelper(cUnit, rTgt); // InitializeTypeAndVerifyAccess(idx, method)
976 genRegCopy(cUnit, classReg, rRET0); // Align usage with fast path
977 loadValueDirectFixed(cUnit, rlSrc, rARG0); // r0 <= ref
978 } else {
979 // Load dex cache entry into classReg (r2)
980 loadValueDirectFixed(cUnit, rlSrc, rARG0); // rARG0 <= ref
981 loadWordDisp(cUnit, rARG1,
982 Method::DexCacheResolvedTypesOffset().Int32Value(),
983 classReg);
984 int32_t offset_of_type =
985 Array::DataOffset(sizeof(Class*)).Int32Value() + (sizeof(Class*)
986 * type_idx);
987 loadWordDisp(cUnit, classReg, offset_of_type, classReg);
988 if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(
989 cUnit->dex_cache, type_idx)) {
990 // Need to test presence of type in dex cache at runtime
991 LIR* hopBranch = genCmpImmBranch(cUnit, kCondNe, classReg, 0);
992 // Not resolved
993 // Call out to helper, which will return resolved type in rRET0
994 int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
995 pInitializeTypeFromCode));
996 loadConstant(cUnit, rARG0, type_idx);
997 callRuntimeHelper(cUnit, rTgt); // InitializeTypeFromCode(idx, method)
998 genRegCopy(cUnit, r2, rRET0); // Align usage with fast path
999 loadValueDirectFixed(cUnit, rlSrc, rARG0); /* reload Ref */
1000 // Rejoin code paths
1001 LIR* hopTarget = newLIR0(cUnit, kPseudoTargetLabel);
1002 hopTarget->defMask = ENCODE_ALL;
1003 hopBranch->target = (LIR*)hopTarget;
1004 }
1005 }
1006 /* rARG0 is ref, rARG2 is class. If ref==null, use directly as bool result */
1007 LIR* branch1 = genCmpImmBranch(cUnit, kCondEq, rARG0, 0);
1008 /* load object->clazz */
1009 DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
1010 loadWordDisp(cUnit, rARG0, Object::ClassOffset().Int32Value(), rARG1);
1011 /* rARG0 is ref, rARG1 is ref->clazz, rARG2 is class */
1012 int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
1013 pInstanceofNonTrivialFromCode));
1014#if defined(TARGET_ARM)
1015 opRegReg(cUnit, kOpCmp, rARG1, rARG2); // Same?
1016 genBarrier(cUnit);
1017 genIT(cUnit, kArmCondEq, "EE"); // if-convert the test
1018 loadConstant(cUnit, rARG0, 1); // .eq case - load true
1019 genRegCopy(cUnit, rARG0, rARG2); // .ne case - arg0 <= class
1020 opReg(cUnit, kOpBlx, rTgt); // .ne case: helper(class, ref->class)
1021 genBarrier(cUnit);
1022 oatClobberCalleeSave(cUnit);
1023#else
1024 // Perhaps a general-purpose kOpSelect operator?
1025 UNIMPLEMENTED(FATAL) << "Need non IT implementation";
1026#endif
1027 /* branch target here */
1028 LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
1029 target->defMask = ENCODE_ALL;
1030 RegLocation rlResult = oatGetReturn(cUnit);
1031 storeValue(cUnit, rlDest, rlResult);
1032 branch1->target = (LIR*)target;
1033}
1034
1035void genCheckCast(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
1036{
1037 oatFlushAllRegs(cUnit);
1038 // May generate a call - use explicit registers
1039 oatLockCallTemps(cUnit);
1040 uint32_t type_idx = mir->dalvikInsn.vB;
1041 loadCurrMethodDirect(cUnit, rARG1); // rARG1 <= current Method*
1042 int classReg = rARG2; // rARG2 will hold the Class*
1043 if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
1044 cUnit->dex_cache,
1045 *cUnit->dex_file,
1046 type_idx)) {
1047 // Check we have access to type_idx and if not throw IllegalAccessError,
1048 // returns Class* in rRET0
1049 int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
1050 pInitializeTypeAndVerifyAccessFromCode));
1051 loadConstant(cUnit, rARG0, type_idx);
1052 callRuntimeHelper(cUnit, rTgt); // InitializeTypeAndVerifyAccess(idx, method)
1053 genRegCopy(cUnit, classReg, rRET0); // Align usage with fast path
1054 } else {
1055 // Load dex cache entry into classReg (rARG2)
1056 loadWordDisp(cUnit, rARG1,
1057 Method::DexCacheResolvedTypesOffset().Int32Value(),
1058 classReg);
1059 int32_t offset_of_type =
1060 Array::DataOffset(sizeof(Class*)).Int32Value() +
1061 (sizeof(Class*) * type_idx);
1062 loadWordDisp(cUnit, classReg, offset_of_type, classReg);
1063 if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(
1064 cUnit->dex_cache, type_idx)) {
1065 // Need to test presence of type in dex cache at runtime
1066 LIR* hopBranch = genCmpImmBranch(cUnit, kCondNe, classReg, 0);
1067 // Not resolved
1068 // Call out to helper, which will return resolved type in r0
1069 loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pInitializeTypeFromCode), rLR);
1070 loadConstant(cUnit, r0, type_idx);
1071 callRuntimeHelper(cUnit, rLR); // InitializeTypeFromCode(idx, method)
1072 genRegCopy(cUnit, classReg, r0); // Align usage with fast path
1073 // Rejoin code paths
1074 LIR* hopTarget = newLIR0(cUnit, kPseudoTargetLabel);
1075 hopTarget->defMask = ENCODE_ALL;
1076 hopBranch->target = (LIR*)hopTarget;
1077 }
1078 }
1079 // At this point, classReg (r2) has class
1080 loadValueDirectFixed(cUnit, rlSrc, rARG0); // rARG0 <= ref
1081 /* Null is OK - continue */
1082 LIR* branch1 = genCmpImmBranch(cUnit, kCondEq, rARG0, 0);
1083 /* load object->clazz */
1084 DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
1085 loadWordDisp(cUnit, rARG0, Object::ClassOffset().Int32Value(), rARG1);
1086 /* rARG1 now contains object->clazz */
1087 int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
1088 pCheckCastFromCode));
1089 opRegReg(cUnit, kOpCmp, rARG1, classReg);
1090 LIR* branch2 = opCondBranch(cUnit, kCondEq); /* If equal, trivial yes */
1091 genRegCopy(cUnit, rARG0, rARG1);
1092 genRegCopy(cUnit, rARG1, rARG2);
1093 callRuntimeHelper(cUnit, rTgt);
1094 /* branch target here */
1095 LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
1096 target->defMask = ENCODE_ALL;
1097 branch1->target = (LIR*)target;
1098 branch2->target = (LIR*)target;
1099}
1100
1101
/*
 * Generate code for the throw bytecode: hand the exception object in rlSrc
 * to the pDeliverException runtime helper.
 */
void genThrow(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);
    int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pDeliverException));
    loadValueDirectFixed(cUnit, rlSrc, rARG0);  // Get exception object
    callRuntimeHelper(cUnit, rTgt);  // art_deliver_exception(exception);
}
1109
/*
 * Generate array store of an object reference (aput-object). Calls the
 * pCanPutArrayElementFromCode helper to verify the element may be stored
 * into the array before performing the indexed store.
 */
void genArrayObjPut(CompilationUnit* cUnit, MIR* mir, RegLocation rlArray,
                    RegLocation rlIndex, RegLocation rlSrc, int scale)
{
    RegisterClass regClass = oatRegClassBySize(kWord);
    int lenOffset = Array::LengthOffset().Int32Value();
    int dataOffset = Array::DataOffset(sizeof(Object*)).Int32Value();

    oatFlushAllRegs(cUnit);
    /* Make sure it's a legal object Put. Use direct regs at first */
    loadValueDirectFixed(cUnit, rlArray, rARG1);
    loadValueDirectFixed(cUnit, rlSrc, rARG0);

    /* null array object? */
    genNullCheck(cUnit, rlArray.sRegLow, rARG1, mir);
    int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pCanPutArrayElementFromCode));
    /* Get the array's clazz */
    loadWordDisp(cUnit, rARG1, Object::ClassOffset().Int32Value(), rARG1);
    callRuntimeHelper(cUnit, rTgt);  // helper(element, array->clazz)
    oatFreeTemp(cUnit, rARG0);
    oatFreeTemp(cUnit, rARG1);

    // Now, redo loadValues in case they didn't survive the call

    int regPtr;
    rlArray = loadValue(cUnit, rlArray, kCoreReg);
    rlIndex = loadValue(cUnit, rlIndex, kCoreReg);

    // Reuse the array register as the data pointer if it's a temp we own;
    // otherwise copy so we don't clobber a live value.
    if (oatIsTemp(cUnit, rlArray.lowReg)) {
        oatClobber(cUnit, rlArray.lowReg);
        regPtr = rlArray.lowReg;
    } else {
        regPtr = oatAllocTemp(cUnit);
        genRegCopy(cUnit, regPtr, rlArray.lowReg);
    }

    if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        int regLen = oatAllocTemp(cUnit);
        //NOTE: max live temps(4) here.
        /* Get len */
        loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
        /* regPtr -> array data */
        opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
        // kCondCs: throw when index (unsigned) >= len — also catches
        // negative indices. NOTE(review): confirm carry semantics per target.
        genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
                       kThrowArrayBounds);
        oatFreeTemp(cUnit, regLen);
    } else {
        /* regPtr -> array data */
        opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
    }
    /* at this point, regPtr points to array, 2 live temps */
    rlSrc = loadValue(cUnit, rlSrc, regClass);
    storeBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlSrc.lowReg,
                     scale, kWord);
}
1169
/*
 * Generate array load (aget family). Performs null and (unless optimized
 * away) range checks, then loads a narrow value or a wide pair from
 * array data at rlIndex << scale into rlDest.
 */
void genArrayGet(CompilationUnit* cUnit, MIR* mir, OpSize size,
                 RegLocation rlArray, RegLocation rlIndex,
                 RegLocation rlDest, int scale)
{
    RegisterClass regClass = oatRegClassBySize(size);
    int lenOffset = Array::LengthOffset().Int32Value();
    int dataOffset;
    RegLocation rlResult;
    rlArray = loadValue(cUnit, rlArray, kCoreReg);
    rlIndex = loadValue(cUnit, rlIndex, kCoreReg);
    int regPtr;

    // Wide elements (long/double) start at the 64-bit-aligned data offset.
    if (size == kLong || size == kDouble) {
        dataOffset = Array::DataOffset(sizeof(int64_t)).Int32Value();
    } else {
        dataOffset = Array::DataOffset(sizeof(int32_t)).Int32Value();
    }

    /* null object? */
    genNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg, mir);

    regPtr = oatAllocTemp(cUnit);

    if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        int regLen = oatAllocTemp(cUnit);
        /* Get len */
        loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
        /* regPtr -> array data */
        opRegRegImm(cUnit, kOpAdd, regPtr, rlArray.lowReg, dataOffset);
        // Unsigned-compare check throws ArrayIndexOutOfBounds for
        // index >= len (negative indices wrap to large unsigned values).
        genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
                       kThrowArrayBounds);
        oatFreeTemp(cUnit, regLen);
    } else {
        /* regPtr -> array data */
        opRegRegImm(cUnit, kOpAdd, regPtr, rlArray.lowReg, dataOffset);
    }
    oatFreeTemp(cUnit, rlArray.lowReg);
    if ((size == kLong) || (size == kDouble)) {
        // Wide load: fold the scaled index into regPtr, then load the pair.
        if (scale) {
            int rNewIndex = oatAllocTemp(cUnit);
            opRegRegImm(cUnit, kOpLsl, rNewIndex, rlIndex.lowReg, scale);
            opRegReg(cUnit, kOpAdd, regPtr, rNewIndex);
            oatFreeTemp(cUnit, rNewIndex);
        } else {
            opRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
        }
        oatFreeTemp(cUnit, rlIndex.lowReg);
        rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);

        loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);

        oatFreeTemp(cUnit, regPtr);
        storeValueWide(cUnit, rlDest, rlResult);
    } else {
        // Narrow load: use a scaled base+index addressing helper.
        rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);

        loadBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlResult.lowReg,
                        scale, size);

        oatFreeTemp(cUnit, regPtr);
        storeValue(cUnit, rlDest, rlResult);
    }
}
1236
/*
 * Generate array store (aput family, non-object). Performs null and
 * (unless optimized away) range checks, then stores a narrow value or a
 * wide pair into array data at rlIndex << scale.
 */
void genArrayPut(CompilationUnit* cUnit, MIR* mir, OpSize size,
                 RegLocation rlArray, RegLocation rlIndex,
                 RegLocation rlSrc, int scale)
{
    RegisterClass regClass = oatRegClassBySize(size);
    int lenOffset = Array::LengthOffset().Int32Value();
    int dataOffset;

    // Wide elements (long/double) start at the 64-bit-aligned data offset.
    if (size == kLong || size == kDouble) {
        dataOffset = Array::DataOffset(sizeof(int64_t)).Int32Value();
    } else {
        dataOffset = Array::DataOffset(sizeof(int32_t)).Int32Value();
    }

    int regPtr;
    rlArray = loadValue(cUnit, rlArray, kCoreReg);
    rlIndex = loadValue(cUnit, rlIndex, kCoreReg);

    // Reuse the array register as the data pointer if it's a temp we own;
    // otherwise copy so we don't clobber a live value.
    if (oatIsTemp(cUnit, rlArray.lowReg)) {
        oatClobber(cUnit, rlArray.lowReg);
        regPtr = rlArray.lowReg;
    } else {
        regPtr = oatAllocTemp(cUnit);
        genRegCopy(cUnit, regPtr, rlArray.lowReg);
    }

    /* null object? */
    genNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg, mir);

    if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        int regLen = oatAllocTemp(cUnit);
        //NOTE: max live temps(4) here.
        /* Get len */
        loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
        /* regPtr -> array data */
        opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
        // Unsigned-compare check throws for index >= len.
        genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
                       kThrowArrayBounds);
        oatFreeTemp(cUnit, regLen);
    } else {
        /* regPtr -> array data */
        opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
    }
    /* at this point, regPtr points to array, 2 live temps */
    if ((size == kLong) || (size == kDouble)) {
        //TUNING: specific wide routine that can handle fp regs
        if (scale) {
            int rNewIndex = oatAllocTemp(cUnit);
            opRegRegImm(cUnit, kOpLsl, rNewIndex, rlIndex.lowReg, scale);
            opRegReg(cUnit, kOpAdd, regPtr, rNewIndex);
            oatFreeTemp(cUnit, rNewIndex);
        } else {
            opRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
        }
        rlSrc = loadValueWide(cUnit, rlSrc, regClass);

        storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);

        oatFreeTemp(cUnit, regPtr);
    } else {
        rlSrc = loadValue(cUnit, rlSrc, regClass);

        storeBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlSrc.lowReg,
                         scale, size);
    }
}
1307
/*
 * Emit a 64-bit three-address operation as a pair of 32-bit ops:
 * firstOp on the low words, secondOp on the high words (e.g. add/adc,
 * sub/sbc, and/and). Handles the case where the result's low register
 * overlaps the first source's high register.
 */
void genLong3Addr(CompilationUnit* cUnit, MIR* mir, OpKind firstOp,
                  OpKind secondOp, RegLocation rlDest,
                  RegLocation rlSrc1, RegLocation rlSrc2)
{
    RegLocation rlResult;
#if defined(TARGET_ARM)
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers. There are 5 in the normal
     * set for Arm. Until we have spill capabilities, temporarily add
     * lr to the temp set. It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    oatMarkTemp(cUnit, rLR);  // Add lr to the temp pool
    oatFreeTemp(cUnit, rLR);  // and make it available
#endif
    rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
    rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
    rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    // The longs may overlap - use intermediate temp if so
    if (rlResult.lowReg == rlSrc1.highReg) {
        // Writing the low result would clobber src1's high word; save it.
        int tReg = oatAllocTemp(cUnit);
        genRegCopy(cUnit, tReg, rlSrc1.highReg);
        opRegRegReg(cUnit, firstOp, rlResult.lowReg, rlSrc1.lowReg,
                    rlSrc2.lowReg);
        opRegRegReg(cUnit, secondOp, rlResult.highReg, tReg,
                    rlSrc2.highReg);
        oatFreeTemp(cUnit, tReg);
    } else {
        opRegRegReg(cUnit, firstOp, rlResult.lowReg, rlSrc1.lowReg,
                    rlSrc2.lowReg);
        opRegRegReg(cUnit, secondOp, rlResult.highReg, rlSrc1.highReg,
                    rlSrc2.highReg);
    }
    /*
     * NOTE: If rlDest refers to a frame variable in a large frame, the
     * following storeValueWide might need to allocate a temp register.
     * To further work around the lack of a spill capability, explicitly
     * free any temps from rlSrc1 & rlSrc2 that aren't still live in rlResult.
     * Remove when spill is functional.
     */
    freeRegLocTemps(cUnit, rlResult, rlSrc1);
    freeRegLocTemps(cUnit, rlResult, rlSrc2);
    storeValueWide(cUnit, rlDest, rlResult);
#if defined(TARGET_ARM)
    oatClobber(cUnit, rLR);
    oatUnmarkTemp(cUnit, rLR);  // Remove lr from the temp pool
#endif
}
1358
1359
/*
 * Generate a 64-bit shift (shl/shr/ushr-long and their 2addr forms) by
 * calling the matching runtime helper with the wide value in rARG0/rARG1
 * and the shift amount in rARG2. Returns true on an unexpected opcode.
 */
bool genShiftOpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                    RegLocation rlSrc1, RegLocation rlShift)
{
    int funcOffset;

    switch( mir->dalvikInsn.opcode) {
        case OP_SHL_LONG:
        case OP_SHL_LONG_2ADDR:
            funcOffset = OFFSETOF_MEMBER(Thread, pShlLong);
            break;
        case OP_SHR_LONG:
        case OP_SHR_LONG_2ADDR:
            funcOffset = OFFSETOF_MEMBER(Thread, pShrLong);
            break;
        case OP_USHR_LONG:
        case OP_USHR_LONG_2ADDR:
            funcOffset = OFFSETOF_MEMBER(Thread, pUshrLong);
            break;
        default:
            LOG(FATAL) << "Unexpected case";
            return true;
    }
    oatFlushAllRegs(cUnit);  /* Send everything to home location */
    int rTgt = loadHelper(cUnit, funcOffset);
    loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
    loadValueDirect(cUnit, rlShift, rARG2);
    callRuntimeHelper(cUnit, rTgt);
    RegLocation rlResult = oatGetReturnWide(cUnit);
    storeValueWide(cUnit, rlDest, rlResult);
    return false;
}
1391
1392
/*
 * Generate code for the 32-bit integer arithmetic opcodes. Most ops are
 * emitted inline; div/rem call out to runtime helpers (with a divide-by-zero
 * check). Shift amounts are masked to 5 bits per the Dalvik spec. Returns
 * false on success (LOG(FATAL) fires on an unknown opcode).
 */
bool genArithOpInt(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                   RegLocation rlSrc1, RegLocation rlSrc2)
{
    OpKind op = kOpBkpt;
    bool callOut = false;      // true => dispatch to a runtime helper
    bool checkZero = false;    // true => emit divide-by-zero check
    bool unary = false;        // true => single-operand op (neg/not)
    int retReg = rRET0;        // where the helper leaves its result
    int funcOffset;
    RegLocation rlResult;
    bool shiftOp = false;      // true => mask shift amount to 0..31

    switch (mir->dalvikInsn.opcode) {
        case OP_NEG_INT:
            op = kOpNeg;
            unary = true;
            break;
        case OP_NOT_INT:
            op = kOpMvn;
            unary = true;
            break;
        case OP_ADD_INT:
        case OP_ADD_INT_2ADDR:
            op = kOpAdd;
            break;
        case OP_SUB_INT:
        case OP_SUB_INT_2ADDR:
            op = kOpSub;
            break;
        case OP_MUL_INT:
        case OP_MUL_INT_2ADDR:
            op = kOpMul;
            break;
        case OP_DIV_INT:
        case OP_DIV_INT_2ADDR:
            callOut = true;
            checkZero = true;
            funcOffset = OFFSETOF_MEMBER(Thread, pIdiv);
            retReg = rRET0;
            break;
        /* NOTE: returns in r1 */
        case OP_REM_INT:
        case OP_REM_INT_2ADDR:
            callOut = true;
            checkZero = true;
            funcOffset = OFFSETOF_MEMBER(Thread, pIdivmod);
            retReg = rRET1;  // idivmod leaves the remainder in the alt return reg
            break;
        case OP_AND_INT:
        case OP_AND_INT_2ADDR:
            op = kOpAnd;
            break;
        case OP_OR_INT:
        case OP_OR_INT_2ADDR:
            op = kOpOr;
            break;
        case OP_XOR_INT:
        case OP_XOR_INT_2ADDR:
            op = kOpXor;
            break;
        case OP_SHL_INT:
        case OP_SHL_INT_2ADDR:
            shiftOp = true;
            op = kOpLsl;
            break;
        case OP_SHR_INT:
        case OP_SHR_INT_2ADDR:
            shiftOp = true;
            op = kOpAsr;
            break;
        case OP_USHR_INT:
        case OP_USHR_INT_2ADDR:
            shiftOp = true;
            op = kOpLsr;
            break;
        default:
            LOG(FATAL) << "Invalid word arith op: " <<
                (int)mir->dalvikInsn.opcode;
    }
    if (!callOut) {
        // Inline path: evaluate operands into core registers and emit the op.
        rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
        if (unary) {
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            opRegReg(cUnit, op, rlResult.lowReg,
                     rlSrc1.lowReg);
        } else {
            rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
            if (shiftOp) {
                // Mask shift count to 0..31 (Dalvik semantics).
                int tReg = oatAllocTemp(cUnit);
                opRegRegImm(cUnit, kOpAnd, tReg, rlSrc2.lowReg, 31);
                rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
                opRegRegReg(cUnit, op, rlResult.lowReg,
                            rlSrc1.lowReg, tReg);
                oatFreeTemp(cUnit, tReg);
            } else {
                rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
                opRegRegReg(cUnit, op, rlResult.lowReg,
                            rlSrc1.lowReg, rlSrc2.lowReg);
            }
        }
        storeValue(cUnit, rlDest, rlResult);
    } else {
        // Call-out path (div/rem): args in fixed registers.
        RegLocation rlResult;
        oatFlushAllRegs(cUnit);  /* Send everything to home location */
        // NOTE(review): rlSrc2 is loaded into rRET1 but the zero check below
        // tests rARG1; these alias on ARM (both r1) — confirm other targets.
        loadValueDirectFixed(cUnit, rlSrc2, rRET1);
        int rTgt = loadHelper(cUnit, funcOffset);
        loadValueDirectFixed(cUnit, rlSrc1, rARG0);
        if (checkZero) {
            genImmedCheck(cUnit, kCondEq, rARG1, 0, mir, kThrowDivZero);
        }
        callRuntimeHelper(cUnit, rTgt);
        if (retReg == rRET0)
            rlResult = oatGetReturn(cUnit);
        else
            rlResult = oatGetReturnAlt(cUnit);
        storeValue(cUnit, rlDest, rlResult);
    }
    return false;
}
1512
/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode then either dispatch special purpose codegen routines
 * or produce corresponding target instructions directly.
 */
1518
// Returns true if 'x' is a positive power of two.
// FIX: the naive (x & (x - 1)) == 0 test also accepts 0 (and INT_MIN);
// require x > 0 so non-positive values are rejected. All current callers
// already guard with lit >= 2, so this is backward-compatible hardening.
bool isPowerOfTwo(int x)
{
    return (x > 0) && ((x & (x - 1)) == 0);
}
1523
1524// Returns true if no more than two bits are set in 'x'.
// Returns true if no more than two bits are set in 'x'.
bool isPopCountLE2(unsigned int x)
{
    // Clearing the lowest set bit leaves a value that itself must be a
    // power of two (or zero) iff popcount(x) <= 2.
    unsigned int stripped = x & (x - 1);
    return (stripped & (stripped - 1)) == 0;
}
1530
1531// Returns the index of the lowest set bit in 'x'.
// Returns the index of the lowest set bit in 'x'.
// Precondition (same as the original): 'x' must be non-zero, or the
// loop never terminates.
int lowestSetBit(unsigned int x) {
    int position = 0;
    while ((x & 1) == 0) {
        position++;
        x >>= 1;
    }
    return position;
}
1544
// Returns true if it added instructions to 'cUnit' to divide 'rlSrc' by 'lit'
// and store the result in 'rlDest'. Only handles positive power-of-two
// literals (2^k, k < 30); uses the standard sign-correction trick so that
// rounding is toward zero as Dalvik requires.
bool handleEasyDivide(CompilationUnit* cUnit, Opcode dalvikOpcode,
                      RegLocation rlSrc, RegLocation rlDest, int lit)
{
    if (lit < 2 || !isPowerOfTwo(lit)) {
        return false;
    }
    int k = lowestSetBit(lit);
    if (k >= 30) {
        // Avoid special cases.
        return false;
    }
    bool div = (dalvikOpcode == OP_DIV_INT_LIT8 ||
                dalvikOpcode == OP_DIV_INT_LIT16);
    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
    RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    if (div) {
        int tReg = oatAllocTemp(cUnit);
        if (lit == 2) {
            // Division by 2 is by far the most common division by constant.
            // Add (src >>> 31) to src before the arithmetic shift so
            // negative values round toward zero.
            opRegRegImm(cUnit, kOpLsr, tReg, rlSrc.lowReg, 32 - k);
            opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
            opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
        } else {
            // General 2^k: bias by (lit - 1) only for negative src
            // (sign mask >>> (32-k)), then arithmetic shift.
            opRegRegImm(cUnit, kOpAsr, tReg, rlSrc.lowReg, 31);
            opRegRegImm(cUnit, kOpLsr, tReg, tReg, 32 - k);
            opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
            opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
        }
    } else {
        // Remainder: rem = ((src + bias) & (lit - 1)) - bias, where bias is
        // the same rounding correction used for division.
        int cReg = oatAllocTemp(cUnit);
        loadConstant(cUnit, cReg, lit - 1);
        int tReg1 = oatAllocTemp(cUnit);
        int tReg2 = oatAllocTemp(cUnit);
        if (lit == 2) {
            opRegRegImm(cUnit, kOpLsr, tReg1, rlSrc.lowReg, 32 - k);
            opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
            opRegRegReg(cUnit, kOpAnd, tReg2, tReg2, cReg);
            opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
        } else {
            opRegRegImm(cUnit, kOpAsr, tReg1, rlSrc.lowReg, 31);
            opRegRegImm(cUnit, kOpLsr, tReg1, tReg1, 32 - k);
            opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
            opRegRegReg(cUnit, kOpAnd, tReg2, tReg2, cReg);
            opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
        }
    }
    storeValue(cUnit, rlDest, rlResult);
    return true;
}
1596
/*
 * Emit multiply by a literal with exactly two set bits (at firstBit and
 * secondBit): result = (src + (src << (secondBit - firstBit))) << firstBit.
 * NOTE(review): uses encodeShift/kArmLsl, which look ARM-specific — confirm
 * whether non-ARM targets provide compatible definitions.
 */
void genMultiplyByTwoBitMultiplier(CompilationUnit* cUnit, RegLocation rlSrc,
                                   RegLocation rlResult, int lit,
                                   int firstBit, int secondBit)
{
    // result = src + (src << (secondBit - firstBit))
    opRegRegRegShift(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, rlSrc.lowReg,
                     encodeShift(kArmLsl, secondBit - firstBit));
    if (firstBit != 0) {
        // Scale the sum by the lower bit position.
        opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
    }
}
1607
// Returns true if it added instructions to 'cUnit' to multiply 'rlSrc' by 'lit'
// and store the result in 'rlDest'. Handles three strength-reduction cases:
// powers of two (shift), literals with <= 2 set bits (shift-add-shift), and
// 2^k - 1 (shift then subtract).
bool handleEasyMultiply(CompilationUnit* cUnit, RegLocation rlSrc,
                        RegLocation rlDest, int lit)
{
    // Can we simplify this multiplication?
    bool powerOfTwo = false;
    bool popCountLE2 = false;
    bool powerOfTwoMinusOne = false;
    if (lit < 2) {
        // Avoid special cases.
        return false;
    } else if (isPowerOfTwo(lit)) {
        powerOfTwo = true;
    } else if (isPopCountLE2(lit)) {
        popCountLE2 = true;
    } else if (isPowerOfTwo(lit + 1)) {
        powerOfTwoMinusOne = true;
    } else {
        return false;
    }
    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
    RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    if (powerOfTwo) {
        // Shift.
        opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlSrc.lowReg,
                    lowestSetBit(lit));
    } else if (popCountLE2) {
        // Shift and add and shift.
        int firstBit = lowestSetBit(lit);
        int secondBit = lowestSetBit(lit ^ (1 << firstBit));
        genMultiplyByTwoBitMultiplier(cUnit, rlSrc, rlResult, lit,
                                      firstBit, secondBit);
    } else {
        // Reverse subtract: (src << (shift + 1)) - src.
        DCHECK(powerOfTwoMinusOne);
        // TUNING: rsb dst, src, src lsl#lowestSetBit(lit + 1)
        int tReg = oatAllocTemp(cUnit);
        opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, lowestSetBit(lit + 1));
        opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
    }
    storeValue(cUnit, rlDest, rlResult);
    return true;
}
1652
/*
 * Generate code for the int-with-literal arithmetic opcodes (lit8/lit16
 * families). Tries strength reduction for mul/div/rem, masks shift literals
 * to 5 bits, and calls runtime helpers for general div/rem. Returns true
 * only for unhandled opcodes.
 */
bool genArithOpIntLit(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                      RegLocation rlSrc, int lit)
{
    Opcode dalvikOpcode = mir->dalvikInsn.opcode;
    RegLocation rlResult;
    OpKind op = (OpKind)0;  /* Make gcc happy */
    int shiftOp = false;    // true => literal is a shift amount (masked to 0..31)
    bool isDiv = false;
    int funcOffset;
    int rTgt;

    switch (dalvikOpcode) {
        case OP_RSUB_INT_LIT8:
        case OP_RSUB_INT: {
            // rsub computes lit - src, so materialize lit and subtract.
            int tReg;
            //TUNING: add support for use of Arm rsub op
            rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
            tReg = oatAllocTemp(cUnit);
            loadConstant(cUnit, tReg, lit);
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            opRegRegReg(cUnit, kOpSub, rlResult.lowReg,
                        tReg, rlSrc.lowReg);
            storeValue(cUnit, rlDest, rlResult);
            return false;
            break;
        }

        case OP_ADD_INT_LIT8:
        case OP_ADD_INT_LIT16:
            op = kOpAdd;
            break;
        case OP_MUL_INT_LIT8:
        case OP_MUL_INT_LIT16: {
            // Try strength reduction before falling back to a real multiply.
            if (handleEasyMultiply(cUnit, rlSrc, rlDest, lit)) {
                return false;
            }
            op = kOpMul;
            break;
        }
        case OP_AND_INT_LIT8:
        case OP_AND_INT_LIT16:
            op = kOpAnd;
            break;
        case OP_OR_INT_LIT8:
        case OP_OR_INT_LIT16:
            op = kOpOr;
            break;
        case OP_XOR_INT_LIT8:
        case OP_XOR_INT_LIT16:
            op = kOpXor;
            break;
        case OP_SHL_INT_LIT8:
            lit &= 31;  // Dalvik masks shift counts to 5 bits
            shiftOp = true;
            op = kOpLsl;
            break;
        case OP_SHR_INT_LIT8:
            lit &= 31;
            shiftOp = true;
            op = kOpAsr;
            break;
        case OP_USHR_INT_LIT8:
            lit &= 31;
            shiftOp = true;
            op = kOpLsr;
            break;

        case OP_DIV_INT_LIT8:
        case OP_DIV_INT_LIT16:
        case OP_REM_INT_LIT8:
        case OP_REM_INT_LIT16:
            if (lit == 0) {
                // Division by a literal zero always throws: kCondAl emits an
                // unconditional throw.
                genImmedCheck(cUnit, kCondAl, 0, 0, mir, kThrowDivZero);
                return false;
            }
            if (handleEasyDivide(cUnit, dalvikOpcode, rlSrc, rlDest, lit)) {
                return false;
            }
            // General case: call the idiv/idivmod helper.
            oatFlushAllRegs(cUnit);  /* Everything to home location */
            loadValueDirectFixed(cUnit, rlSrc, rARG0);
            oatClobber(cUnit, rARG0);
            if ((dalvikOpcode == OP_DIV_INT_LIT8) ||
                (dalvikOpcode == OP_DIV_INT_LIT16)) {
                funcOffset = OFFSETOF_MEMBER(Thread, pIdiv);
                isDiv = true;
            } else {
                funcOffset = OFFSETOF_MEMBER(Thread, pIdivmod);
                isDiv = false;
            }
            rTgt = loadHelper(cUnit, funcOffset);
            loadConstant(cUnit, rARG1, lit);
            callRuntimeHelper(cUnit, rTgt);
            // idiv returns in the primary return reg, idivmod's remainder
            // comes back in the alternate return reg.
            if (isDiv)
                rlResult = oatGetReturn(cUnit);
            else
                rlResult = oatGetReturnAlt(cUnit);
            storeValue(cUnit, rlDest, rlResult);
            return false;
            break;
        default:
            return true;
    }
    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
    rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    // Avoid shifts by literal 0 - no support in Thumb. Change to copy
    if (shiftOp && (lit == 0)) {
        genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
    } else {
        opRegRegImm(cUnit, op, rlResult.lowReg, rlSrc.lowReg, lit);
    }
    storeValue(cUnit, rlDest, rlResult);
    return false;
}
1766
1767bool genArithOpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
1768 RegLocation rlSrc1, RegLocation rlSrc2)
1769{
1770 RegLocation rlResult;
1771 OpKind firstOp = kOpBkpt;
1772 OpKind secondOp = kOpBkpt;
1773 bool callOut = false;
1774 bool checkZero = false;
1775 int funcOffset;
1776 int retReg = rRET0;
1777
1778 switch (mir->dalvikInsn.opcode) {
1779 case OP_NOT_LONG:
1780 rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
1781 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1782 // Check for destructive overlap
1783 if (rlResult.lowReg == rlSrc2.highReg) {
1784 int tReg = oatAllocTemp(cUnit);
1785 genRegCopy(cUnit, tReg, rlSrc2.highReg);
1786 opRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
1787 opRegReg(cUnit, kOpMvn, rlResult.highReg, tReg);
1788 oatFreeTemp(cUnit, tReg);
1789 } else {
1790 opRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
1791 opRegReg(cUnit, kOpMvn, rlResult.highReg, rlSrc2.highReg);
1792 }
1793 storeValueWide(cUnit, rlDest, rlResult);
1794 return false;
1795 break;
1796 case OP_ADD_LONG:
1797 case OP_ADD_LONG_2ADDR:
1798 firstOp = kOpAdd;
1799 secondOp = kOpAdc;
1800 break;
1801 case OP_SUB_LONG:
1802 case OP_SUB_LONG_2ADDR:
1803 firstOp = kOpSub;
1804 secondOp = kOpSbc;
1805 break;
1806 case OP_MUL_LONG:
1807 case OP_MUL_LONG_2ADDR:
1808 callOut = true;
1809 retReg = rRET0;
1810 funcOffset = OFFSETOF_MEMBER(Thread, pLmul);
1811 break;
1812 case OP_DIV_LONG:
1813 case OP_DIV_LONG_2ADDR:
1814 callOut = true;
1815 checkZero = true;
1816 retReg = rRET0;
1817 funcOffset = OFFSETOF_MEMBER(Thread, pLdivmod);
1818 break;
1819 /* NOTE - result is in rARG2/rARG3 instead of rRET0/rRET1 */
1820 // FIXME: is true, or could be made true, or other targets?
1821 case OP_REM_LONG:
1822 case OP_REM_LONG_2ADDR:
1823 callOut = true;
1824 checkZero = true;
1825 funcOffset = OFFSETOF_MEMBER(Thread, pLdivmod);
1826 retReg = rARG2;
1827 break;
1828 case OP_AND_LONG_2ADDR:
1829 case OP_AND_LONG:
1830 firstOp = kOpAnd;
1831 secondOp = kOpAnd;
1832 break;
1833 case OP_OR_LONG:
1834 case OP_OR_LONG_2ADDR:
1835 firstOp = kOpOr;
1836 secondOp = kOpOr;
1837 break;
1838 case OP_XOR_LONG:
1839 case OP_XOR_LONG_2ADDR:
1840 firstOp = kOpXor;
1841 secondOp = kOpXor;
1842 break;
1843 case OP_NEG_LONG: {
1844 rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
1845 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1846 int zReg = oatAllocTemp(cUnit);
1847 loadConstantNoClobber(cUnit, zReg, 0);
1848 // Check for destructive overlap
1849 if (rlResult.lowReg == rlSrc2.highReg) {
1850 int tReg = oatAllocTemp(cUnit);
1851 opRegRegReg(cUnit, kOpSub, rlResult.lowReg,
1852 zReg, rlSrc2.lowReg);
1853 opRegRegReg(cUnit, kOpSbc, rlResult.highReg,
1854 zReg, tReg);
1855 oatFreeTemp(cUnit, tReg);
1856 } else {
1857 opRegRegReg(cUnit, kOpSub, rlResult.lowReg,
1858 zReg, rlSrc2.lowReg);
1859 opRegRegReg(cUnit, kOpSbc, rlResult.highReg,
1860 zReg, rlSrc2.highReg);
1861 }
1862 oatFreeTemp(cUnit, zReg);
1863 storeValueWide(cUnit, rlDest, rlResult);
1864 return false;
1865 }
1866 default:
1867 LOG(FATAL) << "Invalid long arith op";
1868 }
1869 if (!callOut) {
1870 genLong3Addr(cUnit, mir, firstOp, secondOp, rlDest, rlSrc1, rlSrc2);
1871 } else {
1872 int rTgt;
1873 oatFlushAllRegs(cUnit); /* Send everything to home location */
1874 if (checkZero) {
1875 loadValueDirectWideFixed(cUnit, rlSrc2, rARG2, rARG3);
1876 rTgt = loadHelper(cUnit, funcOffset);
1877 loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
1878 int tReg = oatAllocTemp(cUnit);
1879#if defined(TARGET_ARM)
1880 newLIR4(cUnit, kThumb2OrrRRRs, tReg, rARG2, rARG3, 0);
1881 oatFreeTemp(cUnit, tReg);
1882 genCheck(cUnit, kCondEq, mir, kThrowDivZero);
1883#else
1884 opRegRegReg(cUnit, kOpOr, tReg, rARG2, rARG3);
1885 genImmedCheck(cUnit, kCondEq, mir, tReg, 0, mir, kThrowDivZero);
1886 oatFreeTemp(cUnit, tReg);
1887#endif
1888 } else {
1889 rTgt = loadHelper(cUnit, funcOffset);
1890 loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
1891 loadValueDirectWideFixed(cUnit, rlSrc2, rARG2, rARG3);
1892 }
1893 callRuntimeHelper(cUnit, rTgt);
1894 // Adjust return regs in to handle case of rem returning rARG2/rARG3
1895 if (retReg == rRET0)
1896 rlResult = oatGetReturnWide(cUnit);
1897 else
1898 rlResult = oatGetReturnWideAlt(cUnit);
1899 storeValueWide(cUnit, rlDest, rlResult);
1900 }
1901 return false;
1902}
1903
1904bool genConversionCall(CompilationUnit* cUnit, MIR* mir, int funcOffset,
1905 int srcSize, int tgtSize)
1906{
1907 /*
1908 * Don't optimize the register usage since it calls out to support
1909 * functions
1910 */
1911 RegLocation rlSrc;
1912 RegLocation rlDest;
1913 oatFlushAllRegs(cUnit); /* Send everything to home location */
1914 int rTgt = loadHelper(cUnit, funcOffset);
1915 if (srcSize == 1) {
1916 rlSrc = oatGetSrc(cUnit, mir, 0);
1917 loadValueDirectFixed(cUnit, rlSrc, rARG0);
1918 } else {
1919 rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
1920 loadValueDirectWideFixed(cUnit, rlSrc, rARG0, rARG1);
1921 }
1922 callRuntimeHelper(cUnit, rTgt);
1923 if (tgtSize == 1) {
1924 RegLocation rlResult;
1925 rlDest = oatGetDest(cUnit, mir, 0);
1926 rlResult = oatGetReturn(cUnit);
1927 storeValue(cUnit, rlDest, rlResult);
1928 } else {
1929 RegLocation rlResult;
1930 rlDest = oatGetDestWide(cUnit, mir, 0, 1);
1931 rlResult = oatGetReturnWide(cUnit);
1932 storeValueWide(cUnit, rlDest, rlResult);
1933 }
1934 return false;
1935}
1936
1937void genNegFloat(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc);
1938bool genArithOpFloatPortable(CompilationUnit* cUnit, MIR* mir,
1939 RegLocation rlDest, RegLocation rlSrc1,
1940 RegLocation rlSrc2)
1941{
1942 RegLocation rlResult;
1943 int funcOffset;
1944
1945 switch (mir->dalvikInsn.opcode) {
1946 case OP_ADD_FLOAT_2ADDR:
1947 case OP_ADD_FLOAT:
1948 funcOffset = OFFSETOF_MEMBER(Thread, pFadd);
1949 break;
1950 case OP_SUB_FLOAT_2ADDR:
1951 case OP_SUB_FLOAT:
1952 funcOffset = OFFSETOF_MEMBER(Thread, pFsub);
1953 break;
1954 case OP_DIV_FLOAT_2ADDR:
1955 case OP_DIV_FLOAT:
1956 funcOffset = OFFSETOF_MEMBER(Thread, pFdiv);
1957 break;
1958 case OP_MUL_FLOAT_2ADDR:
1959 case OP_MUL_FLOAT:
1960 funcOffset = OFFSETOF_MEMBER(Thread, pFmul);
1961 break;
1962 case OP_REM_FLOAT_2ADDR:
1963 case OP_REM_FLOAT:
1964 funcOffset = OFFSETOF_MEMBER(Thread, pFmodf);
1965 break;
1966 case OP_NEG_FLOAT: {
1967 genNegFloat(cUnit, rlDest, rlSrc1);
1968 return false;
1969 }
1970 default:
1971 return true;
1972 }
1973 oatFlushAllRegs(cUnit); /* Send everything to home location */
1974 int rTgt = loadHelper(cUnit, funcOffset);
1975 loadValueDirectFixed(cUnit, rlSrc1, rARG0);
1976 loadValueDirectFixed(cUnit, rlSrc2, rARG1);
1977 callRuntimeHelper(cUnit, rTgt);
1978 rlResult = oatGetReturn(cUnit);
1979 storeValue(cUnit, rlDest, rlResult);
1980 return false;
1981}
1982
1983void genNegDouble(CompilationUnit* cUnit, RegLocation rlDst, RegLocation rlSrc);
1984bool genArithOpDoublePortable(CompilationUnit* cUnit, MIR* mir,
1985 RegLocation rlDest, RegLocation rlSrc1,
1986 RegLocation rlSrc2)
1987{
1988 RegLocation rlResult;
1989 int funcOffset;
1990
1991 switch (mir->dalvikInsn.opcode) {
1992 case OP_ADD_DOUBLE_2ADDR:
1993 case OP_ADD_DOUBLE:
1994 funcOffset = OFFSETOF_MEMBER(Thread, pDadd);
1995 break;
1996 case OP_SUB_DOUBLE_2ADDR:
1997 case OP_SUB_DOUBLE:
1998 funcOffset = OFFSETOF_MEMBER(Thread, pDsub);
1999 break;
2000 case OP_DIV_DOUBLE_2ADDR:
2001 case OP_DIV_DOUBLE:
2002 funcOffset = OFFSETOF_MEMBER(Thread, pDdiv);
2003 break;
2004 case OP_MUL_DOUBLE_2ADDR:
2005 case OP_MUL_DOUBLE:
2006 funcOffset = OFFSETOF_MEMBER(Thread, pDmul);
2007 break;
2008 case OP_REM_DOUBLE_2ADDR:
2009 case OP_REM_DOUBLE:
2010 funcOffset = OFFSETOF_MEMBER(Thread, pFmod);
2011 break;
2012 case OP_NEG_DOUBLE: {
2013 genNegDouble(cUnit, rlDest, rlSrc1);
2014 return false;
2015 }
2016 default:
2017 return true;
2018 }
2019 oatFlushAllRegs(cUnit); /* Send everything to home location */
2020 int rTgt = loadHelper(cUnit, funcOffset);
2021 loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
2022 loadValueDirectWideFixed(cUnit, rlSrc2, rARG2, rARG3);
2023 callRuntimeHelper(cUnit, rTgt);
2024 rlResult = oatGetReturnWide(cUnit);
2025 storeValueWide(cUnit, rlDest, rlResult);
2026 return false;
2027}
2028
2029bool genConversionPortable(CompilationUnit* cUnit, MIR* mir)
2030{
2031 Opcode opcode = mir->dalvikInsn.opcode;
2032
2033 switch (opcode) {
2034 case OP_INT_TO_FLOAT:
2035 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pI2f),
2036 1, 1);
2037 case OP_FLOAT_TO_INT:
2038 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pF2iz),
2039 1, 1);
2040 case OP_DOUBLE_TO_FLOAT:
2041 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pD2f),
2042 2, 1);
2043 case OP_FLOAT_TO_DOUBLE:
2044 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pF2d),
2045 1, 2);
2046 case OP_INT_TO_DOUBLE:
2047 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pI2d),
2048 1, 2);
2049 case OP_DOUBLE_TO_INT:
2050 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pD2iz),
2051 2, 1);
2052 case OP_FLOAT_TO_LONG:
2053 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread,
2054 pF2l), 1, 2);
2055 case OP_LONG_TO_FLOAT:
2056 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pL2f),
2057 2, 1);
2058 case OP_DOUBLE_TO_LONG:
2059 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread,
2060 pD2l), 2, 2);
2061 case OP_LONG_TO_DOUBLE:
2062 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pL2d),
2063 2, 2);
2064 default:
2065 return true;
2066 }
2067 return false;
2068}
2069
2070/*
2071 * Generate callout to updateDebugger. Note that we're overloading
2072 * the use of rSUSPEND here. When the debugger is active, this
2073 * register holds the address of the update function. So, if it's
2074 * non-null, we call out to it.
2075 *
2076 * Note also that rRET0 and rRET1 must be preserved across this
2077 * code. This must be handled by the stub.
2078 */
void genDebuggerUpdate(CompilationUnit* cUnit, int32_t offset)
{
    // Following DCHECK verifies that dPC is in range of single load immediate
    DCHECK((offset == DEBUGGER_METHOD_ENTRY) ||
           (offset == DEBUGGER_METHOD_EXIT) || ((offset & 0xffff) == offset));
    // The callout clobbers caller-save registers; mark them dead.
    oatClobberCalleeSave(cUnit);
#if defined(TARGET_ARM)
    // ARM: predicate the load + call with an IT block so they execute only
    // when rSUSPEND is non-zero (i.e. the debugger update entry is set).
    opRegImm(cUnit, kOpCmp, rSUSPEND, 0);
    genIT(cUnit, kArmCondNe, "T");
    loadConstant(cUnit, rARG2, offset);     // arg2 <- Entry code
    opReg(cUnit, kOpBlx, rSUSPEND);
#else
    // Other targets: branch over the callout when rSUSPEND is zero.
    LIR* branch = genCmpImmBranch(cUnit, kCondEq, rSUSPEND, 0);
    loadConstant(cUnit, rARG2, offset);     // arg2 <- Entry code
    opReg(cUnit, kOpBlx, rSUSPEND);
    // Landing pad for the skip branch; clobbers all tracked resources.
    LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
    target->defMask = ENCODE_ALL;
    branch->target = (LIR*)target;
#endif
    oatFreeTemp(cUnit, rARG2);
}
2100
2101/* Check if we need to check for pending suspend request */
void genSuspendTest(CompilationUnit* cUnit, MIR* mir)
{
    // Nothing to do if suspend checks are globally off or elided for this MIR.
    if (NO_SUSPEND || (mir->optimizationFlags & MIR_IGNORE_SUSPEND_CHECK)) {
        return;
    }
    // Dalvik values must be in home locations before a potential suspend.
    oatFlushAllRegs(cUnit);
    LIR* branch;
    if (cUnit->genDebugger) {
        // If generating code for the debugger, always check for suspension
        branch = genUnconditionalBranch(cUnit, NULL);
    } else {
#if defined(TARGET_ARM)
        // In non-debug case, only check periodically: decrement the
        // rSUSPEND countdown and take the branch when it reaches zero.
        newLIR2(cUnit, kThumbSubRI8, rSUSPEND, 1);
        branch = opCondBranch(cUnit, kCondEq);
#else
        opRegImm(cUnit, kOpSub, rSUSPEND, 1);
        branch = opCompareBranchImm(cUnit, kCondEq, rSUSPEND, 0);
#endif
    }
    // Label to resume at after the out-of-line suspend check.
    LIR* retLab = newLIR0(cUnit, kPseudoTargetLabel);
    retLab->defMask = ENCODE_ALL;
    // Build the suspend launchpad record: operands[0] = resume label,
    // operands[1] = dalvik offset. Materialized later from the
    // suspendLaunchpads list.
    LIR* target = (LIR*)oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
    target->dalvikOffset = cUnit->currentDalvikOffset;
    target->opcode = kPseudoSuspendTarget;
    target->operands[0] = (intptr_t)retLab;
    target->operands[1] = mir->offset;
    branch->target = (LIR*)target;
    oatInsertGrowableList(cUnit, &cUnit->suspendLaunchpads, (intptr_t)target);
}
2132
2133} // namespace art