/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

namespace art {

/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */

#if defined(TARGET_ARM)
LIR* genIT(CompilationUnit* cUnit, ArmConditionCode cond, const char* guide);
#endif

LIR* callRuntimeHelper(CompilationUnit* cUnit, int reg)
{
    oatClobberCalleeSave(cUnit);
    return opReg(cUnit, kOpBlx, reg);
}

/*
 * Generate a kPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
void genBarrier(CompilationUnit* cUnit)
{
    LIR* barrier = newLIR0(cUnit, kPseudoBarrier);
    /* Mark all resources as being clobbered */
    barrier->defMask = -1;
}


/* Generate unconditional branch instructions */
LIR* genUnconditionalBranch(CompilationUnit* cUnit, LIR* target)
{
    LIR* branch = opNone(cUnit, kOpUncondBr);
    branch->target = (LIR*) target;
    return branch;
}

// FIXME: need to do some work to split out targets with
// condition codes and those without
#if defined(TARGET_ARM) || defined(TARGET_X86)
/* Generate conditional branch instructions */
LIR* genConditionalBranch(CompilationUnit* cUnit, ConditionCode cond,
                          LIR* target)
{
    LIR* branch = opCondBranch(cUnit, cond);
    branch->target = (LIR*) target;
    return branch;
}

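/*
 * Throw "launchpad" helpers: genCheck and genImmedCheck emit a conditional
 * branch to a kPseudoThrowTarget LIR and record that LIR in
 * cUnit->throwLaunchpads. The launchpad itself is not emitted here;
 * handleThrowLaunchpads() later appends each target and generates the call
 * to the matching Thread exception-throw entrypoint, keeping the exceptional
 * path out of line.
 */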
LIR* genCheck(CompilationUnit* cUnit, ConditionCode cCode, MIR* mir,
              ThrowKind kind)
{
    LIR* tgt = (LIR*)oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
    tgt->opcode = kPseudoThrowTarget;
    tgt->operands[0] = kind;
    tgt->operands[1] = mir ? mir->offset : 0;
    LIR* branch = genConditionalBranch(cUnit, cCode, tgt);
    // Remember branch target - will process later
    oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
    return branch;
}
#endif

LIR* genImmedCheck(CompilationUnit* cUnit, ConditionCode cCode,
                   int reg, int immVal, MIR* mir, ThrowKind kind)
{
    LIR* tgt = (LIR*)oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
    tgt->opcode = kPseudoThrowTarget;
    tgt->operands[0] = kind;
    tgt->operands[1] = mir->offset;
    LIR* branch;
    if (cCode == kCondAl) {
        branch = genUnconditionalBranch(cUnit, tgt);
    } else {
        branch = genCmpImmBranch(cUnit, cCode, reg, immVal);
        branch->target = (LIR*)tgt;
    }
    // Remember branch target - will process later
    oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
    return branch;
}

/* Perform null-check on a register. */
LIR* genNullCheck(CompilationUnit* cUnit, int sReg, int mReg, MIR* mir)
{
    if (!(cUnit->disableOpt & (1 << kNullCheckElimination)) &&
        mir->optimizationFlags & MIR_IGNORE_NULL_CHECK) {
        return NULL;
    }
    return genImmedCheck(cUnit, kCondEq, mReg, 0, mir, kThrowNullPointer);
}

/* Perform check on two registers */
LIR* genRegRegCheck(CompilationUnit* cUnit, ConditionCode cCode,
                    int reg1, int reg2, MIR* mir, ThrowKind kind)
{
    LIR* tgt = (LIR*)oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
    tgt->opcode = kPseudoThrowTarget;
    tgt->operands[0] = kind;
    tgt->operands[1] = mir ? mir->offset : 0;
    tgt->operands[2] = reg1;
    tgt->operands[3] = reg2;
#if defined(TARGET_MIPS)
    LIR* branch = genCompareBranch(cUnit, cCode, reg1, reg2);
#else
    opRegReg(cUnit, kOpCmp, reg1, reg2);
    LIR* branch = genConditionalBranch(cUnit, cCode, tgt);
#endif
    // Remember branch target - will process later
    oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
    return branch;
}

void genCompareAndBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
                         RegLocation rlSrc1, RegLocation rlSrc2, LIR* labelList)
{
    ConditionCode cond;
    rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
    rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
    Opcode opcode = mir->dalvikInsn.opcode;
    switch (opcode) {
        case OP_IF_EQ:
            cond = kCondEq;
            break;
        case OP_IF_NE:
            cond = kCondNe;
            break;
        case OP_IF_LT:
            cond = kCondLt;
            break;
        case OP_IF_GE:
            cond = kCondGe;
            break;
        case OP_IF_GT:
            cond = kCondGt;
            break;
        case OP_IF_LE:
            cond = kCondLe;
            break;
        default:
            cond = (ConditionCode)0;
            LOG(FATAL) << "Unexpected opcode " << (int)opcode;
    }
#if defined(TARGET_MIPS)
    LIR* branch = genCompareBranch(cUnit, cond, rlSrc1.lowReg, rlSrc2.lowReg);
    branch->target = &labelList[bb->taken->id];
#else
    opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
    genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
#endif
    genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
}

void genCompareZeroAndBranch(CompilationUnit* cUnit, BasicBlock* bb, MIR* mir,
                             RegLocation rlSrc, LIR* labelList)
{
    ConditionCode cond;
    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
    Opcode opcode = mir->dalvikInsn.opcode;
    switch (opcode) {
        case OP_IF_EQZ:
            cond = kCondEq;
            break;
        case OP_IF_NEZ:
            cond = kCondNe;
            break;
        case OP_IF_LTZ:
            cond = kCondLt;
            break;
        case OP_IF_GEZ:
            cond = kCondGe;
            break;
        case OP_IF_GTZ:
            cond = kCondGt;
            break;
        case OP_IF_LEZ:
            cond = kCondLe;
            break;
        default:
            cond = (ConditionCode)0;
            LOG(FATAL) << "Unexpected opcode " << (int)opcode;
    }
#if defined(TARGET_MIPS)
    LIR* branch = genCmpImmBranch(cUnit, cond, rlSrc.lowReg, 0);
    branch->target = &labelList[bb->taken->id];
#else
    opRegImm(cUnit, kOpCmp, rlSrc.lowReg, 0);
    genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
#endif
    genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
}

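/*
 * int-to-long: the low word is a copy of the 32-bit source; the high word is
 * the sign extension, produced by arithmetically shifting the low word right
 * by 31 (all ones for negative values, all zeros otherwise).
 */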
void genIntToLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                  RegLocation rlSrc)
{
    RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    if (rlSrc.location == kLocPhysReg) {
        genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
    } else {
        loadValueDirect(cUnit, rlSrc, rlResult.lowReg);
    }
    opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
                rlResult.lowReg, 31);
    storeValueWide(cUnit, rlDest, rlResult);
}

void genIntNarrowing(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                     RegLocation rlSrc)
{
    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
    RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    OpKind op = kOpInvalid;
    switch (mir->dalvikInsn.opcode) {
        case OP_INT_TO_BYTE:
            op = kOp2Byte;
            break;
        case OP_INT_TO_SHORT:
            op = kOp2Short;
            break;
        case OP_INT_TO_CHAR:
            op = kOp2Char;
            break;
        default:
            LOG(ERROR) << "Bad int conversion type";
    }
    opRegReg(cUnit, op, rlResult.lowReg, rlSrc.lowReg);
    storeValue(cUnit, rlDest, rlResult);
}

/*
 * Let helper function take care of everything. Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                 RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    uint32_t type_idx = mir->dalvikInsn.vC;
    int rTgt;
    if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                    cUnit->dex_cache,
                                                    *cUnit->dex_file,
                                                    type_idx)) {
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pAllocArrayFromCode));
    } else {
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pAllocArrayFromCodeWithAccessCheck));
    }
    loadCurrMethodDirect(cUnit, rARG1);         // arg1 <- Method*
    loadConstant(cUnit, rARG0, type_idx);       // arg0 <- type_id
    loadValueDirectFixed(cUnit, rlSrc, rARG2);  // arg2 <- count
    callRuntimeHelper(cUnit, rTgt);
    RegLocation rlResult = oatGetReturn(cUnit);
    storeValue(cUnit, rlDest, rlResult);
}

/*
 * Similar to genNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class. Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int elems = dInsn->vA;
    int typeId = dInsn->vB;
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    int rTgt;
    if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                    cUnit->dex_cache,
                                                    *cUnit->dex_file,
                                                    typeId)) {
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pCheckAndAllocArrayFromCode));
    } else {
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pCheckAndAllocArrayFromCodeWithAccessCheck));
    }
    loadCurrMethodDirect(cUnit, rARG1);      // arg1 <- Method*
    loadConstant(cUnit, rARG0, typeId);      // arg0 <- type_id
    loadConstant(cUnit, rARG2, elems);       // arg2 <- count
    callRuntimeHelper(cUnit, rTgt);
    /*
     * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
     * return region. Because AllocFromCode placed the new array
     * in rRET0, we'll just lock it into place. When debugger support is
     * added, it may be necessary to additionally copy all return
     * values to a home location in thread-local storage
     */
    oatLockTemp(cUnit, rRET0);

    // TODO: use the correct component size, currently all supported types
    // share array alignment with ints (see comment at head of function)
    size_t component_size = sizeof(int32_t);

    // Having a range of 0 is legal
    if (isRange && (dInsn->vA > 0)) {
        /*
         * Bit of ugliness here. We're going to generate a mem copy loop
         * on the register range, but it is possible that some regs
         * in the range have been promoted. This is unlikely, but
         * before generating the copy, we'll just force a flush
         * of any regs in the source range that have been promoted to
         * home location.
         */
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation loc = oatUpdateLoc(cUnit,
                                           oatGetSrc(cUnit, mir, i));
            if (loc.location == kLocPhysReg) {
                storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
                              loc.lowReg, kWord);
            }
        }
        /*
         * TUNING note: generated code here could be much improved, but
         * this is an uncommon operation and isn't especially performance
         * critical.
         */
        int rSrc = oatAllocTemp(cUnit);
        int rDst = oatAllocTemp(cUnit);
        int rIdx = oatAllocTemp(cUnit);
#if defined(TARGET_ARM)
        int rVal = rLR;  // Using a lot of temps, rLR is known free here
#else
        int rVal = oatAllocTemp(cUnit);
#endif
        // Set up source pointer
        RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
        opRegRegImm(cUnit, kOpAdd, rSrc, rSP,
                    oatSRegOffset(cUnit, rlFirst.sRegLow));
        // Set up the target pointer
        opRegRegImm(cUnit, kOpAdd, rDst, rRET0,
                    Array::DataOffset(component_size).Int32Value());
        // Set up the loop counter (known to be > 0)
        loadConstant(cUnit, rIdx, dInsn->vA - 1);
        // Generate the copy loop. Going backwards for convenience
        LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
        target->defMask = ENCODE_ALL;
        // Copy next element
        loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
        storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
#if defined(TARGET_ARM)
        // Combine sub & test using sub setflags encoding here
        newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
        LIR* branch = opCondBranch(cUnit, kCondGe);
#else
        oatFreeTemp(cUnit, rVal);
        opRegImm(cUnit, kOpSub, rIdx, 1);
        LIR* branch = genCmpImmBranch(cUnit, kCondGe, rIdx, 0);
#endif
        branch->target = (LIR*)target;
    } else if (!isRange) {
        // TUNING: interleave
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation rlArg = loadValue(cUnit,
                                          oatGetSrc(cUnit, mir, i), kCoreReg);
            storeBaseDisp(cUnit, rRET0,
                          Array::DataOffset(component_size).Int32Value() +
                          i * 4, rlArg.lowReg, kWord);
            // If the loadValue caused a temp to be allocated, free it
            if (oatIsTemp(cUnit, rlArg.lowReg)) {
                oatFreeTemp(cUnit, rlArg.lowReg);
            }
        }
    }
}

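/*
 * SPUT: when the field resolves at compile time (fast path), store directly
 * through the static storage base - either this method's declaring class or
 * an initialized-static-storage entry that may require a runtime
 * initialization call. Otherwise fall back to the pSet*Static helpers.
 */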
void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
             bool isLongOrDouble, bool isObject)
{
    int fieldOffset;
    int ssbIndex;
    bool isVolatile;
    bool isReferrersClass;
    uint32_t fieldIdx = mir->dalvikInsn.vB;

    OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
                             *cUnit->dex_file, *cUnit->dex_cache,
                             cUnit->code_item, cUnit->method_idx,
                             cUnit->access_flags);

    bool fastPath =
        cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
                                                fieldOffset, ssbIndex,
                                                isReferrersClass, isVolatile,
                                                true);
    if (fastPath && !SLOW_FIELD_PATH) {
        DCHECK_GE(fieldOffset, 0);
        int rBase;
        int rMethod;
        if (isReferrersClass) {
            // Fast path, static storage base is this method's class
            rMethod = loadCurrMethod(cUnit);
            rBase = oatAllocTemp(cUnit);
            loadWordDisp(cUnit, rMethod,
                         Method::DeclaringClassOffset().Int32Value(), rBase);
        } else {
            // Medium path, static storage base in a different class which
            // requires checks that the other class is initialized.
            DCHECK_GE(ssbIndex, 0);
            // May do runtime call so everything to home locations.
            oatFlushAllRegs(cUnit);
            // Using fixed register to sync with possible call to runtime
            // support.
            rMethod = rARG1;
            oatLockTemp(cUnit, rMethod);
            loadCurrMethodDirect(cUnit, rMethod);
            rBase = rARG0;
            oatLockTemp(cUnit, rBase);
            loadWordDisp(cUnit, rMethod,
                         Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                         rBase);
            loadWordDisp(cUnit, rBase,
                         Array::DataOffset(sizeof(Object*)).Int32Value() +
                         sizeof(int32_t*) * ssbIndex, rBase);
            // rBase now points at appropriate static storage base (Class*)
            // or NULL if not initialized. Check for NULL and call helper if NULL.
            // TUNING: fast path should fall through
            LIR* branchOver = genCmpImmBranch(cUnit, kCondNe, rBase, 0);
            int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                                  pInitializeStaticStorage));
            loadConstant(cUnit, rARG0, ssbIndex);
            callRuntimeHelper(cUnit, rTgt);
#if defined(TARGET_MIPS)
            // For Arm, rRET0 = rARG0 = rBASE, for Mips, we need to copy
            genRegCopy(cUnit, rBase, rRET0);
#endif
            LIR* skipTarget = newLIR0(cUnit, kPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            branchOver->target = (LIR*)skipTarget;
        }
        // rBase now holds static storage base
        oatFreeTemp(cUnit, rMethod);
        if (isLongOrDouble) {
            rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
            rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
        } else {
            rlSrc = oatGetSrc(cUnit, mir, 0);
            rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
        }
        // FIXME: need to generalize the barrier call
        if (isVolatile) {
            oatGenMemBarrier(cUnit, kST);
        }
        if (isLongOrDouble) {
            storeBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
                              rlSrc.highReg);
        } else {
            storeWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
        }
        if (isVolatile) {
            oatGenMemBarrier(cUnit, kSY);
        }
        if (isObject) {
            markGCCard(cUnit, rlSrc.lowReg, rBase);
        }
        oatFreeTemp(cUnit, rBase);
    } else {
        oatFlushAllRegs(cUnit);  // Everything to home locations
        int setterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pSet64Static) :
            (isObject ? OFFSETOF_MEMBER(Thread, pSetObjStatic)
                      : OFFSETOF_MEMBER(Thread, pSet32Static));
        int rTgt = loadHelper(cUnit, setterOffset);
        loadConstant(cUnit, rARG0, fieldIdx);
        if (isLongOrDouble) {
            loadValueDirectWideFixed(cUnit, rlSrc, rARG2, rARG3);
        } else {
            loadValueDirect(cUnit, rlSrc, rARG1);
        }
        callRuntimeHelper(cUnit, rTgt);
    }
}

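/*
 * SGET: mirror image of genSput - resolve the static storage base the same
 * way, then load (with a kSY barrier for volatile fields) instead of store.
 * Slow path calls the pGet*Static helpers.
 */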
void genSget(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
             bool isLongOrDouble, bool isObject)
{
    int fieldOffset;
    int ssbIndex;
    bool isVolatile;
    bool isReferrersClass;
    uint32_t fieldIdx = mir->dalvikInsn.vB;

    OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
                             *cUnit->dex_file, *cUnit->dex_cache,
                             cUnit->code_item, cUnit->method_idx,
                             cUnit->access_flags);

    bool fastPath =
        cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
                                                fieldOffset, ssbIndex,
                                                isReferrersClass, isVolatile,
                                                false);
    if (fastPath && !SLOW_FIELD_PATH) {
        DCHECK_GE(fieldOffset, 0);
        int rBase;
        int rMethod;
        if (isReferrersClass) {
            // Fast path, static storage base is this method's class
            rMethod = loadCurrMethod(cUnit);
            rBase = oatAllocTemp(cUnit);
            loadWordDisp(cUnit, rMethod,
                         Method::DeclaringClassOffset().Int32Value(), rBase);
        } else {
            // Medium path, static storage base in a different class which
            // requires checks that the other class is initialized
            DCHECK_GE(ssbIndex, 0);
            // May do runtime call so everything to home locations.
            oatFlushAllRegs(cUnit);
            // Using fixed register to sync with possible call to runtime
            // support
            rMethod = rARG1;
            oatLockTemp(cUnit, rMethod);
            loadCurrMethodDirect(cUnit, rMethod);
            rBase = rARG0;
            oatLockTemp(cUnit, rBase);
            loadWordDisp(cUnit, rMethod,
                         Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                         rBase);
            loadWordDisp(cUnit, rBase,
                         Array::DataOffset(sizeof(Object*)).Int32Value() +
                         sizeof(int32_t*) * ssbIndex,
                         rBase);
            // rBase now points at appropriate static storage base (Class*)
            // or NULL if not initialized. Check for NULL and call helper if NULL.
            // TUNING: fast path should fall through
            LIR* branchOver = genCmpImmBranch(cUnit, kCondNe, rBase, 0);
            int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                                  pInitializeStaticStorage));
            loadConstant(cUnit, rARG0, ssbIndex);
            callRuntimeHelper(cUnit, rTgt);
#if defined(TARGET_MIPS)
            // For Arm, rRET0 = rARG0 = rBASE, for Mips, we need to copy
            genRegCopy(cUnit, rBase, rRET0);
#endif
            LIR* skipTarget = newLIR0(cUnit, kPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            branchOver->target = (LIR*)skipTarget;
        }
        // rBase now holds static storage base
        oatFreeTemp(cUnit, rMethod);
        rlDest = isLongOrDouble ? oatGetDestWide(cUnit, mir, 0, 1)
                                : oatGetDest(cUnit, mir, 0);
        RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
        if (isVolatile) {
            oatGenMemBarrier(cUnit, kSY);
        }
        if (isLongOrDouble) {
            loadBaseDispWide(cUnit, NULL, rBase, fieldOffset, rlResult.lowReg,
                             rlResult.highReg, INVALID_SREG);
        } else {
            loadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
        }
        oatFreeTemp(cUnit, rBase);
        if (isLongOrDouble) {
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            storeValue(cUnit, rlDest, rlResult);
        }
    } else {
        oatFlushAllRegs(cUnit);  // Everything to home locations
        int getterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pGet64Static) :
            (isObject ? OFFSETOF_MEMBER(Thread, pGetObjStatic)
                      : OFFSETOF_MEMBER(Thread, pGet32Static));
        int rTgt = loadHelper(cUnit, getterOffset);
        loadConstant(cUnit, rARG0, fieldIdx);
        callRuntimeHelper(cUnit, rTgt);
        if (isLongOrDouble) {
            RegLocation rlResult = oatGetReturnWide(cUnit);
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            RegLocation rlResult = oatGetReturn(cUnit);
            storeValue(cUnit, rlDest, rlResult);
        }
    }
}


// Debugging routine - if null target, branch to DebugMe
void genShowTarget(CompilationUnit* cUnit)
{
    LIR* branchOver = genCmpImmBranch(cUnit, kCondNe, rLINK, 0);
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pDebugMe), rLINK);
    LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
    target->defMask = -1;
    branchOver->target = (LIR*)target;
}

void genThrowVerificationError(CompilationUnit* cUnit, MIR* mir)
{
    int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pThrowVerificationErrorFromCode));
    loadConstant(cUnit, rARG0, mir->dalvikInsn.vA);
    loadConstant(cUnit, rARG1, mir->dalvikInsn.vB);
    callRuntimeHelper(cUnit, rTgt);
}

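/*
 * Materialize the suspend-check launchpads recorded during code generation:
 * each label is appended to the LIR stream, the pTestSuspendFromCode helper
 * is invoked, and control branches back to the recorded resume label.
 */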
void handleSuspendLaunchpads(CompilationUnit *cUnit)
{
    LIR** suspendLabel =
        (LIR **) cUnit->suspendLaunchpads.elemList;
    int numElems = cUnit->suspendLaunchpads.numUsed;

    for (int i = 0; i < numElems; i++) {
        /* TUNING: move suspend count load into helper */
        LIR* lab = suspendLabel[i];
        LIR* resumeLab = (LIR*)lab->operands[0];
        cUnit->currentDalvikOffset = lab->operands[1];
        oatAppendLIR(cUnit, (LIR *)lab);
        int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                              pTestSuspendFromCode));
        if (!cUnit->genDebugger) {
            // use rSUSPEND for suspend count
            loadWordDisp(cUnit, rSELF,
                         Thread::SuspendCountOffset().Int32Value(), rSUSPEND);
        }
        opReg(cUnit, kOpBlx, rTgt);
        if (cUnit->genDebugger) {
            // use rSUSPEND to update the debugger
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode),
                         rSUSPEND);
        }
        genUnconditionalBranch(cUnit, resumeLab);
    }
}

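/*
 * Materialize the throw launchpads recorded by genCheck/genImmedCheck/
 * genRegRegCheck: append each kPseudoThrowTarget label, marshal any operands
 * into rARG0/rARG1, and call the Thread helper that raises the corresponding
 * exception.
 */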
void handleThrowLaunchpads(CompilationUnit *cUnit)
{
    LIR** throwLabel = (LIR **) cUnit->throwLaunchpads.elemList;
    int numElems = cUnit->throwLaunchpads.numUsed;
    int i;

    for (i = 0; i < numElems; i++) {
        LIR* lab = throwLabel[i];
        cUnit->currentDalvikOffset = lab->operands[1];
        oatAppendLIR(cUnit, (LIR *)lab);
        int funcOffset = 0;
        int v1 = lab->operands[2];
        int v2 = lab->operands[3];
        switch (lab->operands[0]) {
            case kThrowNullPointer:
                funcOffset = OFFSETOF_MEMBER(Thread, pThrowNullPointerFromCode);
                break;
            case kThrowArrayBounds:
                if (v2 != rARG0) {
                    genRegCopy(cUnit, rARG0, v1);
                    genRegCopy(cUnit, rARG1, v2);
                } else {
                    if (v1 == rARG1) {
#if defined(TARGET_ARM)
                        int rTmp = r12;
#else
                        int rTmp = oatAllocTemp(cUnit);
#endif
                        genRegCopy(cUnit, rTmp, v1);
                        genRegCopy(cUnit, rARG1, v2);
                        genRegCopy(cUnit, rARG0, rTmp);
#if !(defined(TARGET_ARM))
                        oatFreeTemp(cUnit, rTmp);
#endif
                    } else {
                        genRegCopy(cUnit, rARG1, v2);
                        genRegCopy(cUnit, rARG0, v1);
                    }
                }
                funcOffset = OFFSETOF_MEMBER(Thread, pThrowArrayBoundsFromCode);
                break;
            case kThrowDivZero:
                funcOffset = OFFSETOF_MEMBER(Thread, pThrowDivZeroFromCode);
                break;
            case kThrowVerificationError:
                loadConstant(cUnit, rARG0, v1);
                loadConstant(cUnit, rARG1, v2);
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode);
                break;
            case kThrowNegArraySize:
                genRegCopy(cUnit, rARG0, v1);
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowNegArraySizeFromCode);
                break;
            case kThrowNoSuchMethod:
                genRegCopy(cUnit, rARG0, v1);
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowNoSuchMethodFromCode);
                break;
            case kThrowStackOverflow:
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowStackOverflowFromCode);
                // Restore stack alignment
                opRegImm(cUnit, kOpAdd, rSP,
                         (cUnit->numCoreSpills + cUnit->numFPSpills) * 4);
                break;
            default:
                LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
        }
        int rTgt = loadHelper(cUnit, funcOffset);
        callRuntimeHelper(cUnit, rTgt);
    }
}

/* Needed by the Assembler */
void oatSetupResourceMasks(LIR* lir)
{
    setupResourceMasks(lir);
}

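/*
 * IGET: fast path null-checks the object and loads directly from the resolved
 * field offset (with a barrier for volatile fields); slow path calls the
 * pGet*Instance helpers with the field index.
 */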
void genIGet(CompilationUnit* cUnit, MIR* mir, OpSize size,
             RegLocation rlDest, RegLocation rlObj,
             bool isLongOrDouble, bool isObject)
{
    int fieldOffset;
    bool isVolatile;
    uint32_t fieldIdx = mir->dalvikInsn.vC;

    OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
                             *cUnit->dex_file, *cUnit->dex_cache,
                             cUnit->code_item, cUnit->method_idx,
                             cUnit->access_flags);

    bool fastPath = cUnit->compiler->ComputeInstanceFieldInfo(fieldIdx, &mUnit,
                                                              fieldOffset,
                                                              isVolatile, false);

    if (fastPath && !SLOW_FIELD_PATH) {
        RegLocation rlResult;
        RegisterClass regClass = oatRegClassBySize(size);
        DCHECK_GE(fieldOffset, 0);
        rlObj = loadValue(cUnit, rlObj, kCoreReg);
        if (isLongOrDouble) {
            DCHECK(rlDest.wide);
            genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);  /* null? */
            int regPtr = oatAllocTemp(cUnit);
            opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
            rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
            loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);
            if (isVolatile) {
                oatGenMemBarrier(cUnit, kSY);
            }
            oatFreeTemp(cUnit, regPtr);
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);
            genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);  /* null? */
            loadBaseDisp(cUnit, mir, rlObj.lowReg, fieldOffset, rlResult.lowReg,
                         kWord, rlObj.sRegLow);
            if (isVolatile) {
                oatGenMemBarrier(cUnit, kSY);
            }
            storeValue(cUnit, rlDest, rlResult);
        }
    } else {
        int getterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pGet64Instance) :
            (isObject ? OFFSETOF_MEMBER(Thread, pGetObjInstance)
                      : OFFSETOF_MEMBER(Thread, pGet32Instance));
        int rTgt = loadHelper(cUnit, getterOffset);
        loadValueDirect(cUnit, rlObj, rARG1);
        loadConstant(cUnit, rARG0, fieldIdx);
        callRuntimeHelper(cUnit, rTgt);
        if (isLongOrDouble) {
            RegLocation rlResult = oatGetReturnWide(cUnit);
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            RegLocation rlResult = oatGetReturn(cUnit);
            storeValue(cUnit, rlDest, rlResult);
        }
    }
}

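/*
 * IPUT: same structure as genIGet, but stores the value; volatile fields get
 * a kST barrier before and a kSY barrier after the store.
 */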
void genIPut(CompilationUnit* cUnit, MIR* mir, OpSize size, RegLocation rlSrc,
             RegLocation rlObj, bool isLongOrDouble, bool isObject)
{
    int fieldOffset;
    bool isVolatile;
    uint32_t fieldIdx = mir->dalvikInsn.vC;

    OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
                             *cUnit->dex_file, *cUnit->dex_cache,
                             cUnit->code_item, cUnit->method_idx,
                             cUnit->access_flags);

    bool fastPath = cUnit->compiler->ComputeInstanceFieldInfo(fieldIdx, &mUnit,
                                                              fieldOffset,
                                                              isVolatile, true);
    if (fastPath && !SLOW_FIELD_PATH) {
        RegisterClass regClass = oatRegClassBySize(size);
        DCHECK_GE(fieldOffset, 0);
        rlObj = loadValue(cUnit, rlObj, kCoreReg);
        if (isLongOrDouble) {
            int regPtr;
            rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
            genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);  /* null? */
            regPtr = oatAllocTemp(cUnit);
            opRegRegImm(cUnit, kOpAdd, regPtr, rlObj.lowReg, fieldOffset);
            if (isVolatile) {
                oatGenMemBarrier(cUnit, kST);
            }
            storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);
            if (isVolatile) {
                oatGenMemBarrier(cUnit, kSY);
            }
            oatFreeTemp(cUnit, regPtr);
        } else {
            rlSrc = loadValue(cUnit, rlSrc, regClass);
            genNullCheck(cUnit, rlObj.sRegLow, rlObj.lowReg, mir);  /* null? */
            if (isVolatile) {
                oatGenMemBarrier(cUnit, kST);
            }
            storeBaseDisp(cUnit, rlObj.lowReg, fieldOffset, rlSrc.lowReg, kWord);
            if (isVolatile) {
                oatGenMemBarrier(cUnit, kSY);
            }
        }
    } else {
        int setterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pSet64Instance) :
            (isObject ? OFFSETOF_MEMBER(Thread, pSetObjInstance)
                      : OFFSETOF_MEMBER(Thread, pSet32Instance));
        int rTgt = loadHelper(cUnit, setterOffset);
        loadValueDirect(cUnit, rlObj, rARG1);
        if (isLongOrDouble) {
            loadValueDirectWide(cUnit, rlSrc, rARG2, rARG3);
        } else {
            loadValueDirect(cUnit, rlSrc, rARG2);
        }
        loadConstant(cUnit, rARG0, fieldIdx);
        callRuntimeHelper(cUnit, rTgt);
    }
}

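/*
 * const-class: fast path loads the resolved Class* out of the method's dex
 * cache; if the type may not yet be resolved (or access must be checked), the
 * code falls back to the pInitializeType* helpers at runtime.
 */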
void genConstClass(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                   RegLocation rlSrc)
{
    uint32_t type_idx = mir->dalvikInsn.vB;
    int mReg = loadCurrMethod(cUnit);
    int resReg = oatAllocTemp(cUnit);
    RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                     cUnit->dex_cache,
                                                     *cUnit->dex_file,
                                                     type_idx)) {
        // Call out to helper which resolves type and verifies access.
        // Resolved type returned in rRET0.
        int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                              pInitializeTypeAndVerifyAccessFromCode));
        genRegCopy(cUnit, rARG1, mReg);
        loadConstant(cUnit, rARG0, type_idx);
        callRuntimeHelper(cUnit, rTgt);
        RegLocation rlResult = oatGetReturn(cUnit);
        storeValue(cUnit, rlDest, rlResult);
    } else {
        // We don't need access checks; load type from dex cache
        int32_t dex_cache_offset =
            Method::DexCacheResolvedTypesOffset().Int32Value();
        loadWordDisp(cUnit, mReg, dex_cache_offset, resReg);
        int32_t offset_of_type =
            Array::DataOffset(sizeof(Class*)).Int32Value() +
            (sizeof(Class*) * type_idx);
        loadWordDisp(cUnit, resReg, offset_of_type, rlResult.lowReg);
        if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(cUnit->dex_cache,
                type_idx) || SLOW_TYPE_PATH) {
            // Slow path, at runtime test if type is null and if so initialize
            oatFlushAllRegs(cUnit);
            LIR* branch1 = genCmpImmBranch(cUnit, kCondEq, rlResult.lowReg, 0);
            // Resolved, store and hop over following code
            storeValue(cUnit, rlDest, rlResult);
            LIR* branch2 = genUnconditionalBranch(cUnit, 0);
            // TUNING: move slow path to end & remove unconditional branch
            LIR* target1 = newLIR0(cUnit, kPseudoTargetLabel);
            target1->defMask = ENCODE_ALL;
            // Call out to helper, which will return resolved type in rARG0
            int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                                  pInitializeTypeFromCode));
            genRegCopy(cUnit, rARG1, mReg);
            loadConstant(cUnit, rARG0, type_idx);
            callRuntimeHelper(cUnit, rTgt);
            RegLocation rlResult = oatGetReturn(cUnit);
            storeValue(cUnit, rlDest, rlResult);
            // Rejoin code paths
            LIR* target2 = newLIR0(cUnit, kPseudoTargetLabel);
            target2->defMask = ENCODE_ALL;
            branch1->target = (LIR*)target1;
            branch2->target = (LIR*)target2;
        } else {
            // Fast path, we're done - just store result
            storeValue(cUnit, rlDest, rlResult);
        }
    }
}
void genConstString(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                    RegLocation rlSrc)
{
    /* NOTE: Most strings should be available at compile time */
    uint32_t string_idx = mir->dalvikInsn.vB;
    int32_t offset_of_string = Array::DataOffset(sizeof(String*)).Int32Value() +
        (sizeof(String*) * string_idx);
    if (!cUnit->compiler->CanAssumeStringIsPresentInDexCache(
            cUnit->dex_cache, string_idx) || SLOW_STRING_PATH) {
        // slow path, resolve string if not in dex cache
        oatFlushAllRegs(cUnit);
        oatLockCallTemps(cUnit);  // Using explicit registers
        loadCurrMethodDirect(cUnit, rARG2);
        loadWordDisp(cUnit, rARG2,
                     Method::DexCacheStringsOffset().Int32Value(), rARG0);
        // Might call out to helper, which will return resolved string in rRET0
        int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                              pResolveStringFromCode));
        loadWordDisp(cUnit, rRET0, offset_of_string, rARG0);
        loadConstant(cUnit, rARG1, string_idx);
#if defined(TARGET_ARM)
        opRegImm(cUnit, kOpCmp, rRET0, 0);  // Is resolved?
        genBarrier(cUnit);
        // For testing, always force through helper
        if (!EXERCISE_SLOWEST_STRING_PATH) {
            genIT(cUnit, kArmCondEq, "T");
        }
        genRegCopy(cUnit, rARG0, rARG2);    // .eq
        opReg(cUnit, kOpBlx, rTgt);         // .eq, helper(Method*, string_idx)
#else
        LIR* branch = genCmpImmBranch(cUnit, kCondNe, rRET0, 0);
        genRegCopy(cUnit, rARG0, rARG2);    // .eq
        opReg(cUnit, kOpBlx, rTgt);
        LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
        target->defMask = ENCODE_ALL;
        branch->target = target;
#endif
        genBarrier(cUnit);
        storeValue(cUnit, rlDest, getRetLoc(cUnit));
    } else {
        int mReg = loadCurrMethod(cUnit);
        int resReg = oatAllocTemp(cUnit);
        RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
        loadWordDisp(cUnit, mReg,
                     Method::DexCacheStringsOffset().Int32Value(), resReg);
        loadWordDisp(cUnit, resReg, offset_of_string, rlResult.lowReg);
        storeValue(cUnit, rlDest, rlResult);
    }
}

/*
 * Let helper function take care of everything. Will
 * call Class::NewInstanceFromCode(type_idx, method);
 */
void genNewInstance(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest)
{
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    uint32_t type_idx = mir->dalvikInsn.vB;
    // alloc will always check for resolution, do we also need to verify
    // access because the verifier was unable to?
    int rTgt;
    if (cUnit->compiler->CanAccessInstantiableTypeWithoutChecks(
            cUnit->method_idx, cUnit->dex_cache, *cUnit->dex_file, type_idx)) {
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pAllocObjectFromCode));
    } else {
        rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pAllocObjectFromCodeWithAccessCheck));
    }
    loadCurrMethodDirect(cUnit, rARG1);    // arg1 <- Method*
    loadConstant(cUnit, rARG0, type_idx);  // arg0 <- type_idx
    callRuntimeHelper(cUnit, rTgt);
    RegLocation rlResult = oatGetReturn(cUnit);
    storeValue(cUnit, rlDest, rlResult);
}

void genInstanceof(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                   RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);
    // May generate a call - use explicit registers
    oatLockCallTemps(cUnit);
    uint32_t type_idx = mir->dalvikInsn.vC;
    loadCurrMethodDirect(cUnit, rARG1);  // rARG1 <- current Method*
    int classReg = rARG2;                // rARG2 will hold the Class*
    if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                     cUnit->dex_cache,
                                                     *cUnit->dex_file,
                                                     type_idx)) {
        // Check we have access to type_idx and if not throw IllegalAccessError,
        // returns Class* in rARG0
        int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                              pInitializeTypeAndVerifyAccessFromCode));
        loadConstant(cUnit, rARG0, type_idx);
        callRuntimeHelper(cUnit, rTgt);  // InitializeTypeAndVerifyAccess(idx, method)
        genRegCopy(cUnit, classReg, rRET0);  // Align usage with fast path
        loadValueDirectFixed(cUnit, rlSrc, rARG0);  // rARG0 <- ref
    } else {
        // Load dex cache entry into classReg (rARG2)
        loadValueDirectFixed(cUnit, rlSrc, rARG0);  // rARG0 <- ref
        loadWordDisp(cUnit, rARG1,
                     Method::DexCacheResolvedTypesOffset().Int32Value(),
                     classReg);
        int32_t offset_of_type =
            Array::DataOffset(sizeof(Class*)).Int32Value() +
            (sizeof(Class*) * type_idx);
        loadWordDisp(cUnit, classReg, offset_of_type, classReg);
        if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(
                cUnit->dex_cache, type_idx)) {
            // Need to test presence of type in dex cache at runtime
            LIR* hopBranch = genCmpImmBranch(cUnit, kCondNe, classReg, 0);
            // Not resolved
            // Call out to helper, which will return resolved type in rRET0
            int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                                  pInitializeTypeFromCode));
            loadConstant(cUnit, rARG0, type_idx);
            callRuntimeHelper(cUnit, rTgt);  // InitializeTypeFromCode(idx, method)
            genRegCopy(cUnit, rARG2, rRET0);  // Align usage with fast path
            loadValueDirectFixed(cUnit, rlSrc, rARG0);  /* reload Ref */
            // Rejoin code paths
            LIR* hopTarget = newLIR0(cUnit, kPseudoTargetLabel);
            hopTarget->defMask = ENCODE_ALL;
            hopBranch->target = (LIR*)hopTarget;
        }
    }
    /* rARG0 is ref, rARG2 is class. If ref==null, use directly as bool result */
    LIR* branch1 = genCmpImmBranch(cUnit, kCondEq, rARG0, 0);
    /* load object->clazz */
    DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
    loadWordDisp(cUnit, rARG0, Object::ClassOffset().Int32Value(), rARG1);
    /* rARG0 is ref, rARG1 is ref->clazz, rARG2 is class */
    int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pInstanceofNonTrivialFromCode));
#if defined(TARGET_ARM)
    opRegReg(cUnit, kOpCmp, rARG1, rARG2);  // Same?
    genBarrier(cUnit);
    genIT(cUnit, kArmCondEq, "EE");    // if-convert the test
    loadConstant(cUnit, rARG0, 1);     // .eq case - load true
    genRegCopy(cUnit, rARG0, rARG2);   // .ne case - arg0 <- class
    opReg(cUnit, kOpBlx, rTgt);        // .ne case: helper(class, ref->class)
    genBarrier(cUnit);
    oatClobberCalleeSave(cUnit);
#else
    (void)rTgt;
    // Perhaps a general-purpose kOpSelect operator?
    UNIMPLEMENTED(FATAL) << "Need non IT implementation";
#endif
    /* branch target here */
    LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
    target->defMask = ENCODE_ALL;
    RegLocation rlResult = oatGetReturn(cUnit);
    storeValue(cUnit, rlDest, rlResult);
    branch1->target = (LIR*)target;
}

void genCheckCast(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);
    // May generate a call - use explicit registers
    oatLockCallTemps(cUnit);
    uint32_t type_idx = mir->dalvikInsn.vB;
    loadCurrMethodDirect(cUnit, rARG1);  // rARG1 <- current Method*
    int classReg = rARG2;                // rARG2 will hold the Class*
    if (!cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                     cUnit->dex_cache,
                                                     *cUnit->dex_file,
                                                     type_idx)) {
        // Check we have access to type_idx and if not throw IllegalAccessError,
        // returns Class* in rRET0
        int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                              pInitializeTypeAndVerifyAccessFromCode));
        loadConstant(cUnit, rARG0, type_idx);
        callRuntimeHelper(cUnit, rTgt);  // InitializeTypeAndVerifyAccess(idx, method)
        genRegCopy(cUnit, classReg, rRET0);  // Align usage with fast path
    } else {
        // Load dex cache entry into classReg (rARG2)
        loadWordDisp(cUnit, rARG1,
                     Method::DexCacheResolvedTypesOffset().Int32Value(),
                     classReg);
        int32_t offset_of_type =
            Array::DataOffset(sizeof(Class*)).Int32Value() +
            (sizeof(Class*) * type_idx);
        loadWordDisp(cUnit, classReg, offset_of_type, classReg);
        if (!cUnit->compiler->CanAssumeTypeIsPresentInDexCache(
                cUnit->dex_cache, type_idx)) {
            // Need to test presence of type in dex cache at runtime
            LIR* hopBranch = genCmpImmBranch(cUnit, kCondNe, classReg, 0);
            // Not resolved
            // Call out to helper, which will return resolved type in rARG0
            int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pInitializeTypeFromCode));
            loadConstant(cUnit, rARG0, type_idx);
            callRuntimeHelper(cUnit, rTgt);  // InitializeTypeFromCode(idx, method)
            genRegCopy(cUnit, classReg, rARG0);  // Align usage with fast path
            // Rejoin code paths
            LIR* hopTarget = newLIR0(cUnit, kPseudoTargetLabel);
            hopTarget->defMask = ENCODE_ALL;
            hopBranch->target = (LIR*)hopTarget;
        }
    }
    // At this point, classReg (rARG2) has class
    loadValueDirectFixed(cUnit, rlSrc, rARG0);  // rARG0 <- ref
    /* Null is OK - continue */
    LIR* branch1 = genCmpImmBranch(cUnit, kCondEq, rARG0, 0);
    /* load object->clazz */
    DCHECK_EQ(Object::ClassOffset().Int32Value(), 0);
    loadWordDisp(cUnit, rARG0, Object::ClassOffset().Int32Value(), rARG1);
    /* rARG1 now contains object->clazz */
    int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pCheckCastFromCode));
#if defined(TARGET_MIPS)
    LIR* branch2 = genCompareBranch(cUnit, kCondEq, rARG1, classReg);
#else
    opRegReg(cUnit, kOpCmp, rARG1, classReg);
    LIR* branch2 = opCondBranch(cUnit, kCondEq);  /* If equal, trivial yes */
#endif
    genRegCopy(cUnit, rARG0, rARG1);
    genRegCopy(cUnit, rARG1, rARG2);
    callRuntimeHelper(cUnit, rTgt);
    /* branch target here */
    LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
    target->defMask = ENCODE_ALL;
    branch1->target = (LIR*)target;
    branch2->target = (LIR*)target;
}


void genThrow(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);
    int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread, pDeliverException));
    loadValueDirectFixed(cUnit, rlSrc, rARG0);  // Get exception object
    callRuntimeHelper(cUnit, rTgt);  // art_deliver_exception(exception);
}

/*
 * Generate array store for object arrays (with element type check)
 */
void genArrayObjPut(CompilationUnit* cUnit, MIR* mir, RegLocation rlArray,
                    RegLocation rlIndex, RegLocation rlSrc, int scale)
{
    RegisterClass regClass = oatRegClassBySize(kWord);
    int lenOffset = Array::LengthOffset().Int32Value();
    int dataOffset = Array::DataOffset(sizeof(Object*)).Int32Value();

    oatFlushAllRegs(cUnit);
    /* Make sure it's a legal object Put. Use direct regs at first */
    loadValueDirectFixed(cUnit, rlArray, rARG1);
    loadValueDirectFixed(cUnit, rlSrc, rARG0);

    /* null array object? */
    genNullCheck(cUnit, rlArray.sRegLow, rARG1, mir);
    int rTgt = loadHelper(cUnit, OFFSETOF_MEMBER(Thread,
                          pCanPutArrayElementFromCode));
    /* Get the array's clazz */
    loadWordDisp(cUnit, rARG1, Object::ClassOffset().Int32Value(), rARG1);
    callRuntimeHelper(cUnit, rTgt);
    oatFreeTemp(cUnit, rARG0);
    oatFreeTemp(cUnit, rARG1);

    // Now, redo loadValues in case they didn't survive the call

    int regPtr;
    rlArray = loadValue(cUnit, rlArray, kCoreReg);
    rlIndex = loadValue(cUnit, rlIndex, kCoreReg);

    if (oatIsTemp(cUnit, rlArray.lowReg)) {
        oatClobber(cUnit, rlArray.lowReg);
        regPtr = rlArray.lowReg;
    } else {
        regPtr = oatAllocTemp(cUnit);
        genRegCopy(cUnit, regPtr, rlArray.lowReg);
    }

    if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        int regLen = oatAllocTemp(cUnit);
        // NOTE: max live temps(4) here.
        /* Get len */
        loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
        /* regPtr -> array data */
        opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
        genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
                       kThrowArrayBounds);
        oatFreeTemp(cUnit, regLen);
    } else {
        /* regPtr -> array data */
        opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
    }
    /* at this point, regPtr points to array, 2 live temps */
    rlSrc = loadValue(cUnit, rlSrc, regClass);
    storeBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlSrc.lowReg,
                     scale, kWord);
}

/*
 * Generate array load
 */
void genArrayGet(CompilationUnit* cUnit, MIR* mir, OpSize size,
                 RegLocation rlArray, RegLocation rlIndex,
                 RegLocation rlDest, int scale)
{
    RegisterClass regClass = oatRegClassBySize(size);
    int lenOffset = Array::LengthOffset().Int32Value();
    int dataOffset;
    RegLocation rlResult;
    rlArray = loadValue(cUnit, rlArray, kCoreReg);
    rlIndex = loadValue(cUnit, rlIndex, kCoreReg);
    int regPtr;

    if (size == kLong || size == kDouble) {
        dataOffset = Array::DataOffset(sizeof(int64_t)).Int32Value();
    } else {
        dataOffset = Array::DataOffset(sizeof(int32_t)).Int32Value();
    }

    /* null object? */
    genNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg, mir);

    regPtr = oatAllocTemp(cUnit);

    if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        int regLen = oatAllocTemp(cUnit);
        /* Get len */
        loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
        /* regPtr -> array data */
        opRegRegImm(cUnit, kOpAdd, regPtr, rlArray.lowReg, dataOffset);
        genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
                       kThrowArrayBounds);
        oatFreeTemp(cUnit, regLen);
    } else {
        /* regPtr -> array data */
        opRegRegImm(cUnit, kOpAdd, regPtr, rlArray.lowReg, dataOffset);
    }
    oatFreeTemp(cUnit, rlArray.lowReg);
    if ((size == kLong) || (size == kDouble)) {
        if (scale) {
            int rNewIndex = oatAllocTemp(cUnit);
            opRegRegImm(cUnit, kOpLsl, rNewIndex, rlIndex.lowReg, scale);
            opRegReg(cUnit, kOpAdd, regPtr, rNewIndex);
            oatFreeTemp(cUnit, rNewIndex);
        } else {
            opRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
        }
        oatFreeTemp(cUnit, rlIndex.lowReg);
        rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);

        loadPair(cUnit, regPtr, rlResult.lowReg, rlResult.highReg);

        oatFreeTemp(cUnit, regPtr);
        storeValueWide(cUnit, rlDest, rlResult);
    } else {
        rlResult = oatEvalLoc(cUnit, rlDest, regClass, true);

        loadBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlResult.lowReg,
                        scale, size);

        oatFreeTemp(cUnit, regPtr);
        storeValue(cUnit, rlDest, rlResult);
    }
}

/*
 * Generate array store
 */
void genArrayPut(CompilationUnit* cUnit, MIR* mir, OpSize size,
                 RegLocation rlArray, RegLocation rlIndex,
                 RegLocation rlSrc, int scale)
{
    RegisterClass regClass = oatRegClassBySize(size);
    int lenOffset = Array::LengthOffset().Int32Value();
    int dataOffset;

    if (size == kLong || size == kDouble) {
        dataOffset = Array::DataOffset(sizeof(int64_t)).Int32Value();
    } else {
        dataOffset = Array::DataOffset(sizeof(int32_t)).Int32Value();
    }

    int regPtr;
    rlArray = loadValue(cUnit, rlArray, kCoreReg);
    rlIndex = loadValue(cUnit, rlIndex, kCoreReg);

    if (oatIsTemp(cUnit, rlArray.lowReg)) {
        oatClobber(cUnit, rlArray.lowReg);
        regPtr = rlArray.lowReg;
    } else {
        regPtr = oatAllocTemp(cUnit);
        genRegCopy(cUnit, regPtr, rlArray.lowReg);
    }

    /* null object? */
    genNullCheck(cUnit, rlArray.sRegLow, rlArray.lowReg, mir);

    if (!(mir->optimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
        int regLen = oatAllocTemp(cUnit);
        // NOTE: max live temps(4) here.
        /* Get len */
        loadWordDisp(cUnit, rlArray.lowReg, lenOffset, regLen);
        /* regPtr -> array data */
        opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
        genRegRegCheck(cUnit, kCondCs, rlIndex.lowReg, regLen, mir,
                       kThrowArrayBounds);
        oatFreeTemp(cUnit, regLen);
    } else {
        /* regPtr -> array data */
        opRegImm(cUnit, kOpAdd, regPtr, dataOffset);
    }
    /* at this point, regPtr points to array, 2 live temps */
    if ((size == kLong) || (size == kDouble)) {
        // TUNING: specific wide routine that can handle fp regs
        if (scale) {
            int rNewIndex = oatAllocTemp(cUnit);
            opRegRegImm(cUnit, kOpLsl, rNewIndex, rlIndex.lowReg, scale);
            opRegReg(cUnit, kOpAdd, regPtr, rNewIndex);
            oatFreeTemp(cUnit, rNewIndex);
        } else {
            opRegReg(cUnit, kOpAdd, regPtr, rlIndex.lowReg);
        }
        rlSrc = loadValueWide(cUnit, rlSrc, regClass);

        storePair(cUnit, regPtr, rlSrc.lowReg, rlSrc.highReg);

        oatFreeTemp(cUnit, regPtr);
    } else {
        rlSrc = loadValue(cUnit, rlSrc, regClass);

        storeBaseIndexed(cUnit, regPtr, rlIndex.lowReg, rlSrc.lowReg,
                         scale, size);
    }
}

void genLong3Addr(CompilationUnit* cUnit, MIR* mir, OpKind firstOp,
                  OpKind secondOp, RegLocation rlDest,
                  RegLocation rlSrc1, RegLocation rlSrc2)
{
    RegLocation rlResult;
#if defined(TARGET_ARM)
    /*
     * NOTE: This is the one place in the code in which we might have
     * as many as six live temporary registers. There are 5 in the normal
     * set for Arm. Until we have spill capabilities, temporarily add
     * lr to the temp set. It is safe to do this locally, but note that
     * lr is used explicitly elsewhere in the code generator and cannot
     * normally be used as a general temp register.
     */
    oatMarkTemp(cUnit, rLR);   // Add lr to the temp pool
    oatFreeTemp(cUnit, rLR);   // and make it available
#endif
    rlSrc1 = loadValueWide(cUnit, rlSrc1, kCoreReg);
    rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
    rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    // The longs may overlap - use intermediate temp if so
    if (rlResult.lowReg == rlSrc1.highReg) {
        int tReg = oatAllocTemp(cUnit);
        genRegCopy(cUnit, tReg, rlSrc1.highReg);
        opRegRegReg(cUnit, firstOp, rlResult.lowReg, rlSrc1.lowReg,
                    rlSrc2.lowReg);
        opRegRegReg(cUnit, secondOp, rlResult.highReg, tReg,
                    rlSrc2.highReg);
        oatFreeTemp(cUnit, tReg);
    } else {
        opRegRegReg(cUnit, firstOp, rlResult.lowReg, rlSrc1.lowReg,
                    rlSrc2.lowReg);
        opRegRegReg(cUnit, secondOp, rlResult.highReg, rlSrc1.highReg,
                    rlSrc2.highReg);
    }
    /*
     * NOTE: If rlDest refers to a frame variable in a large frame, the
     * following storeValueWide might need to allocate a temp register.
     * To further work around the lack of a spill capability, explicitly
     * free any temps from rlSrc1 & rlSrc2 that aren't still live in rlResult.
     * Remove when spill is functional.
     */
    freeRegLocTemps(cUnit, rlResult, rlSrc1);
    freeRegLocTemps(cUnit, rlResult, rlSrc2);
    storeValueWide(cUnit, rlDest, rlResult);
#if defined(TARGET_ARM)
    oatClobber(cUnit, rLR);
    oatUnmarkTemp(cUnit, rLR);  // Remove lr from the temp pool
#endif
}


bool genShiftOpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                    RegLocation rlSrc1, RegLocation rlShift)
{
    int funcOffset;

    switch (mir->dalvikInsn.opcode) {
        case OP_SHL_LONG:
        case OP_SHL_LONG_2ADDR:
            funcOffset = OFFSETOF_MEMBER(Thread, pShlLong);
            break;
        case OP_SHR_LONG:
        case OP_SHR_LONG_2ADDR:
            funcOffset = OFFSETOF_MEMBER(Thread, pShrLong);
            break;
        case OP_USHR_LONG:
        case OP_USHR_LONG_2ADDR:
            funcOffset = OFFSETOF_MEMBER(Thread, pUshrLong);
            break;
        default:
            LOG(FATAL) << "Unexpected case";
            return true;
    }
    oatFlushAllRegs(cUnit);    /* Send everything to home location */
    int rTgt = loadHelper(cUnit, funcOffset);
    loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
    loadValueDirect(cUnit, rlShift, rARG2);
    callRuntimeHelper(cUnit, rTgt);
    RegLocation rlResult = oatGetReturnWide(cUnit);
    storeValueWide(cUnit, rlDest, rlResult);
    return false;
}


bool genArithOpInt(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                   RegLocation rlSrc1, RegLocation rlSrc2)
{
    OpKind op = kOpBkpt;
    bool callOut = false;
    bool checkZero = false;
    bool unary = false;
    int retReg = rRET0;
    int funcOffset;
    RegLocation rlResult;
    bool shiftOp = false;

    switch (mir->dalvikInsn.opcode) {
        case OP_NEG_INT:
            op = kOpNeg;
            unary = true;
            break;
        case OP_NOT_INT:
            op = kOpMvn;
            unary = true;
            break;
        case OP_ADD_INT:
        case OP_ADD_INT_2ADDR:
            op = kOpAdd;
            break;
        case OP_SUB_INT:
        case OP_SUB_INT_2ADDR:
            op = kOpSub;
            break;
        case OP_MUL_INT:
        case OP_MUL_INT_2ADDR:
            op = kOpMul;
            break;
        case OP_DIV_INT:
        case OP_DIV_INT_2ADDR:
            callOut = true;
            checkZero = true;
            funcOffset = OFFSETOF_MEMBER(Thread, pIdiv);
            retReg = rRET0;
            break;
        /* NOTE: returns in rARG1 */
        case OP_REM_INT:
        case OP_REM_INT_2ADDR:
            callOut = true;
            checkZero = true;
            funcOffset = OFFSETOF_MEMBER(Thread, pIdivmod);
            retReg = rRET1;
            break;
        case OP_AND_INT:
        case OP_AND_INT_2ADDR:
            op = kOpAnd;
            break;
        case OP_OR_INT:
        case OP_OR_INT_2ADDR:
            op = kOpOr;
            break;
        case OP_XOR_INT:
        case OP_XOR_INT_2ADDR:
            op = kOpXor;
            break;
        case OP_SHL_INT:
        case OP_SHL_INT_2ADDR:
            shiftOp = true;
            op = kOpLsl;
            break;
        case OP_SHR_INT:
        case OP_SHR_INT_2ADDR:
            shiftOp = true;
            op = kOpAsr;
            break;
        case OP_USHR_INT:
        case OP_USHR_INT_2ADDR:
            shiftOp = true;
            op = kOpLsr;
            break;
        default:
            LOG(FATAL) << "Invalid word arith op: " <<
                (int)mir->dalvikInsn.opcode;
    }
    if (!callOut) {
        rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
        if (unary) {
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            opRegReg(cUnit, op, rlResult.lowReg,
                     rlSrc1.lowReg);
        } else {
            rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
            if (shiftOp) {
                int tReg = oatAllocTemp(cUnit);
                opRegRegImm(cUnit, kOpAnd, tReg, rlSrc2.lowReg, 31);
                rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
                opRegRegReg(cUnit, op, rlResult.lowReg,
                            rlSrc1.lowReg, tReg);
                oatFreeTemp(cUnit, tReg);
            } else {
                rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
                opRegRegReg(cUnit, op, rlResult.lowReg,
                            rlSrc1.lowReg, rlSrc2.lowReg);
            }
        }
        storeValue(cUnit, rlDest, rlResult);
    } else {
        RegLocation rlResult;
        oatFlushAllRegs(cUnit);    /* Send everything to home location */
        loadValueDirectFixed(cUnit, rlSrc2, rRET1);
        int rTgt = loadHelper(cUnit, funcOffset);
        loadValueDirectFixed(cUnit, rlSrc1, rARG0);
        if (checkZero) {
            genImmedCheck(cUnit, kCondEq, rARG1, 0, mir, kThrowDivZero);
        }
        callRuntimeHelper(cUnit, rTgt);
        if (retReg == rRET0)
            rlResult = oatGetReturn(cUnit);
        else
            rlResult = oatGetReturnAlt(cUnit);
        storeValue(cUnit, rlDest, rlResult);
    }
    return false;
}

/*
 * The following are the first-level codegen routines that analyze the format
 * of each bytecode then either dispatch special purpose codegen routines
 * or produce corresponding Thumb instructions directly.
 */

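/*
 * Bit tricks used below: x & (x - 1) clears the lowest set bit of x, so the
 * result is zero exactly when x has at most one bit set (note that
 * isPowerOfTwo therefore also returns true for 0). Applying the same step
 * twice, as in isPopCountLE2, tests for at most two set bits.
 */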
bool isPowerOfTwo(int x)
{
    return (x & (x - 1)) == 0;
}

// Returns true if no more than two bits are set in 'x'.
bool isPopCountLE2(unsigned int x)
{
    x &= x - 1;
    return (x & (x - 1)) == 0;
}

// Returns the index of the lowest set bit in 'x'.
int lowestSetBit(unsigned int x) {
    int bit_posn = 0;
    while ((x & 0xf) == 0) {
        bit_posn += 4;
        x >>= 4;
    }
    while ((x & 1) == 0) {
        bit_posn++;
        x >>= 1;
    }
    return bit_posn;
}

// Returns true if it added instructions to 'cUnit' to divide 'rlSrc' by 'lit'
// and store the result in 'rlDest'.
1576bool handleEasyDivide(CompilationUnit* cUnit, Opcode dalvikOpcode,
1577 RegLocation rlSrc, RegLocation rlDest, int lit)
1578{
1579 if (lit < 2 || !isPowerOfTwo(lit)) {
1580 return false;
1581 }
1582 int k = lowestSetBit(lit);
1583 if (k >= 30) {
1584 // Avoid special cases.
1585 return false;
1586 }
1587 bool div = (dalvikOpcode == OP_DIV_INT_LIT8 ||
1588 dalvikOpcode == OP_DIV_INT_LIT16);
1589 rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
1590 RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
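    // Signed division by 2^k: bias negative dividends by (2^k - 1) before the arithmetic
    // shift. The bias is built from the sign bits: (src >> 31) logically shifted right by
    // 32 - k (folded into a single shift when lit == 2). Remainder reuses the same bias:
    // rem = ((src + bias) & (lit - 1)) - bias.
    // Example, lit == 4 (k == 2): div = (src + ((src >> 31) >>> 30)) >> 2.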
1591 if (div) {
1592 int tReg = oatAllocTemp(cUnit);
1593 if (lit == 2) {
1594 // Division by 2 is by far the most common division by constant.
1595 opRegRegImm(cUnit, kOpLsr, tReg, rlSrc.lowReg, 32 - k);
1596 opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
1597 opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
1598 } else {
1599 opRegRegImm(cUnit, kOpAsr, tReg, rlSrc.lowReg, 31);
1600 opRegRegImm(cUnit, kOpLsr, tReg, tReg, 32 - k);
1601 opRegRegReg(cUnit, kOpAdd, tReg, tReg, rlSrc.lowReg);
1602 opRegRegImm(cUnit, kOpAsr, rlResult.lowReg, tReg, k);
1603 }
1604 } else {
1605 int cReg = oatAllocTemp(cUnit);
1606 loadConstant(cUnit, cReg, lit - 1);
1607 int tReg1 = oatAllocTemp(cUnit);
1608 int tReg2 = oatAllocTemp(cUnit);
1609 if (lit == 2) {
1610 opRegRegImm(cUnit, kOpLsr, tReg1, rlSrc.lowReg, 32 - k);
1611 opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
1612 opRegRegReg(cUnit, kOpAnd, tReg2, tReg2, cReg);
1613 opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
1614 } else {
1615 opRegRegImm(cUnit, kOpAsr, tReg1, rlSrc.lowReg, 31);
1616 opRegRegImm(cUnit, kOpLsr, tReg1, tReg1, 32 - k);
1617 opRegRegReg(cUnit, kOpAdd, tReg2, tReg1, rlSrc.lowReg);
1618 opRegRegReg(cUnit, kOpAnd, tReg2, tReg2, cReg);
1619 opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg2, tReg1);
1620 }
1621 }
1622 storeValue(cUnit, rlDest, rlResult);
1623 return true;
1624}
1625
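// Emits rlResult = (rlSrc << secondBit) + (rlSrc << firstBit), i.e. multiplication by a
// constant with exactly two set bits, computed as
// ((rlSrc + (rlSrc << (secondBit - firstBit))) << firstBit). The 'lit' argument is not
// used by this helper itself.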
1626void genMultiplyByTwoBitMultiplier(CompilationUnit* cUnit, RegLocation rlSrc,
1627 RegLocation rlResult, int lit,
1628 int firstBit, int secondBit)
1629{
1630#if defined(TARGET_MIPS)
1631 UNIMPLEMENTED(FATAL) << "Need shift & add primitive";
1632#else
1633 opRegRegRegShift(cUnit, kOpAdd, rlResult.lowReg, rlSrc.lowReg, rlSrc.lowReg,
1634 encodeShift(kArmLsl, secondBit - firstBit));
1635#endif
1636 if (firstBit != 0) {
1637 opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlResult.lowReg, firstBit);
1638 }
1639}
1640
1641// Returns true if it added instructions to 'cUnit' to multiply 'rlSrc' by 'lit'
1642// and store the result in 'rlDest'.
1643bool handleEasyMultiply(CompilationUnit* cUnit, RegLocation rlSrc,
1644 RegLocation rlDest, int lit)
1645{
1646 // Can we simplify this multiplication?
1647 bool powerOfTwo = false;
1648 bool popCountLE2 = false;
1649 bool powerOfTwoMinusOne = false;
1650 if (lit < 2) {
1651 // Avoid special cases.
1652 return false;
1653 } else if (isPowerOfTwo(lit)) {
1654 powerOfTwo = true;
1655 } else if (isPopCountLE2(lit)) {
1656 popCountLE2 = true;
1657 } else if (isPowerOfTwo(lit + 1)) {
1658 powerOfTwoMinusOne = true;
1659 } else {
1660 return false;
1661 }
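    // Examples: lit == 8 is a single shift; lit == 10 (0b1010) takes the two-bit
    // shift-and-add path (firstBit == 1, secondBit == 3); lit == 7 becomes (src << 3) - src.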
1662 rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
1663 RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1664 if (powerOfTwo) {
1665 // Shift.
1666 opRegRegImm(cUnit, kOpLsl, rlResult.lowReg, rlSrc.lowReg,
1667 lowestSetBit(lit));
1668 } else if (popCountLE2) {
1669 // Shift and add and shift.
1670 int firstBit = lowestSetBit(lit);
1671 int secondBit = lowestSetBit(lit ^ (1 << firstBit));
1672 genMultiplyByTwoBitMultiplier(cUnit, rlSrc, rlResult, lit,
1673 firstBit, secondBit);
1674 } else {
1675 // Reverse subtract: (src << (shift + 1)) - src.
1676 DCHECK(powerOfTwoMinusOne);
1677 // TUNING: rsb dst, src, src lsl#lowestSetBit(lit + 1)
1678 int tReg = oatAllocTemp(cUnit);
1679 opRegRegImm(cUnit, kOpLsl, tReg, rlSrc.lowReg, lowestSetBit(lit + 1));
1680 opRegRegReg(cUnit, kOpSub, rlResult.lowReg, tReg, rlSrc.lowReg);
1681 }
1682 storeValue(cUnit, rlDest, rlResult);
1683 return true;
1684}
1685
1686bool genArithOpIntLit(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
1687 RegLocation rlSrc, int lit)
1688{
1689 Opcode dalvikOpcode = mir->dalvikInsn.opcode;
1690 RegLocation rlResult;
1691 OpKind op = (OpKind)0; /* Make gcc happy */
1692 bool shiftOp = false;
1693 bool isDiv = false;
1694 int funcOffset;
1695 int rTgt;
1696
1697 switch (dalvikOpcode) {
1698 case OP_RSUB_INT_LIT8:
1699 case OP_RSUB_INT: {
1700 int tReg;
1701 //TUNING: add support for use of Arm rsub op
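            // For now the literal is materialized in a temp and subtracted the long way;
            // ARM's RSB instruction could compute (lit - src) directly.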
1702 rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
1703 tReg = oatAllocTemp(cUnit);
1704 loadConstant(cUnit, tReg, lit);
1705 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1706 opRegRegReg(cUnit, kOpSub, rlResult.lowReg,
1707 tReg, rlSrc.lowReg);
1708 storeValue(cUnit, rlDest, rlResult);
1709 return false;
1710 break;
1711 }
1712
1713 case OP_ADD_INT_LIT8:
1714 case OP_ADD_INT_LIT16:
1715 op = kOpAdd;
1716 break;
1717 case OP_MUL_INT_LIT8:
1718 case OP_MUL_INT_LIT16: {
1719 if (handleEasyMultiply(cUnit, rlSrc, rlDest, lit)) {
1720 return false;
1721 }
1722 op = kOpMul;
1723 break;
1724 }
1725 case OP_AND_INT_LIT8:
1726 case OP_AND_INT_LIT16:
1727 op = kOpAnd;
1728 break;
1729 case OP_OR_INT_LIT8:
1730 case OP_OR_INT_LIT16:
1731 op = kOpOr;
1732 break;
1733 case OP_XOR_INT_LIT8:
1734 case OP_XOR_INT_LIT16:
1735 op = kOpXor;
1736 break;
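        // The shift *_LIT8 opcodes only honor the low five bits of the literal,
        // per Dalvik shift semantics.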
1737 case OP_SHL_INT_LIT8:
1738 lit &= 31;
1739 shiftOp = true;
1740 op = kOpLsl;
1741 break;
1742 case OP_SHR_INT_LIT8:
1743 lit &= 31;
1744 shiftOp = true;
1745 op = kOpAsr;
1746 break;
1747 case OP_USHR_INT_LIT8:
1748 lit &= 31;
1749 shiftOp = true;
1750 op = kOpLsr;
1751 break;
1752
1753 case OP_DIV_INT_LIT8:
1754 case OP_DIV_INT_LIT16:
1755 case OP_REM_INT_LIT8:
1756 case OP_REM_INT_LIT16:
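            // The divisor is a compile-time constant. A zero divisor always throws, so an
            // always-taken check is emitted straight to the divide-by-zero launchpad.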
1757 if (lit == 0) {
1758 genImmedCheck(cUnit, kCondAl, 0, 0, mir, kThrowDivZero);
1759 return false;
1760 }
1761 if (handleEasyDivide(cUnit, dalvikOpcode, rlSrc, rlDest, lit)) {
1762 return false;
1763 }
1764 oatFlushAllRegs(cUnit); /* Everything to home location */
1765 loadValueDirectFixed(cUnit, rlSrc, rARG0);
1766 oatClobber(cUnit, rARG0);
1767 if ((dalvikOpcode == OP_DIV_INT_LIT8) ||
1768 (dalvikOpcode == OP_DIV_INT_LIT16)) {
1769 funcOffset = OFFSETOF_MEMBER(Thread, pIdiv);
1770 isDiv = true;
1771 } else {
1772 funcOffset = OFFSETOF_MEMBER(Thread, pIdivmod);
1773 isDiv = false;
1774 }
1775 rTgt = loadHelper(cUnit, funcOffset);
1776 loadConstant(cUnit, rARG1, lit);
1777 callRuntimeHelper(cUnit, rTgt);
1778 if (isDiv)
1779 rlResult = oatGetReturn(cUnit);
1780 else
1781 rlResult = oatGetReturnAlt(cUnit);
1782 storeValue(cUnit, rlDest, rlResult);
1783 return false;
1784 break;
1785 default:
1786 return true;
1787 }
1788 rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
1789 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1790 // Avoid shifts by a literal 0 (no Thumb encoding for them); emit a register copy instead.
1791 if (shiftOp && (lit == 0)) {
1792 genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
1793 } else {
1794 opRegRegImm(cUnit, op, rlResult.lowReg, rlSrc.lowReg, lit);
1795 }
1796 storeValue(cUnit, rlDest, rlResult);
1797 return false;
1798}
1799
1800bool genArithOpLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
1801 RegLocation rlSrc1, RegLocation rlSrc2)
1802{
1803 RegLocation rlResult;
1804 OpKind firstOp = kOpBkpt;
1805 OpKind secondOp = kOpBkpt;
1806 bool callOut = false;
1807 bool checkZero = false;
1808 int funcOffset;
1809 int retReg = rRET0;
1810
1811 switch (mir->dalvikInsn.opcode) {
1812 case OP_NOT_LONG:
1813 rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
1814 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1815 // Check for destructive overlap
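            // rlResult.lowReg may alias rlSrc2.highReg; writing the low half first would
            // corrupt the source high word, so stash it in a temp.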
1816 if (rlResult.lowReg == rlSrc2.highReg) {
1817 int tReg = oatAllocTemp(cUnit);
1818 genRegCopy(cUnit, tReg, rlSrc2.highReg);
1819 opRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
1820 opRegReg(cUnit, kOpMvn, rlResult.highReg, tReg);
1821 oatFreeTemp(cUnit, tReg);
1822 } else {
1823 opRegReg(cUnit, kOpMvn, rlResult.lowReg, rlSrc2.lowReg);
1824 opRegReg(cUnit, kOpMvn, rlResult.highReg, rlSrc2.highReg);
1825 }
1826 storeValueWide(cUnit, rlDest, rlResult);
1827 return false;
1828 break;
1829 case OP_ADD_LONG:
1830 case OP_ADD_LONG_2ADDR:
1831 firstOp = kOpAdd;
1832 secondOp = kOpAdc;
1833 break;
1834 case OP_SUB_LONG:
1835 case OP_SUB_LONG_2ADDR:
1836 firstOp = kOpSub;
1837 secondOp = kOpSbc;
1838 break;
1839 case OP_MUL_LONG:
1840 case OP_MUL_LONG_2ADDR:
1841 callOut = true;
1842 retReg = rRET0;
1843 funcOffset = OFFSETOF_MEMBER(Thread, pLmul);
1844 break;
1845 case OP_DIV_LONG:
1846 case OP_DIV_LONG_2ADDR:
1847 callOut = true;
1848 checkZero = true;
1849 retReg = rRET0;
1850 funcOffset = OFFSETOF_MEMBER(Thread, pLdivmod);
1851 break;
1852 /* NOTE - result is in rARG2/rARG3 instead of rRET0/rRET1 */
1853 // FIXME: is this also true for other targets, or could it be made true?
1854 case OP_REM_LONG:
1855 case OP_REM_LONG_2ADDR:
1856 callOut = true;
1857 checkZero = true;
1858 funcOffset = OFFSETOF_MEMBER(Thread, pLdivmod);
1859 retReg = rARG2;
1860 break;
1861 case OP_AND_LONG_2ADDR:
1862 case OP_AND_LONG:
1863 firstOp = kOpAnd;
1864 secondOp = kOpAnd;
1865 break;
1866 case OP_OR_LONG:
1867 case OP_OR_LONG_2ADDR:
1868 firstOp = kOpOr;
1869 secondOp = kOpOr;
1870 break;
1871 case OP_XOR_LONG:
1872 case OP_XOR_LONG_2ADDR:
1873 firstOp = kOpXor;
1874 secondOp = kOpXor;
1875 break;
1876 case OP_NEG_LONG: {
1877 rlSrc2 = loadValueWide(cUnit, rlSrc2, kCoreReg);
1878 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1879 int zReg = oatAllocTemp(cUnit);
1880 loadConstantNoClobber(cUnit, zReg, 0);
1881 // Check for destructive overlap
1882 if (rlResult.lowReg == rlSrc2.highReg) {
1883 int tReg = oatAllocTemp(cUnit);
 genRegCopy(cUnit, tReg, rlSrc2.highReg);  // Save the high word before the sub below clobbers it.
1884 opRegRegReg(cUnit, kOpSub, rlResult.lowReg,
1885 zReg, rlSrc2.lowReg);
1886 opRegRegReg(cUnit, kOpSbc, rlResult.highReg,
1887 zReg, tReg);
1888 oatFreeTemp(cUnit, tReg);
1889 } else {
1890 opRegRegReg(cUnit, kOpSub, rlResult.lowReg,
1891 zReg, rlSrc2.lowReg);
1892 opRegRegReg(cUnit, kOpSbc, rlResult.highReg,
1893 zReg, rlSrc2.highReg);
1894 }
1895 oatFreeTemp(cUnit, zReg);
1896 storeValueWide(cUnit, rlDest, rlResult);
1897 return false;
1898 }
1899 default:
1900 LOG(FATAL) << "Invalid long arith op";
1901 }
1902 if (!callOut) {
1903 genLong3Addr(cUnit, mir, firstOp, secondOp, rlDest, rlSrc1, rlSrc2);
1904 } else {
1905 int rTgt;
1906 oatFlushAllRegs(cUnit); /* Send everything to home location */
1907 if (checkZero) {
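            // A 64-bit divisor is zero only if both halves are zero: OR them together and
            // compare against zero (the ARM path uses a flag-setting ORR to skip the compare).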
1908 loadValueDirectWideFixed(cUnit, rlSrc2, rARG2, rARG3);
1909 rTgt = loadHelper(cUnit, funcOffset);
1910 loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
1911 int tReg = oatAllocTemp(cUnit);
1912#if defined(TARGET_ARM)
1913 newLIR4(cUnit, kThumb2OrrRRRs, tReg, rARG2, rARG3, 0);
1914 oatFreeTemp(cUnit, tReg);
1915 genCheck(cUnit, kCondEq, mir, kThrowDivZero);
1916#else
1917 opRegRegReg(cUnit, kOpOr, tReg, rARG2, rARG3);
1918 genImmedCheck(cUnit, kCondEq, tReg, 0, mir, kThrowDivZero);
1919 oatFreeTemp(cUnit, tReg);
1920#endif
1921 } else {
1922 rTgt = loadHelper(cUnit, funcOffset);
1923 loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
1924 loadValueDirectWideFixed(cUnit, rlSrc2, rARG2, rARG3);
1925 }
1926 callRuntimeHelper(cUnit, rTgt);
1927 // Adjust the return regs to handle the case of rem returning rARG2/rARG3
1928 if (retReg == rRET0)
1929 rlResult = oatGetReturnWide(cUnit);
1930 else
1931 rlResult = oatGetReturnWideAlt(cUnit);
1932 storeValueWide(cUnit, rlDest, rlResult);
1933 }
1934 return false;
1935}
1936
1937bool genConversionCall(CompilationUnit* cUnit, MIR* mir, int funcOffset,
1938 int srcSize, int tgtSize)
1939{
1940 /*
1941 * Don't optimize the register usage since it calls out to support
1942 * functions
1943 */
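    // srcSize and tgtSize are in 32-bit VM words: 1 selects the narrow value path,
    // 2 selects the wide register pair.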
1944 RegLocation rlSrc;
1945 RegLocation rlDest;
1946 oatFlushAllRegs(cUnit); /* Send everything to home location */
1947 int rTgt = loadHelper(cUnit, funcOffset);
1948 if (srcSize == 1) {
1949 rlSrc = oatGetSrc(cUnit, mir, 0);
1950 loadValueDirectFixed(cUnit, rlSrc, rARG0);
1951 } else {
1952 rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
1953 loadValueDirectWideFixed(cUnit, rlSrc, rARG0, rARG1);
1954 }
1955 callRuntimeHelper(cUnit, rTgt);
1956 if (tgtSize == 1) {
1957 RegLocation rlResult;
1958 rlDest = oatGetDest(cUnit, mir, 0);
1959 rlResult = oatGetReturn(cUnit);
1960 storeValue(cUnit, rlDest, rlResult);
1961 } else {
1962 RegLocation rlResult;
1963 rlDest = oatGetDestWide(cUnit, mir, 0, 1);
1964 rlResult = oatGetReturnWide(cUnit);
1965 storeValueWide(cUnit, rlDest, rlResult);
1966 }
1967 return false;
1968}
1969
1970void genNegFloat(CompilationUnit* cUnit, RegLocation rlDest, RegLocation rlSrc);
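// "Portable" float arithmetic: each operation is lowered to a call through the matching
// Thread helper entrypoint; presumably the fallback used when the target provides no
// hardware FP codegen path.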
1971bool genArithOpFloatPortable(CompilationUnit* cUnit, MIR* mir,
1972 RegLocation rlDest, RegLocation rlSrc1,
1973 RegLocation rlSrc2)
1974{
1975 RegLocation rlResult;
1976 int funcOffset;
1977
1978 switch (mir->dalvikInsn.opcode) {
1979 case OP_ADD_FLOAT_2ADDR:
1980 case OP_ADD_FLOAT:
1981 funcOffset = OFFSETOF_MEMBER(Thread, pFadd);
1982 break;
1983 case OP_SUB_FLOAT_2ADDR:
1984 case OP_SUB_FLOAT:
1985 funcOffset = OFFSETOF_MEMBER(Thread, pFsub);
1986 break;
1987 case OP_DIV_FLOAT_2ADDR:
1988 case OP_DIV_FLOAT:
1989 funcOffset = OFFSETOF_MEMBER(Thread, pFdiv);
1990 break;
1991 case OP_MUL_FLOAT_2ADDR:
1992 case OP_MUL_FLOAT:
1993 funcOffset = OFFSETOF_MEMBER(Thread, pFmul);
1994 break;
1995 case OP_REM_FLOAT_2ADDR:
1996 case OP_REM_FLOAT:
1997 funcOffset = OFFSETOF_MEMBER(Thread, pFmodf);
1998 break;
1999 case OP_NEG_FLOAT: {
2000 genNegFloat(cUnit, rlDest, rlSrc1);
2001 return false;
2002 }
2003 default:
2004 return true;
2005 }
2006 oatFlushAllRegs(cUnit); /* Send everything to home location */
2007 int rTgt = loadHelper(cUnit, funcOffset);
2008 loadValueDirectFixed(cUnit, rlSrc1, rARG0);
2009 loadValueDirectFixed(cUnit, rlSrc2, rARG1);
2010 callRuntimeHelper(cUnit, rTgt);
2011 rlResult = oatGetReturn(cUnit);
2012 storeValue(cUnit, rlDest, rlResult);
2013 return false;
2014}
2015
2016void genNegDouble(CompilationUnit* cUnit, RegLocation rlDst, RegLocation rlSrc);
2017bool genArithOpDoublePortable(CompilationUnit* cUnit, MIR* mir,
2018 RegLocation rlDest, RegLocation rlSrc1,
2019 RegLocation rlSrc2)
2020{
2021 RegLocation rlResult;
2022 int funcOffset;
2023
2024 switch (mir->dalvikInsn.opcode) {
2025 case OP_ADD_DOUBLE_2ADDR:
2026 case OP_ADD_DOUBLE:
2027 funcOffset = OFFSETOF_MEMBER(Thread, pDadd);
2028 break;
2029 case OP_SUB_DOUBLE_2ADDR:
2030 case OP_SUB_DOUBLE:
2031 funcOffset = OFFSETOF_MEMBER(Thread, pDsub);
2032 break;
2033 case OP_DIV_DOUBLE_2ADDR:
2034 case OP_DIV_DOUBLE:
2035 funcOffset = OFFSETOF_MEMBER(Thread, pDdiv);
2036 break;
2037 case OP_MUL_DOUBLE_2ADDR:
2038 case OP_MUL_DOUBLE:
2039 funcOffset = OFFSETOF_MEMBER(Thread, pDmul);
2040 break;
2041 case OP_REM_DOUBLE_2ADDR:
2042 case OP_REM_DOUBLE:
2043 funcOffset = OFFSETOF_MEMBER(Thread, pFmod);
2044 break;
2045 case OP_NEG_DOUBLE: {
2046 genNegDouble(cUnit, rlDest, rlSrc1);
2047 return false;
2048 }
2049 default:
2050 return true;
2051 }
2052 oatFlushAllRegs(cUnit); /* Send everything to home location */
2053 int rTgt = loadHelper(cUnit, funcOffset);
2054 loadValueDirectWideFixed(cUnit, rlSrc1, rARG0, rARG1);
2055 loadValueDirectWideFixed(cUnit, rlSrc2, rARG2, rARG3);
2056 callRuntimeHelper(cUnit, rTgt);
2057 rlResult = oatGetReturnWide(cUnit);
2058 storeValueWide(cUnit, rlDest, rlResult);
2059 return false;
2060}
2061
2062bool genConversionPortable(CompilationUnit* cUnit, MIR* mir)
2063{
2064 Opcode opcode = mir->dalvikInsn.opcode;
2065
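    // The pF2iz/pD2iz helpers presumably truncate toward zero (the "z" suffix),
    // matching Java's float/double-to-integral conversion semantics.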
2066 switch (opcode) {
2067 case OP_INT_TO_FLOAT:
2068 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pI2f),
2069 1, 1);
2070 case OP_FLOAT_TO_INT:
2071 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pF2iz),
2072 1, 1);
2073 case OP_DOUBLE_TO_FLOAT:
2074 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pD2f),
2075 2, 1);
2076 case OP_FLOAT_TO_DOUBLE:
2077 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pF2d),
2078 1, 2);
2079 case OP_INT_TO_DOUBLE:
2080 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pI2d),
2081 1, 2);
2082 case OP_DOUBLE_TO_INT:
2083 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pD2iz),
2084 2, 1);
2085 case OP_FLOAT_TO_LONG:
2086 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread,
2087 pF2l), 1, 2);
2088 case OP_LONG_TO_FLOAT:
2089 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pL2f),
2090 2, 1);
2091 case OP_DOUBLE_TO_LONG:
2092 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread,
2093 pD2l), 2, 2);
2094 case OP_LONG_TO_DOUBLE:
2095 return genConversionCall(cUnit, mir, OFFSETOF_MEMBER(Thread, pL2d),
2096 2, 2);
2097 default:
2098 return true;
2099 }
2100 return false;
2101}
2102
2103/*
2104 * Generate callout to updateDebugger. Note that we're overloading
2105 * the use of rSUSPEND here. When the debugger is active, this
2106 * register holds the address of the update function. So, if it's
2107 * non-null, we call out to it.
2108 *
2109 * Note also that rRET0 and rRET1 must be preserved across this
2110 * code. This must be handled by the stub.
2111 */
2112void genDebuggerUpdate(CompilationUnit* cUnit, int32_t offset)
2113{
2114 // The following DCHECK verifies that the offset is in range of a single load immediate
2115 DCHECK((offset == DEBUGGER_METHOD_ENTRY) ||
2116 (offset == DEBUGGER_METHOD_EXIT) || ((offset & 0xffff) == offset));
2117 oatClobberCalleeSave(cUnit);
2118#if defined(TARGET_ARM)
2119 opRegImm(cUnit, kOpCmp, rSUSPEND, 0);
2120 genIT(cUnit, kArmCondNe, "T");
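    // The IT block (guide "T") makes the next two instructions conditional on NE, so the
    // constant load and the call execute only when rSUSPEND holds a non-null update address.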
2121 loadConstant(cUnit, rARG2, offset); // arg2 <- Entry code
2122 opReg(cUnit, kOpBlx, rSUSPEND);
2123#else
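    // Targets without IT support instead branch around the call when rSUSPEND is null.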
2124 LIR* branch = genCmpImmBranch(cUnit, kCondEq, rSUSPEND, 0);
2125 loadConstant(cUnit, rARG2, offset);
2126 opReg(cUnit, kOpBlx, rSUSPEND);
2127 LIR* target = newLIR0(cUnit, kPseudoTargetLabel);
2128 target->defMask = ENCODE_ALL;
2129 branch->target = (LIR*)target;
2130#endif
2131 oatFreeTemp(cUnit, rARG2);
2132}
2133
2134 /* Generate a check for a pending suspend request, unless it can be elided */
2135void genSuspendTest(CompilationUnit* cUnit, MIR* mir)
2136{
2137 if (NO_SUSPEND || (mir->optimizationFlags & MIR_IGNORE_SUSPEND_CHECK)) {
2138 return;
2139 }
2140 oatFlushAllRegs(cUnit);
2141 LIR* branch;
2142 if (cUnit->genDebugger) {
2143 // If generating code for the debugger, always check for suspension
2144 branch = genUnconditionalBranch(cUnit, NULL);
2145 } else {
2146#if defined(TARGET_ARM)
2147 // In non-debug case, only check periodically
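        // In the non-debug case rSUSPEND serves as a countdown: the flag-setting subtract
        // sends us to the suspend launchpad only when the counter reaches zero.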
2148 newLIR2(cUnit, kThumbSubRI8, rSUSPEND, 1);
2149 branch = opCondBranch(cUnit, kCondEq);
2150#else
2151 opRegImm(cUnit, kOpSub, rSUSPEND, 1);
2152 branch = genCmpImmBranch(cUnit, kCondEq, rSUSPEND, 0);
2153#endif
2154 }
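    // The branch targets a suspend launchpad that is emitted later; operands[0] records the
    // label to resume at and operands[1] the dalvik offset for the launchpad code.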
2155 LIR* retLab = newLIR0(cUnit, kPseudoTargetLabel);
2156 retLab->defMask = ENCODE_ALL;
2157 LIR* target = (LIR*)oatNew(cUnit, sizeof(LIR), true, kAllocLIR);
2158 target->dalvikOffset = cUnit->currentDalvikOffset;
2159 target->opcode = kPseudoSuspendTarget;
2160 target->operands[0] = (intptr_t)retLab;
2161 target->operands[1] = mir->offset;
2162 branch->target = (LIR*)target;
2163 oatInsertGrowableList(cUnit, &cUnit->suspendLaunchpads, (intptr_t)target);
2164}
2165
2166} // namespace art