/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This file contains mips-specific codegen factory support.
 * It is included by
 *
 *        Codegen-$(TARGET_ARCH_VARIANT).c
 *
 */

#define SLOW_FIELD_PATH (cUnit->enableDebug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cUnit->enableDebug & (1 << kDebugSlowInvokePath))
#define SLOW_STRING_PATH (cUnit->enableDebug & (1 << kDebugSlowStringPath))
#define SLOW_TYPE_PATH (cUnit->enableDebug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_FIELD_PATH (cUnit->enableDebug & \
    (1 << kDebugSlowestFieldPath))
#define EXERCISE_SLOWEST_STRING_PATH (cUnit->enableDebug & \
    (1 << kDebugSlowestStringPath))
#define EXERCISE_RESOLVE_METHOD (cUnit->enableDebug & \
    (1 << kDebugExerciseResolveMethod))

// FIXME - this is the Mips version, change to MIPS

namespace art {

STATIC void genDebuggerUpdate(CompilationUnit* cUnit, int32_t offset);

/* Generate conditional branch instructions */
STATIC MipsLIR* genConditionalBranch(CompilationUnit* cUnit,
                                     MipsConditionCode cond,
                                     MipsLIR* target)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
    return NULL;
#if 0
    MipsLIR* branch = opCondBranch(cUnit, cond);
    branch->generic.target = (LIR*) target;
    return branch;
#endif
}
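
/*
 * Porting sketch (an assumption, not code from this revision): MIPS has no
 * condition-code register, so the compare and the branch fuse into a single
 * instruction that names both source registers. With a hypothetical helper
 * opCmpBranch of the shape shown, the body could collapse to:
 */
#if 0
STATIC MipsLIR* genConditionalBranch(CompilationUnit* cUnit,
                                     MipsConditionCode cond,
                                     int src1, int src2, MipsLIR* target)
{
    // opCmpBranch is assumed here; it is not declared in this file.
    MipsLIR* branch = opCmpBranch(cUnit, cond, src1, src2);
    branch->generic.target = (LIR*) target;
    return branch;
}
#endif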

/* Generate unconditional branch instructions */
STATIC MipsLIR* genUnconditionalBranch(CompilationUnit* cUnit, MipsLIR* target)
{
    MipsLIR* branch = opNone(cUnit, kOpUncondBr);
    branch->generic.target = (LIR*) target;
    return branch;
}

STATIC MipsLIR* callRuntimeHelper(CompilationUnit* cUnit, int reg)
{
    oatClobberCalleeSave(cUnit);
    return opReg(cUnit, kOpBlx, reg);
}

/*
 * Mark garbage collection card. Skip if the value we're storing is null.
 */
STATIC void markGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int regCardBase = oatAllocTemp(cUnit);
    int regCardNo = oatAllocTemp(cUnit);
    MipsLIR* branchOver = genCmpImmBranch(cUnit, kMipsCondEq, valReg, 0);
    loadWordDisp(cUnit, rSELF, Thread::CardTableOffset().Int32Value(),
                 regCardBase);
    opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, GC_CARD_SHIFT);
    storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
                     kUnsignedByte);
    MipsLIR* target = newLIR0(cUnit, kMipsPseudoTargetLabel);
    target->defMask = ENCODE_ALL;
    branchOver->generic.target = (LIR*)target;
    oatFreeTemp(cUnit, regCardBase);
    oatFreeTemp(cUnit, regCardNo);
#endif
}
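
/*
 * Worked example of the card math above (GC_CARD_SHIFT value of 7 assumed
 * for illustration only): each card byte then covers 2^7 = 128 heap bytes,
 * so a store to address 0x12345680 dirties card index 0x12345680 >> 7 =
 * 0x2468AD. Note the value written is the low byte of the card table base
 * register itself, which is why no separate "dirty" constant is loaded.
 */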

/*
 * Utility to load the current Method*. Broken out
 * to allow easy change between placing the current Method* in a
 * dedicated register or its home location in the frame.
 */
STATIC void loadCurrMethodDirect(CompilationUnit *cUnit, int rTgt)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
#if defined(METHOD_IN_REG)
    genRegCopy(cUnit, rTgt, rMETHOD);
#else
    loadWordDisp(cUnit, rSP, 0, rTgt);
#endif
#endif
}

STATIC int loadCurrMethod(CompilationUnit *cUnit)
{
#if defined(METHOD_IN_REG)
    return rMETHOD;
#else
    int mReg = oatAllocTemp(cUnit);
    loadCurrMethodDirect(cUnit, mReg);
    return mReg;
#endif
}

STATIC MipsLIR* genCheck(CompilationUnit* cUnit, MipsConditionCode cCode,
                         MIR* mir, MipsThrowKind kind)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
    return 0;
#if 0
    MipsLIR* tgt = (MipsLIR*)oatNew(cUnit, sizeof(MipsLIR), true, kAllocLIR);
    tgt->opcode = kMipsPseudoThrowTarget;
    tgt->operands[0] = kind;
    tgt->operands[1] = mir ? mir->offset : 0;
    MipsLIR* branch = genConditionalBranch(cUnit, cCode, tgt);
    // Remember branch target - will process later
    oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
    return branch;
#endif
}

STATIC MipsLIR* genImmedCheck(CompilationUnit* cUnit, MipsConditionCode cCode,
                              int reg, int immVal, MIR* mir,
                              MipsThrowKind kind)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
    return 0;
#if 0
    MipsLIR* tgt = (MipsLIR*)oatNew(cUnit, sizeof(MipsLIR), true, kAllocLIR);
    tgt->opcode = kMipsPseudoThrowTarget;
    tgt->operands[0] = kind;
    tgt->operands[1] = mir->offset;
    MipsLIR* branch;
    if (cCode == kMipsCondAl) {
        branch = genUnconditionalBranch(cUnit, tgt);
    } else {
        branch = genCmpImmBranch(cUnit, cCode, reg, immVal);
        branch->generic.target = (LIR*)tgt;
    }
    // Remember branch target - will process later
    oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
    return branch;
#endif
}

/* Perform null-check on a register. */
STATIC MipsLIR* genNullCheck(CompilationUnit* cUnit, int sReg, int mReg,
                             MIR* mir)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
    return 0;
#if 0
    if (!(cUnit->disableOpt & (1 << kNullCheckElimination)) &&
        mir->optimizationFlags & MIR_IGNORE_NULL_CHECK) {
        return NULL;
    }
    return genImmedCheck(cUnit, kMipsCondEq, mReg, 0, mir,
                         kMipsThrowNullPointer);
#endif
}

/* Perform check on two registers */
STATIC TGT_LIR* genRegRegCheck(CompilationUnit* cUnit, MipsConditionCode cCode,
                               int reg1, int reg2, MIR* mir,
                               MipsThrowKind kind)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
    return 0;
#if 0
    MipsLIR* tgt = (MipsLIR*)oatNew(cUnit, sizeof(MipsLIR), true, kAllocLIR);
    tgt->opcode = kMipsPseudoThrowTarget;
    tgt->operands[0] = kind;
    tgt->operands[1] = mir ? mir->offset : 0;
    tgt->operands[2] = reg1;
    tgt->operands[3] = reg2;
    opRegReg(cUnit, kOpCmp, reg1, reg2);
    MipsLIR* branch = genConditionalBranch(cUnit, cCode, tgt);
    // Remember branch target - will process later
    oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
    return branch;
#endif
}

/*
 * Let helper function take care of everything. Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
STATIC void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                        RegLocation rlSrc)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    uint32_t type_idx = mir->dalvikInsn.vC;
    if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                    cUnit->dex_cache,
                                                    *cUnit->dex_file,
                                                    type_idx)) {
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pAllocArrayFromCode), rLR);
    } else {
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pAllocArrayFromCodeWithAccessCheck), rLR);
    }
    loadCurrMethodDirect(cUnit, r1);        // arg1 <- Method*
    loadConstant(cUnit, r0, type_idx);      // arg0 <- type_id
    loadValueDirectFixed(cUnit, rlSrc, r2); // arg2 <- count
    callRuntimeHelper(cUnit, rLR);
    RegLocation rlResult = oatGetReturn(cUnit);
    storeValue(cUnit, rlDest, rlResult);
#endif
}
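
/*
 * Porting note (assumption, not from this file): the disabled sequence
 * above still uses ARM argument registers. On MIPS o32 the helper
 * arguments land in $a0..$a3 and indirect calls go through $t9, so with
 * hypothetical MIPS register names the tail might read:
 */
#if 0
    loadCurrMethodDirect(cUnit, r_A1);          // arg1 <- Method*
    loadConstant(cUnit, r_A0, type_idx);        // arg0 <- type_idx
    loadValueDirectFixed(cUnit, rlSrc, r_A2);   // arg2 <- count
    callRuntimeHelper(cUnit, r_T9);             // MIPS calls indirect via $t9
#endif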

/*
 * Similar to genNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class. Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
STATIC void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int elems = dInsn->vA;
    int typeId = dInsn->vB;
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                    cUnit->dex_cache,
                                                    *cUnit->dex_file,
                                                    typeId)) {
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pCheckAndAllocArrayFromCode), rLR);
    } else {
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pCheckAndAllocArrayFromCodeWithAccessCheck), rLR);
    }
    loadCurrMethodDirect(cUnit, r1);    // arg1 <- Method*
    loadConstant(cUnit, r0, typeId);    // arg0 <- type_id
    loadConstant(cUnit, r2, elems);     // arg2 <- count
    callRuntimeHelper(cUnit, rLR);
    /*
     * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
     * return region. Because AllocFromCode placed the new array
     * in r0, we'll just lock it into place. When debugger support is
     * added, it may be necessary to additionally copy all return
     * values to a home location in thread-local storage
     */
    oatLockTemp(cUnit, r0);

    // Having a range of 0 is legal
    if (isRange && (dInsn->vA > 0)) {
        /*
         * Bit of ugliness here. We're going to generate a mem copy loop
         * on the register range, but it is possible that some regs
         * in the range have been promoted. This is unlikely, but
         * before generating the copy, we'll just force a flush
         * of any regs in the source range that have been promoted to
         * home location.
         */
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation loc = oatUpdateLoc(cUnit,
                                           oatGetSrc(cUnit, mir, i));
            if (loc.location == kLocPhysReg) {
                storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
                              loc.lowReg, kWord);
            }
        }
        /*
         * TUNING note: generated code here could be much improved, but
         * this is an uncommon operation and isn't especially performance
         * critical.
         */
        int rSrc = oatAllocTemp(cUnit);
        int rDst = oatAllocTemp(cUnit);
        int rIdx = oatAllocTemp(cUnit);
        int rVal = rLR;  // Using a lot of temps, rLR is known free here
        // Set up source pointer
        RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
        opRegRegImm(cUnit, kOpAdd, rSrc, rSP,
                    oatSRegOffset(cUnit, rlFirst.sRegLow));
        // Set up the target pointer
        opRegRegImm(cUnit, kOpAdd, rDst, r0,
                    Array::DataOffset().Int32Value());
        // Set up the loop counter (known to be > 0)
        loadConstant(cUnit, rIdx, dInsn->vA - 1);
        // Generate the copy loop. Going backwards for convenience
        MipsLIR* target = newLIR0(cUnit, kMipsPseudoTargetLabel);
        target->defMask = ENCODE_ALL;
        // Copy next element
        loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
        storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
        // Use setflags encoding here
        newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
        MipsLIR* branch = opCondBranch(cUnit, kMipsCondGe);
        branch->generic.target = (LIR*)target;
    } else if (!isRange) {
        // TUNING: interleave
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation rlArg = loadValue(cUnit,
                                          oatGetSrc(cUnit, mir, i), kCoreReg);
            storeBaseDisp(cUnit, r0,
                          Array::DataOffset().Int32Value() +
                          i * 4, rlArg.lowReg, kWord);
            // If the loadValue caused a temp to be allocated, free it
            if (oatIsTemp(cUnit, rlArg.lowReg)) {
                oatFreeTemp(cUnit, rlArg.lowReg);
            }
        }
    }
#endif
}
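
/*
 * Porting sketch (assumption): the copy loop above bottoms out on the
 * Thumb2 subtract-and-set-flags encoding (kThumb2SubsRRI12), which has no
 * MIPS counterpart. A MIPS version would decrement the index and branch
 * on the register value itself, using only helpers already present in
 * this file:
 */
#if 0
    // Copy next element, then decrement; loop while rIdx >= 0.
    loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
    storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
    opRegImm(cUnit, kOpSub, rIdx, 1);
    MipsLIR* branch = genCmpImmBranch(cUnit, kMipsCondGe, rIdx, 0);
    branch->generic.target = (LIR*)target;
#endif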

STATIC void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
                    bool isLongOrDouble, bool isObject)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int fieldOffset;
    int ssbIndex;
    bool isVolatile;
    bool isReferrersClass;
    uint32_t fieldIdx = mir->dalvikInsn.vB;
    bool fastPath =
        cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, cUnit,
                                                fieldOffset, ssbIndex,
                                                isReferrersClass, isVolatile,
                                                true);
    if (fastPath && !SLOW_FIELD_PATH) {
        DCHECK_GE(fieldOffset, 0);
        int rBase;
        int rMethod;
        if (isReferrersClass) {
            // Fast path, static storage base is this method's class
            rMethod = loadCurrMethod(cUnit);
            rBase = oatAllocTemp(cUnit);
            loadWordDisp(cUnit, rMethod,
                         Method::DeclaringClassOffset().Int32Value(), rBase);
        } else {
            // Medium path, static storage base in a different class which
            // requires checks that the other class is initialized.
            DCHECK_GE(ssbIndex, 0);
            // May do runtime call so everything to home locations.
            oatFlushAllRegs(cUnit);
            // Using fixed register to sync with possible call to runtime
            // support.
            rMethod = r1;
            oatLockTemp(cUnit, rMethod);
            loadCurrMethodDirect(cUnit, rMethod);
            rBase = r0;
            oatLockTemp(cUnit, rBase);
            loadWordDisp(cUnit, rMethod,
                         Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                         rBase);
            loadWordDisp(cUnit, rBase,
                         Array::DataOffset().Int32Value() + sizeof(int32_t*) *
                         ssbIndex, rBase);
            // rBase now points at appropriate static storage base (Class*)
            // or NULL if not initialized. Check for NULL and call helper if NULL.
            // TUNING: fast path should fall through
            MipsLIR* branchOver = genCmpImmBranch(cUnit, kMipsCondNe, rBase, 0);
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
            loadConstant(cUnit, r0, ssbIndex);
            callRuntimeHelper(cUnit, rLR);
            MipsLIR* skipTarget = newLIR0(cUnit, kMipsPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            branchOver->generic.target = (LIR*)skipTarget;
        }
        // rBase now holds static storage base
        oatFreeTemp(cUnit, rMethod);
        if (isLongOrDouble) {
            rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
            rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
        } else {
            rlSrc = oatGetSrc(cUnit, mir, 0);
            rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
        }
        if (isVolatile) {
            oatGenMemBarrier(cUnit, kST);
        }
        if (isLongOrDouble) {
            storeBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
                              rlSrc.highReg);
        } else {
            storeWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
        }
        if (isVolatile) {
            oatGenMemBarrier(cUnit, kSY);
        }
        if (isObject) {
            markGCCard(cUnit, rlSrc.lowReg, rBase);
        }
        oatFreeTemp(cUnit, rBase);
    } else {
        oatFlushAllRegs(cUnit);  // Everything to home locations
        int setterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pSet64Static) :
                           (isObject ? OFFSETOF_MEMBER(Thread, pSetObjStatic)
                                     : OFFSETOF_MEMBER(Thread, pSet32Static));
        loadWordDisp(cUnit, rSELF, setterOffset, rLR);
        loadConstant(cUnit, r0, fieldIdx);
        if (isLongOrDouble) {
            loadValueDirectWideFixed(cUnit, rlSrc, r2, r3);
        } else {
            loadValueDirect(cUnit, rlSrc, r1);
        }
        callRuntimeHelper(cUnit, rLR);
    }
#endif
}

STATIC void genSget(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                    bool isLongOrDouble, bool isObject)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int fieldOffset;
    int ssbIndex;
    bool isVolatile;
    bool isReferrersClass;
    uint32_t fieldIdx = mir->dalvikInsn.vB;
    bool fastPath =
        cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, cUnit,
                                                fieldOffset, ssbIndex,
                                                isReferrersClass, isVolatile,
                                                false);
    if (fastPath && !SLOW_FIELD_PATH) {
        DCHECK_GE(fieldOffset, 0);
        int rBase;
        int rMethod;
        if (isReferrersClass) {
            // Fast path, static storage base is this method's class
            rMethod = loadCurrMethod(cUnit);
            rBase = oatAllocTemp(cUnit);
            loadWordDisp(cUnit, rMethod,
                         Method::DeclaringClassOffset().Int32Value(), rBase);
        } else {
            // Medium path, static storage base in a different class which
            // requires checks that the other class is initialized
            DCHECK_GE(ssbIndex, 0);
            // May do runtime call so everything to home locations.
            oatFlushAllRegs(cUnit);
            // Using fixed register to sync with possible call to runtime
            // support
            rMethod = r1;
            oatLockTemp(cUnit, rMethod);
            loadCurrMethodDirect(cUnit, rMethod);
            rBase = r0;
            oatLockTemp(cUnit, rBase);
            loadWordDisp(cUnit, rMethod,
                         Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                         rBase);
            loadWordDisp(cUnit, rBase,
                         Array::DataOffset().Int32Value() + sizeof(int32_t*) * ssbIndex,
                         rBase);
            // rBase now points at appropriate static storage base (Class*)
            // or NULL if not initialized. Check for NULL and call helper if NULL.
            // TUNING: fast path should fall through
            MipsLIR* branchOver = genCmpImmBranch(cUnit, kMipsCondNe, rBase, 0);
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
            loadConstant(cUnit, r0, ssbIndex);
            callRuntimeHelper(cUnit, rLR);
            MipsLIR* skipTarget = newLIR0(cUnit, kMipsPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            branchOver->generic.target = (LIR*)skipTarget;
        }
        // rBase now holds static storage base
        oatFreeTemp(cUnit, rMethod);
        rlDest = isLongOrDouble ? oatGetDestWide(cUnit, mir, 0, 1)
                                : oatGetDest(cUnit, mir, 0);
        RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
        if (isVolatile) {
            oatGenMemBarrier(cUnit, kSY);
        }
        if (isLongOrDouble) {
            loadBaseDispWide(cUnit, NULL, rBase, fieldOffset, rlResult.lowReg,
                             rlResult.highReg, INVALID_SREG);
        } else {
            loadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
        }
        oatFreeTemp(cUnit, rBase);
        if (isLongOrDouble) {
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            storeValue(cUnit, rlDest, rlResult);
        }
    } else {
        oatFlushAllRegs(cUnit);  // Everything to home locations
        int getterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pGet64Static) :
                           (isObject ? OFFSETOF_MEMBER(Thread, pGetObjStatic)
                                     : OFFSETOF_MEMBER(Thread, pGet32Static));
        loadWordDisp(cUnit, rSELF, getterOffset, rLR);
        loadConstant(cUnit, r0, fieldIdx);
        callRuntimeHelper(cUnit, rLR);
        if (isLongOrDouble) {
            RegLocation rlResult = oatGetReturnWide(cUnit);
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            RegLocation rlResult = oatGetReturn(cUnit);
            storeValue(cUnit, rlDest, rlResult);
        }
    }
#endif
}

typedef int (*NextCallInsn)(CompilationUnit*, MIR*, int, uint32_t dexIdx,
                            uint32_t methodIdx);

/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
STATIC int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
                          int state, uint32_t dexIdx, uint32_t unused)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    switch(state) {
    case 0:  // Get the current Method* [sets r0]
        loadCurrMethodDirect(cUnit, r0);
        break;
    case 1:  // Get method->code_and_direct_methods_
        loadWordDisp(cUnit, r0,
                     Method::GetDexCacheCodeAndDirectMethodsOffset().Int32Value(),
                     r0);
        break;
    case 2:  // Grab target method* and target code_
        loadWordDisp(cUnit, r0,
                     CodeAndDirectMethods::CodeOffsetInBytes(dexIdx), rLR);
        loadWordDisp(cUnit, r0,
                     CodeAndDirectMethods::MethodOffsetInBytes(dexIdx), r0);
        break;
    default:
        return -1;
    }
#endif
    return state + 1;
}
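
/*
 * Illustration (assumed driver shape, not code from this file): a
 * NextCallInsn callback advances one state per call and signals
 * completion by returning -1, so in its simplest form it can be driven
 * to completion in a loop; loadArgRegs below interleaves the same calls
 * with argument setup instead.
 */
#if 0
    int state = 0;
    while (state != -1) {
        state = nextCallInsn(cUnit, mir, state, dexIdx, methodIdx);
    }
#endif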

/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use rLR as a temp prior to target address loading.
 * Note also that we'll load the first argument ("this") into
 * r1 here rather than the standard loadArgRegs.
 */
STATIC int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
                         int state, uint32_t dexIdx, uint32_t methodIdx)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    RegLocation rlArg;
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time.
     */
    switch(state) {
    case 0:  // Get "this" [set r1]
        rlArg = oatGetSrc(cUnit, mir, 0);
        loadValueDirectFixed(cUnit, rlArg, r1);
        break;
    case 1:  // Is "this" null? [use r1]
        genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
        // get this->klass_ [use r1, set rLR]
        loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
        break;
    case 2:  // Get this->klass_->vtable [use rLR, set rLR]
        loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
        break;
    case 3:  // Get target method [use rLR, set r0]
        loadWordDisp(cUnit, rLR, (methodIdx * 4) +
                     Array::DataOffset().Int32Value(), r0);
        break;
    case 4:  // Get the target compiled code address [uses r0, sets rLR]
        loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
        break;
    default:
        return -1;
    }
#endif
    return state + 1;
}
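
/*
 * Address arithmetic for state 3 above, worked through with an
 * illustrative Array::DataOffset() of 12 bytes (the real value comes
 * from the runtime headers): for methodIdx 5 the Method* is loaded from
 * vtable + 12 + 5 * 4 = vtable + 32.
 */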

/*
 * Interleave launch code for INVOKE_SUPER. See comments
 * for nextVCallInsn.
 */
STATIC int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
                             int state, uint32_t dexIdx, uint32_t methodIdx)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time. Note also that this path assumes
     * that the check to verify that the target method index falls
     * within the size of the super's vtable has been done at compile-time.
     */
    RegLocation rlArg;
    switch(state) {
    case 0:  // Get current Method* [set r0]
        loadCurrMethodDirect(cUnit, r0);
        // Load "this" [set r1]
        rlArg = oatGetSrc(cUnit, mir, 0);
        loadValueDirectFixed(cUnit, rlArg, r1);
        // Get method->declaring_class_ [use r0, set rLR]
        loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
                     rLR);
        // Is "this" null? [use r1]
        genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
        break;
    case 1:  // Get method->declaring_class_->super_class [use rLR, set rLR]
        loadWordDisp(cUnit, rLR, Class::SuperClassOffset().Int32Value(),
                     rLR);
        break;
    case 2:  // Get ...->super_class_->vtable [u/s rLR]
        loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
        break;
    case 3:  // Get target method [use rLR, set r0]
        loadWordDisp(cUnit, rLR, (methodIdx * 4) +
                     Array::DataOffset().Int32Value(), r0);
        break;
    case 4:  // Get the target compiled code address [uses r0, sets rLR]
        loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
        break;
    default:
        return -1;
    }
#endif
    return state + 1;
}

STATIC int nextInvokeInsnSP(CompilationUnit* cUnit, MIR* mir, int trampoline,
                            int state, uint32_t dexIdx, uint32_t methodIdx)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    /*
     * This handles the case in which the base method is not fully
     * resolved at compile time; we bail to a runtime helper.
     */
    if (state == 0) {
        // Load trampoline target
        loadWordDisp(cUnit, rSELF, trampoline, rLR);
        // Load r0 with method index
        loadConstant(cUnit, r0, dexIdx);
        return 1;
    }
#endif
    return -1;
}

STATIC int nextStaticCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                                int state, uint32_t dexIdx, uint32_t methodIdx)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeStaticTrampolineWithAccessCheck);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

STATIC int nextDirectCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                                int state, uint32_t dexIdx, uint32_t methodIdx)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeDirectTrampolineWithAccessCheck);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

STATIC int nextSuperCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                               int state, uint32_t dexIdx, uint32_t methodIdx)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeSuperTrampolineWithAccessCheck);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

STATIC int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                           int state, uint32_t dexIdx, uint32_t methodIdx)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeVirtualTrampolineWithAccessCheck);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

/*
 * All invoke-interface calls bounce off of art_invoke_interface_trampoline,
 * which will locate the target and continue on via a tail call.
 */
STATIC int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir,
                                 int state, uint32_t dexIdx, uint32_t unused)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampoline);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

STATIC int nextInterfaceCallInsnWithAccessCheck(CompilationUnit* cUnit,
                                                MIR* mir, int state,
                                                uint32_t dexIdx,
                                                uint32_t unused)
{
    int trampoline = OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampolineWithAccessCheck);
    return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
}

STATIC int loadArgRegs(CompilationUnit* cUnit, MIR* mir,
                       DecodedInstruction* dInsn, int callState,
                       NextCallInsn nextCallInsn, uint32_t dexIdx,
                       uint32_t methodIdx, bool skipThis)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int nextReg = r1;
    int nextArg = 0;
    if (skipThis) {
        nextReg++;
        nextArg++;
    }
    for (; (nextReg <= r3) && (nextArg < mir->ssaRep->numUses); nextReg++) {
        RegLocation rlArg = oatGetRawSrc(cUnit, mir, nextArg++);
        rlArg = oatUpdateRawLoc(cUnit, rlArg);
        if (rlArg.wide && (nextReg <= r2)) {
            loadValueDirectWideFixed(cUnit, rlArg, nextReg, nextReg + 1);
            nextReg++;
            nextArg++;
        } else {
            rlArg.wide = false;
            loadValueDirectFixed(cUnit, rlArg, nextReg);
        }
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
    }
#endif
    return callState;
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * r1 .. r3. On entry r0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
STATIC int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
                                DecodedInstruction* dInsn, int callState,
                                MipsLIR** pcrLabel, NextCallInsn nextCallInsn,
                                uint32_t dexIdx, uint32_t methodIdx,
                                bool skipThis)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    RegLocation rlArg;

    /* If no arguments, just return */
    if (dInsn->vA == 0)
        return callState;

    callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);

    DCHECK_LE(dInsn->vA, 5U);
    if (dInsn->vA > 3) {
        uint32_t nextUse = 3;
        // Detect special case of wide arg spanning arg3/arg4
        RegLocation rlUse0 = oatGetRawSrc(cUnit, mir, 0);
        RegLocation rlUse1 = oatGetRawSrc(cUnit, mir, 1);
        RegLocation rlUse2 = oatGetRawSrc(cUnit, mir, 2);
        if (((!rlUse0.wide && !rlUse1.wide) || rlUse0.wide) &&
            rlUse2.wide) {
            int reg;
            // Wide spans, we need the 2nd half of uses[2].
            rlArg = oatUpdateLocWide(cUnit, rlUse2);
            if (rlArg.location == kLocPhysReg) {
                reg = rlArg.highReg;
            } else {
                // r2 & r3 can safely be used here
                reg = r3;
                loadWordDisp(cUnit, rSP,
                             oatSRegOffset(cUnit, rlArg.sRegLow) + 4, reg);
                callState = nextCallInsn(cUnit, mir, callState, dexIdx,
                                         methodIdx);
            }
            storeBaseDisp(cUnit, rSP, (nextUse + 1) * 4, reg, kWord);
            storeBaseDisp(cUnit, rSP, 16 /* (3+1)*4 */, reg, kWord);
            callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
            nextUse++;
        }
        // Loop through the rest
        while (nextUse < dInsn->vA) {
            int lowReg;
            int highReg;
            rlArg = oatGetRawSrc(cUnit, mir, nextUse);
            rlArg = oatUpdateRawLoc(cUnit, rlArg);
            if (rlArg.location == kLocPhysReg) {
                lowReg = rlArg.lowReg;
                highReg = rlArg.highReg;
            } else {
                lowReg = r2;
                highReg = r3;
                if (rlArg.wide) {
                    loadValueDirectWideFixed(cUnit, rlArg, lowReg, highReg);
                } else {
                    loadValueDirectFixed(cUnit, rlArg, lowReg);
                }
                callState = nextCallInsn(cUnit, mir, callState, dexIdx,
                                         methodIdx);
            }
            int outsOffset = (nextUse + 1) * 4;
            if (rlArg.wide) {
                storeBaseDispWide(cUnit, rSP, outsOffset, lowReg, highReg);
                nextUse += 2;
            } else {
                storeWordDisp(cUnit, rSP, outsOffset, lowReg);
                nextUse++;
            }
            callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        }
    }

    callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
                            dexIdx, methodIdx, skipThis);

    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
    }
#endif
    return callState;
}

/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in r1-r3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in r1-r3
 *
 */
STATIC int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
                              DecodedInstruction* dInsn, int callState,
                              MipsLIR** pcrLabel, NextCallInsn nextCallInsn,
                              uint32_t dexIdx, uint32_t methodIdx,
                              bool skipThis)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int firstArg = dInsn->vC;
    int numArgs = dInsn->vA;

    // If we can treat it as non-range (Jumbo ops will use range form)
    if (numArgs <= 5)
        return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
                                    nextCallInsn, dexIdx, methodIdx,
                                    skipThis);
    /*
     * Make sure the range list doesn't span the break between the normal
     * Dalvik vRegs and the ins.
     */
    int highestArg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
    int boundaryReg = cUnit->numDalvikRegisters - cUnit->numIns;
    if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
        LOG(FATAL) << "Argument list spanned locals & args";
    }

    /*
     * First load the non-register arguments. Both forms expect all
     * of the source arguments to be in their home frame location, so
     * scan the sReg names and flush any that have been promoted to
     * frame backing storage.
     */
    // Scan the rest of the args - if in physReg flush to memory
    for (int nextArg = 0; nextArg < numArgs;) {
        RegLocation loc = oatGetRawSrc(cUnit, mir, nextArg);
        if (loc.wide) {
            loc = oatUpdateLocWide(cUnit, loc);
            if ((nextArg >= 2) && (loc.location == kLocPhysReg)) {
                storeBaseDispWide(cUnit, rSP,
                                  oatSRegOffset(cUnit, loc.sRegLow),
                                  loc.lowReg, loc.highReg);
            }
            nextArg += 2;
        } else {
            loc = oatUpdateLoc(cUnit, loc);
            if ((nextArg >= 3) && (loc.location == kLocPhysReg)) {
                storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
                              loc.lowReg, kWord);
            }
            nextArg++;
        }
    }

    int startOffset = oatSRegOffset(cUnit,
                                    cUnit->regLocation[mir->ssaRep->uses[3]].sRegLow);
    int outsOffset = 4 /* Method* */ + (3 * 4);
    if (numArgs >= 20) {
        // Generate memcpy
        opRegRegImm(cUnit, kOpAdd, r0, rSP, outsOffset);
        opRegRegImm(cUnit, kOpAdd, r1, rSP, startOffset);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pMemcpy), rLR);
        loadConstant(cUnit, r2, (numArgs - 3) * 4);
        callRuntimeHelper(cUnit, rLR);
        // Restore Method*
        loadCurrMethodDirect(cUnit, r0);
    } else {
        // Use vldm/vstm pair using r3 as a temp
        int regsLeft = std::min(numArgs - 3, 16);
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, startOffset);
        MipsLIR* ld = newLIR3(cUnit, kThumb2Vldms, r3, fr0, regsLeft);
        // TUNING: loosen barrier
        ld->defMask = ENCODE_ALL;
        setMemRefType(ld, true /* isLoad */, kDalvikReg);
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, 4 /* Method* */ + (3 * 4));
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        MipsLIR* st = newLIR3(cUnit, kThumb2Vstms, r3, fr0, regsLeft);
        setMemRefType(st, false /* isLoad */, kDalvikReg);
        st->defMask = ENCODE_ALL;
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
    }

    callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
                            dexIdx, methodIdx, skipThis);

    callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
    }
#endif
    return callState;
}
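
/*
 * Porting sketch (assumption): kThumb2Vldms/kThumb2Vstms are ARM
 * multi-register transfer encodings with no MIPS equivalent, so the
 * "< 20 arguments" branch above would likely become a compile-time
 * unrolled word copy through a temp, e.g.:
 */
#if 0
    int rVal = oatAllocTemp(cUnit);
    for (int i = 0; i < regsLeft; i++) {
        loadWordDisp(cUnit, rSP, startOffset + i * 4, rVal);
        storeBaseDisp(cUnit, rSP, outsOffset + i * 4, rVal, kWord);
    }
    oatFreeTemp(cUnit, rVal);
#endif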

// Debugging routine - if null target, branch to DebugMe
STATIC void genShowTarget(CompilationUnit* cUnit)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    MipsLIR* branchOver = genCmpImmBranch(cUnit, kMipsCondNe, rLR, 0);
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pDebugMe), rLR);
    MipsLIR* target = newLIR0(cUnit, kMipsPseudoTargetLabel);
    target->defMask = -1;
    branchOver->generic.target = (LIR*)target;
#endif
}

STATIC void genThrowVerificationError(CompilationUnit* cUnit, MIR* mir)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode), rLR);
    loadConstant(cUnit, r0, mir->dalvikInsn.vA);
    loadConstant(cUnit, r1, mir->dalvikInsn.vB);
    callRuntimeHelper(cUnit, rLR);
#endif
}

STATIC void genCompareAndBranch(CompilationUnit* cUnit, BasicBlock* bb,
                                MIR* mir, RegLocation rlSrc1,
                                RegLocation rlSrc2, MipsLIR* labelList)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    MipsConditionCode cond;
    rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
    rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
    opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
    Opcode opcode = mir->dalvikInsn.opcode;
    switch(opcode) {
    case OP_IF_EQ:
        cond = kMipsCondEq;
        break;
    case OP_IF_NE:
        cond = kMipsCondNe;
        break;
    case OP_IF_LT:
        cond = kMipsCondLt;
        break;
    case OP_IF_GE:
        cond = kMipsCondGe;
        break;
    case OP_IF_GT:
        cond = kMipsCondGt;
        break;
    case OP_IF_LE:
        cond = kMipsCondLe;
        break;
    default:
        cond = (MipsConditionCode)0;
        LOG(FATAL) << "Unexpected opcode " << (int)opcode;
    }
    genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
    genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
#endif
}

STATIC void genCompareZeroAndBranch(CompilationUnit* cUnit, BasicBlock* bb,
                                    MIR* mir, RegLocation rlSrc,
                                    MipsLIR* labelList)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    MipsConditionCode cond;
    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
    opRegImm(cUnit, kOpCmp, rlSrc.lowReg, 0);
    Opcode opcode = mir->dalvikInsn.opcode;
    switch(opcode) {
    case OP_IF_EQZ:
        cond = kMipsCondEq;
        break;
    case OP_IF_NEZ:
        cond = kMipsCondNe;
        break;
    case OP_IF_LTZ:
        cond = kMipsCondLt;
        break;
    case OP_IF_GEZ:
        cond = kMipsCondGe;
        break;
    case OP_IF_GTZ:
        cond = kMipsCondGt;
        break;
    case OP_IF_LEZ:
        cond = kMipsCondLe;
        break;
    default:
        cond = (MipsConditionCode)0;
        LOG(FATAL) << "Unexpected opcode " << (int)opcode;
    }
    genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
    genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
#endif
}

STATIC void genIntToLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                         RegLocation rlSrc)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    if (rlSrc.location == kLocPhysReg) {
        genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
    } else {
        loadValueDirect(cUnit, rlSrc, rlResult.lowReg);
    }
    opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
                rlResult.lowReg, 31);
    storeValueWide(cUnit, rlDest, rlResult);
#endif
}

STATIC void genIntNarrowing(CompilationUnit* cUnit, MIR* mir,
                            RegLocation rlDest, RegLocation rlSrc)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
    RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    OpKind op = kOpInvalid;
    switch(mir->dalvikInsn.opcode) {
    case OP_INT_TO_BYTE:
        op = kOp2Byte;
        break;
    case OP_INT_TO_SHORT:
        op = kOp2Short;
        break;
    case OP_INT_TO_CHAR:
        op = kOp2Char;
        break;
    default:
        LOG(ERROR) << "Bad int conversion type";
    }
    opRegReg(cUnit, op, rlResult.lowReg, rlSrc.lowReg);
    storeValue(cUnit, rlDest, rlResult);
#endif
}

/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 */
STATIC void flushIns(CompilationUnit* cUnit)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    if (cUnit->numIns == 0)
        return;
    int firstArgReg = r1;
    int lastArgReg = r3;
    int startVReg = cUnit->numDalvikRegisters - cUnit->numIns;
    /*
     * Arguments passed in registers should be flushed
     * to their backing locations in the frame for now.
     * Also, we need to do initial assignment for promoted
     * arguments. NOTE: an older version of dx had an issue
     * in which it would reuse static method argument registers.
     * This could result in the same Dalvik virtual register
     * being promoted to both core and fp regs. In those
     * cases, copy argument to both. This will be uncommon
     * enough that it isn't worth attempting to optimize.
     */
    for (int i = 0; i < cUnit->numIns; i++) {
        PromotionMap vMap = cUnit->promotionMap[startVReg + i];
        if (i <= (lastArgReg - firstArgReg)) {
            // If arriving in register
            if (vMap.coreLocation == kLocPhysReg) {
                genRegCopy(cUnit, vMap.coreReg, firstArgReg + i);
            }
            if (vMap.fpLocation == kLocPhysReg) {
                genRegCopy(cUnit, vMap.fpReg, firstArgReg + i);
            }
            // Also put a copy in memory in case we're partially promoted
            storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
                          firstArgReg + i, kWord);
        } else {
            // If arriving in frame & promoted
            if (vMap.coreLocation == kLocPhysReg) {
                loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
                             vMap.coreReg);
            }
            if (vMap.fpLocation == kLocPhysReg) {
                loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
                             vMap.fpReg);
            }
        }
    }
#endif
}

STATIC void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
    /*
     * On entry, r0, r1, r2 & r3 are live. Let the register allocation
     * mechanism know so it doesn't try to use any of them when
     * expanding the frame or flushing. This leaves the utility
     * code with a single temp: r12. This should be enough.
     */
    oatLockTemp(cUnit, r0);
    oatLockTemp(cUnit, r1);
    oatLockTemp(cUnit, r2);
    oatLockTemp(cUnit, r3);

    /*
     * We can safely skip the stack overflow check if we're
     * a leaf *and* our frame size < fudge factor.
     */
    bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
                              ((size_t)cUnit->frameSize <
                               Thread::kStackOverflowReservedBytes));
    newLIR0(cUnit, kMipsPseudoMethodEntry);
    if (!skipOverflowCheck) {
        /* Load stack limit */
        loadWordDisp(cUnit, rSELF,
                     Thread::StackEndOffset().Int32Value(), r12);
    }
    /* Spill core callee saves */
    newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
    /* Need to spill any FP regs? */
    if (cUnit->numFPSpills) {
        /*
         * NOTE: fp spills are a little different from core spills in that
         * they are pushed as a contiguous block. When promoting from
         * the fp set, we must allocate all singles from s16..highest-promoted
         */
        newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
    }
    if (!skipOverflowCheck) {
        opRegRegImm(cUnit, kOpSub, rLR, rSP,
                    cUnit->frameSize - (spillCount * 4));
        genRegRegCheck(cUnit, kMipsCondCc, rLR, r12, NULL,
                       kMipsThrowStackOverflow);
        genRegCopy(cUnit, rSP, rLR);  // Establish stack
    } else {
        opRegImm(cUnit, kOpSub, rSP,
                 cUnit->frameSize - (spillCount * 4));
    }
    storeBaseDisp(cUnit, rSP, 0, r0, kWord);
    flushIns(cUnit);

    if (cUnit->genDebugger) {
        // Refresh update debugger callout
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode), rSUSPEND);
        genDebuggerUpdate(cUnit, DEBUGGER_METHOD_ENTRY);
    }

    oatFreeTemp(cUnit, r0);
    oatFreeTemp(cUnit, r1);
    oatFreeTemp(cUnit, r2);
    oatFreeTemp(cUnit, r3);
#endif
}
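
/*
 * Porting sketch (assumption): kThumb2Push/kThumb2VPushCS are ARM
 * push-multiple encodings. MIPS spills callee saves one store at a time,
 * so once the frame has been dropped by frameSize the core spill walk
 * might read:
 */
#if 0
    int offset = cUnit->frameSize;
    for (int reg = 0; reg < 32; reg++) {
        if (cUnit->coreSpillMask & (1 << reg)) {
            offset -= 4;
            storeWordDisp(cUnit, rSP, offset, reg);
        }
    }
#endif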

STATIC void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
    /*
     * In the exit path, r0/r1 are live - make sure they aren't
     * allocated by the register utilities as temps.
     */
    oatLockTemp(cUnit, r0);
    oatLockTemp(cUnit, r1);

    newLIR0(cUnit, kMipsPseudoMethodExit);
    /* If we're compiling for the debugger, generate an update callout */
    if (cUnit->genDebugger) {
        genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
    }
    opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (spillCount * 4));
    /* Need to restore any FP callee saves? */
    if (cUnit->numFPSpills) {
        newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
    }
    if (cUnit->coreSpillMask & (1 << rLR)) {
        /* Unspill rLR to rPC */
        cUnit->coreSpillMask &= ~(1 << rLR);
        cUnit->coreSpillMask |= (1 << rPC);
    }
    newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
    if (!(cUnit->coreSpillMask & (1 << rPC))) {
        /* We didn't pop to rPC, so must do a bx rLR */
        newLIR1(cUnit, kThumbBx, rLR);
    }
#endif
}
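
/*
 * Porting note (assumption): the rLR-to-rPC pop trick above is
 * ARM-specific. A MIPS exit sequence would reload each saved register
 * with lw and return via a jump through the return-address register
 * (jr $ra), since MIPS has no pop-multiple or pop-to-PC encoding.
 */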

/*
 * Nop any unconditional branches that go to the next instruction.
 * Note: new redundant branches may be inserted later, and we'll
 * use a check in final instruction assembly to nop those out.
 */
void removeRedundantBranches(CompilationUnit* cUnit)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    MipsLIR* thisLIR;

    for (thisLIR = (MipsLIR*) cUnit->firstLIRInsn;
         thisLIR != (MipsLIR*) cUnit->lastLIRInsn;
         thisLIR = NEXT_LIR(thisLIR)) {

        /* Branch to the next instruction */
        if ((thisLIR->opcode == kThumbBUncond) ||
            (thisLIR->opcode == kThumb2BUncond)) {
            MipsLIR* nextLIR = thisLIR;

            while (true) {
                nextLIR = NEXT_LIR(nextLIR);

                /*
                 * Is the branch target the next instruction?
                 */
                if (nextLIR == (MipsLIR*) thisLIR->generic.target) {
                    thisLIR->flags.isNop = true;
                    break;
                }

                /*
                 * Found real useful stuff between the branch and the target.
                 * Need to explicitly check the lastLIRInsn here because it
                 * might be the last real instruction.
                 */
                if (!isPseudoOpcode(nextLIR->opcode) ||
                    (nextLIR == (MipsLIR*) cUnit->lastLIRInsn))
                    break;
            }
        }
    }
#endif
}

STATIC void handleSuspendLaunchpads(CompilationUnit *cUnit)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    MipsLIR** suspendLabel =
        (MipsLIR **) cUnit->suspendLaunchpads.elemList;
    int numElems = cUnit->suspendLaunchpads.numUsed;

    for (int i = 0; i < numElems; i++) {
        /* TUNING: move suspend count load into helper */
        MipsLIR* lab = suspendLabel[i];
        MipsLIR* resumeLab = (MipsLIR*)lab->operands[0];
        cUnit->currentDalvikOffset = lab->operands[1];
        oatAppendLIR(cUnit, (LIR *)lab);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pTestSuspendFromCode), rLR);
        if (!cUnit->genDebugger) {
            // use rSUSPEND for suspend count
            loadWordDisp(cUnit, rSELF,
                         Thread::SuspendCountOffset().Int32Value(), rSUSPEND);
        }
        opReg(cUnit, kOpBlx, rLR);
        if (cUnit->genDebugger) {
            // use rSUSPEND for update debugger
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode), rSUSPEND);
        }
        genUnconditionalBranch(cUnit, resumeLab);
    }
#endif
}

STATIC void handleThrowLaunchpads(CompilationUnit *cUnit)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    MipsLIR** throwLabel =
        (MipsLIR **) cUnit->throwLaunchpads.elemList;
    int numElems = cUnit->throwLaunchpads.numUsed;
    int i;

    for (i = 0; i < numElems; i++) {
        MipsLIR* lab = throwLabel[i];
        cUnit->currentDalvikOffset = lab->operands[1];
        oatAppendLIR(cUnit, (LIR *)lab);
        int funcOffset = 0;
        int v1 = lab->operands[2];
        int v2 = lab->operands[3];
        switch(lab->operands[0]) {
        case kMipsThrowNullPointer:
            funcOffset = OFFSETOF_MEMBER(Thread, pThrowNullPointerFromCode);
            break;
        case kMipsThrowArrayBounds:
            if (v2 != r0) {
                genRegCopy(cUnit, r0, v1);
                genRegCopy(cUnit, r1, v2);
            } else {
                if (v1 == r1) {
                    genRegCopy(cUnit, r12, v1);
                    genRegCopy(cUnit, r1, v2);
                    genRegCopy(cUnit, r0, r12);
                } else {
                    genRegCopy(cUnit, r1, v2);
                    genRegCopy(cUnit, r0, v1);
                }
            }
            funcOffset = OFFSETOF_MEMBER(Thread, pThrowArrayBoundsFromCode);
            break;
        case kMipsThrowDivZero:
            funcOffset = OFFSETOF_MEMBER(Thread, pThrowDivZeroFromCode);
            break;
        case kMipsThrowVerificationError:
            loadConstant(cUnit, r0, v1);
            loadConstant(cUnit, r1, v2);
            funcOffset =
                OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode);
            break;
        case kMipsThrowNegArraySize:
            genRegCopy(cUnit, r0, v1);
            funcOffset =
                OFFSETOF_MEMBER(Thread, pThrowNegArraySizeFromCode);
            break;
        case kMipsThrowNoSuchMethod:
            genRegCopy(cUnit, r0, v1);
            funcOffset =
                OFFSETOF_MEMBER(Thread, pThrowNoSuchMethodFromCode);
            break;
        case kMipsThrowStackOverflow:
            funcOffset =
                OFFSETOF_MEMBER(Thread, pThrowStackOverflowFromCode);
            // Restore stack alignment
            opRegImm(cUnit, kOpAdd, rSP,
                     (cUnit->numCoreSpills + cUnit->numFPSpills) * 4);
            break;
        default:
            LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
        }
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        callRuntimeHelper(cUnit, rLR);
    }
#endif
}

/* Common initialization routine for an architecture family */
bool oatArchInit()
{
    int i;

    for (i = 0; i < kMipsLast; i++) {
        if (EncodingMap[i].opcode != i) {
            LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
                " is wrong: expecting " << i << ", seeing " <<
                (int)EncodingMap[i].opcode;
        }
    }

    return oatArchVariantInit();
}

/* Needed by the Assembler */
void oatSetupResourceMasks(MipsLIR* lir)
{
    setupResourceMasks(lir);
}

}  // namespace art