blob: cb9645de56b6d78e00dc01060fa7e55e26b736fd [file] [log] [blame]
buzbeee3acd072012-02-25 17:03:10 -08001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17/*
18 * This file contains mips-specific codegen factory support.
19 * It is included by
20 *
21 * Codegen-$(TARGET_ARCH_VARIANT).c
22 *
23 */
24
// Debug-mode convenience predicates: each expands to a nonzero value when the
// corresponding kDebug* bit is set in cUnit->enableDebug, forcing the slower
// (helper-based) code-generation path for that operation.
#define SLOW_FIELD_PATH (cUnit->enableDebug & (1 << kDebugSlowFieldPath))
#define SLOW_INVOKE_PATH (cUnit->enableDebug & (1 << kDebugSlowInvokePath))
#define SLOW_STRING_PATH (cUnit->enableDebug & (1 << kDebugSlowStringPath))
#define SLOW_TYPE_PATH (cUnit->enableDebug & (1 << kDebugSlowTypePath))
#define EXERCISE_SLOWEST_FIELD_PATH (cUnit->enableDebug & \
    (1 << kDebugSlowestFieldPath))
#define EXERCISE_SLOWEST_STRING_PATH (cUnit->enableDebug & \
    (1 << kDebugSlowestStringPath))
#define EXERCISE_RESOLVE_METHOD (cUnit->enableDebug & \
    (1 << kDebugExerciseResolveMethod))
35
36// FIXME - this is the Mips version, change to MIPS
37
38namespace art {
39
40STATIC void genDebuggerUpdate(CompilationUnit* cUnit, int32_t offset);
41
buzbeee3acd072012-02-25 17:03:10 -080042/* Generate unconditional branch instructions */
43STATIC MipsLIR* genUnconditionalBranch(CompilationUnit* cUnit, MipsLIR* target)
44{
45 MipsLIR* branch = opNone(cUnit, kOpUncondBr);
46 branch->generic.target = (LIR*) target;
47 return branch;
48}
49
/*
 * Emit a branch-and-link through "reg" to a runtime helper routine.
 * Registers not preserved across the call are marked clobbered first
 * (via oatClobberCalleeSave) so the tracker won't rely on their contents.
 * Returns the generated call LIR.
 */
STATIC MipsLIR* callRuntimeHelper(CompilationUnit* cUnit, int reg)
{
    oatClobberCalleeSave(cUnit);
    return opReg(cUnit, kOpBlx, reg);
}
55
/*
 * Mark the garbage-collection card covering the object at tgtAddrReg.
 * The mark is skipped entirely when the value being stored (valReg) is
 * null, since a null store needs no card dirtying.
 */
STATIC void markGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg)
{
    int regCardBase = oatAllocTemp(cUnit);
    int regCardNo = oatAllocTemp(cUnit);
    // Branch around the card mark when the stored value is null.
    MipsLIR* branchOver = opCompareBranchCC(cUnit, kMipsCondEq, valReg, r_ZERO);
    // Load the card table base from the current Thread.
    loadWordDisp(cUnit, rSELF, Thread::CardTableOffset().Int32Value(),
                 regCardBase);
    // Card index = target address >> GC_CARD_SHIFT.
    opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, GC_CARD_SHIFT);
    // Dirty the card with a byte store: card_table[card_no] = low byte of base.
    storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
                     kUnsignedByte);
    MipsLIR* target = newLIR0(cUnit, kPseudoTargetLabel);
    target->defMask = ENCODE_ALL;  // scheduling barrier at the join point
    branchOver->generic.target = (LIR*)target;
    oatFreeTemp(cUnit, regCardBase);
    oatFreeTemp(cUnit, regCardNo);
}
75
/*
 * Utility to load the current Method* into rTgt. Broken out
 * to allow easy change between placing the current Method* in a
 * dedicated register (METHOD_IN_REG) or its home location in the
 * frame (slot 0 off rSP).
 */
STATIC void loadCurrMethodDirect(CompilationUnit *cUnit, int rTgt)
{
#if defined(METHOD_IN_REG)
    genRegCopy(cUnit, rTgt, rMETHOD);
#else
    loadWordDisp(cUnit, rSP, 0, rTgt);
#endif
}
89
/*
 * Return a register holding the current Method*. When the method pointer
 * lives in a dedicated register, that register is returned directly;
 * otherwise a temp is allocated and loaded from the frame home location
 * (the caller is responsible for eventually freeing that temp).
 */
STATIC int loadCurrMethod(CompilationUnit *cUnit)
{
#if defined(METHOD_IN_REG)
    return rMETHOD;
#else
    int mReg = oatAllocTemp(cUnit);
    loadCurrMethodDirect(cUnit, mReg);
    return mReg;
#endif
}
100
buzbeee3acd072012-02-25 17:03:10 -0800101STATIC MipsLIR* genImmedCheck(CompilationUnit* cUnit, MipsConditionCode cCode,
102 int reg, int immVal, MIR* mir, MipsThrowKind kind)
103{
buzbeee3acd072012-02-25 17:03:10 -0800104 MipsLIR* tgt = (MipsLIR*)oatNew(cUnit, sizeof(MipsLIR), true, kAllocLIR);
buzbee31a4a6f2012-02-28 15:36:15 -0800105 tgt->opcode = kPseudoThrowTarget;
buzbeee3acd072012-02-25 17:03:10 -0800106 tgt->operands[0] = kind;
107 tgt->operands[1] = mir->offset;
108 MipsLIR* branch;
109 if (cCode == kMipsCondAl) {
110 branch = genUnconditionalBranch(cUnit, tgt);
111 } else {
buzbee31a4a6f2012-02-28 15:36:15 -0800112 int tReg;
113 if (immVal == 0) {
114 tReg = r_ZERO;
115 } else {
116 tReg = oatAllocTemp(cUnit);
117 loadConstant(cUnit, tReg, immVal);
118 }
119 branch = opCompareBranchCC(cUnit, cCode, reg, tReg);
buzbeee3acd072012-02-25 17:03:10 -0800120 branch->generic.target = (LIR*)tgt;
121 }
122 // Remember branch target - will process later
123 oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
124 return branch;
buzbeee3acd072012-02-25 17:03:10 -0800125}
126
127/* Perform null-check on a register. */
128STATIC MipsLIR* genNullCheck(CompilationUnit* cUnit, int sReg, int mReg,
129 MIR* mir)
130{
buzbeee3acd072012-02-25 17:03:10 -0800131 if (!(cUnit->disableOpt & (1 << kNullCheckElimination)) &&
132 mir->optimizationFlags & MIR_IGNORE_NULL_CHECK) {
133 return NULL;
134 }
135 return genImmedCheck(cUnit, kMipsCondEq, mReg, 0, mir, kMipsThrowNullPointer);
buzbeee3acd072012-02-25 17:03:10 -0800136}
137
138/* Perform check on two registers */
139STATIC TGT_LIR* genRegRegCheck(CompilationUnit* cUnit, MipsConditionCode cCode,
140 int reg1, int reg2, MIR* mir, MipsThrowKind kind)
141{
buzbeee3acd072012-02-25 17:03:10 -0800142 MipsLIR* tgt = (MipsLIR*)oatNew(cUnit, sizeof(MipsLIR), true, kAllocLIR);
buzbee31a4a6f2012-02-28 15:36:15 -0800143 tgt->opcode = kPseudoThrowTarget;
buzbeee3acd072012-02-25 17:03:10 -0800144 tgt->operands[0] = kind;
145 tgt->operands[1] = mir ? mir->offset : 0;
146 tgt->operands[2] = reg1;
147 tgt->operands[3] = reg2;
148 opRegReg(cUnit, kOpCmp, reg1, reg2);
149 MipsLIR* branch = genConditionalBranch(cUnit, cCode, tgt);
150 // Remember branch target - will process later
151 oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
152 return branch;
buzbeee3acd072012-02-25 17:03:10 -0800153}
154
/*
 * Let helper function take care of everything. Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
STATIC void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                        RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);   /* Everything to home location */
    oatLockCallTemps(cUnit);
    int addrReg = oatAllocTemp(cUnit);
    uint32_t type_idx = mir->dalvikInsn.vC;
    // Pick the helper entrypoint: the access-checking variant is only
    // needed when the referrer can't be proven able to access the type.
    if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                    cUnit->dex_cache,
                                                    *cUnit->dex_file,
                                                    type_idx)) {
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pAllocArrayFromCode), addrReg);
    } else {
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pAllocArrayFromCodeWithAccessCheck), addrReg);
    }
    loadCurrMethodDirect(cUnit, r_ARG1);          // arg1 <- Method*
    loadConstant(cUnit, r_ARG0, type_idx);        // arg0 <- type_id
    loadValueDirectFixed(cUnit, rlSrc, r_ARG2);   // arg2 <- count
    callRuntimeHelper(cUnit, addrReg);
    // Helper leaves the new array reference in the return location.
    RegLocation rlResult = oatGetReturn(cUnit);
    storeValue(cUnit, rlDest, rlResult);
}
184
/*
 * Similar to genNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class. Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
STATIC void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int elems = dInsn->vA;   // element count
    int typeId = dInsn->vB;  // array type index
    oatFlushAllRegs(cUnit);   /* Everything to home location */
    oatLockCallTemps(cUnit);
    int addrReg = oatAllocTemp(cUnit);
    // Choose the check-and-allocate entrypoint (with or without access check).
    if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
                                                    cUnit->dex_cache,
                                                    *cUnit->dex_file,
                                                    typeId)) {
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pCheckAndAllocArrayFromCode),
                     addrReg);
    } else {
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread,
                     pCheckAndAllocArrayFromCodeWithAccessCheck), addrReg);
    }
    loadCurrMethodDirect(cUnit, r_ARG1);   // arg1 <- Method*
    loadConstant(cUnit, r_ARG0, typeId);   // arg0 <- type_id
    loadConstant(cUnit, r_ARG2, elems);    // arg2 <- count
    callRuntimeHelper(cUnit, addrReg);
    /*
     * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
     * return region. Because AllocFromCode placed the new array
     * in r_V0, we'll just lock it into place. When debugger support is
     * added, it may be necessary to additionally copy all return
     * values to a home location in thread-local storage
     */
    oatLockTemp(cUnit, r_V0);

    // Having a range of 0 is legal
    if (isRange && (dInsn->vA > 0)) {
        /*
         * Bit of ugliness here. We're going to generate a mem copy loop
         * on the register range, but it is possible that some regs
         * in the range have been promoted. This is unlikely, but
         * before generating the copy, we'll just force a flush
         * of any regs in the source range that have been promoted to
         * home location.
         */
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation loc = oatUpdateLoc(cUnit,
                oatGetSrc(cUnit, mir, i));
            if (loc.location == kLocPhysReg) {
                storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
                              loc.lowReg, kWord);
            }
        }
        /*
         * TUNING note: generated code here could be much improved, but
         * this is an uncommon operation and isn't especially performance
         * critical.
         */
        int rSrc = oatAllocTemp(cUnit);
        int rDst = oatAllocTemp(cUnit);
        int rIdx = oatAllocTemp(cUnit);
        int rVal = oatAllocTemp(cUnit);
        // Set up source pointer (frame home of the first source vreg)
        RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
        opRegRegImm(cUnit, kOpAdd, rSrc, rSP,
                    oatSRegOffset(cUnit, rlFirst.sRegLow));
        // Set up the target pointer (start of the array's data area)
        opRegRegImm(cUnit, kOpAdd, rDst, r_V0,
                    Array::DataOffset().Int32Value());
        // Set up the loop counter (known to be > 0)
        loadConstant(cUnit, rIdx, dInsn->vA - 1);
        // Generate the copy loop. Going backwards for convenience
        MipsLIR* target = newLIR0(cUnit, kPseudoTargetLabel);
        target->defMask = ENCODE_ALL;  // barrier: loop head must not be hoisted over
        // Copy next element (scale 2 => word-indexed addressing)
        loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
        storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
        opRegImm(cUnit, kOpSub, rIdx, 1);
        // Loop while rIdx >= 0
        MipsLIR* branch = opCompareBranchCC(cUnit, kMipsCondGe, rIdx, r_ZERO);
        branch->generic.target = (LIR*)target;
    } else if (!isRange) {
        // TUNING: interleave
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation rlArg = loadValue(cUnit,
                oatGetSrc(cUnit, mir, i), kCoreReg);
            // Store each argument into consecutive word slots of the array.
            storeBaseDisp(cUnit, r_V0,
                          Array::DataOffset().Int32Value() +
                          i * 4, rlArg.lowReg, kWord);
            // If the loadValue caused a temp to be allocated, free it
            if (oatIsTemp(cUnit, rlArg.lowReg)) {
                oatFreeTemp(cUnit, rlArg.lowReg);
            }
        }
    }
}
284
/*
 * Generate code for a static field put (sput / sput-wide / sput-object).
 * Fast path resolves the static storage base inline; slow path calls the
 * appropriate pSet*Static runtime helper.
 *
 * NOTE(review): this routine still uses ARM register names (r0, r1, r2,
 * r3, rLR) in several places — consistent with the file-top FIXME, it
 * appears to be only partially converted to MIPS; confirm against the
 * r_ARG*/r_V0 conventions used elsewhere in this file.
 */
STATIC void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
                    bool isLongOrDouble, bool isObject)
{
    int fieldOffset;
    int ssbIndex;
    bool isVolatile;
    bool isReferrersClass;
    uint32_t fieldIdx = mir->dalvikInsn.vB;
    bool fastPath =
        cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, cUnit,
                                                fieldOffset, ssbIndex,
                                                isReferrersClass, isVolatile, true);
    if (fastPath && !SLOW_FIELD_PATH) {
        DCHECK_GE(fieldOffset, 0);
        int rBase;
        int rMethod;
        if (isReferrersClass) {
            // Fast path, static storage base is this method's class
            rMethod = loadCurrMethod(cUnit);
            rBase = oatAllocTemp(cUnit);
            loadWordDisp(cUnit, rMethod,
                         Method::DeclaringClassOffset().Int32Value(), rBase);
        } else {
            // Medium path, static storage base in a different class which
            // requires checks that the other class is initialized.
            DCHECK_GE(ssbIndex, 0);
            // May do runtime call so everything to home locations.
            oatFlushAllRegs(cUnit);
            // Using fixed register to sync with possible call to runtime
            // support.
            oatLockCallTemps(cUnit);
            rMethod = r1;  // NOTE(review): ARM register name — verify for MIPS
            oatLockTemp(cUnit, rMethod);
            loadCurrMethodDirect(cUnit, rMethod);
            rBase = r0;    // NOTE(review): ARM register name — verify for MIPS
            oatLockTemp(cUnit, rBase);
            loadWordDisp(cUnit, rMethod,
                         Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                         rBase);
            loadWordDisp(cUnit, rBase,
                         Array::DataOffset().Int32Value() + sizeof(int32_t*) *
                         ssbIndex, rBase);
            // rBase now points at appropriate static storage base (Class*)
            // or NULL if not initialized. Check for NULL and call helper if NULL.
            // TUNING: fast path should fall through
            MipsLIR* branchOver = opCmpImmBranchCC(cUnit, kMipsCondNe,
                                                   rBase, 0);
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
            loadConstant(cUnit, r0, ssbIndex);
            callRuntimeHelper(cUnit, rLR);
            MipsLIR* skipTarget = newLIR0(cUnit, kPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            branchOver->generic.target = (LIR*)skipTarget;
        }
        // rBase now holds static storage base
        oatFreeTemp(cUnit, rMethod);
        if (isLongOrDouble) {
            rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
            rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
        } else {
            rlSrc = oatGetSrc(cUnit, mir, 0);
            rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
        }
        // Volatile stores are bracketed by store/full memory barriers.
        if (isVolatile) {
            oatGenMemBarrier(cUnit, kST);
        }
        if (isLongOrDouble) {
            storeBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
                              rlSrc.highReg);
        } else {
            storeWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
        }
        if (isVolatile) {
            oatGenMemBarrier(cUnit, kSY);
        }
        // Reference stores must dirty the GC card for the holder class.
        if (isObject) {
            markGCCard(cUnit, rlSrc.lowReg, rBase);
        }
        oatFreeTemp(cUnit, rBase);
    } else {
        oatFlushAllRegs(cUnit);  // Everything to home locations
        // Select the runtime setter matching the field width/kind.
        int setterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pSet64Static) :
            (isObject ? OFFSETOF_MEMBER(Thread, pSetObjStatic)
             : OFFSETOF_MEMBER(Thread, pSet32Static));
        // NOTE(review): rLR / r0-r3 below are ARM names — verify for MIPS.
        loadWordDisp(cUnit, rSELF, setterOffset, rLR);
        loadConstant(cUnit, r0, fieldIdx);
        if (isLongOrDouble) {
            loadValueDirectWideFixed(cUnit, rlSrc, r2, r3);
        } else {
            loadValueDirect(cUnit, rlSrc, r1);
        }
        callRuntimeHelper(cUnit, rLR);
    }
}
380
/*
 * Generate code for a static field get (sget / sget-wide / sget-object).
 * Currently unimplemented for MIPS; aborts at compile time. The #if 0
 * body below is the ARM reference implementation kept as a porting guide
 * (note its ARM register names r0/r1/rLR).
 */
STATIC void genSget(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                    bool isLongOrDouble, bool isObject)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int fieldOffset;
    int ssbIndex;
    bool isVolatile;
    bool isReferrersClass;
    uint32_t fieldIdx = mir->dalvikInsn.vB;
    bool fastPath =
        cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, cUnit,
                                                fieldOffset, ssbIndex,
                                                isReferrersClass, isVolatile, false);
    if (fastPath && !SLOW_FIELD_PATH) {
        DCHECK_GE(fieldOffset, 0);
        int rBase;
        int rMethod;
        if (isReferrersClass) {
            // Fast path, static storage base is this method's class
            rMethod = loadCurrMethod(cUnit);
            rBase = oatAllocTemp(cUnit);
            loadWordDisp(cUnit, rMethod,
                         Method::DeclaringClassOffset().Int32Value(), rBase);
        } else {
            // Medium path, static storage base in a different class which
            // requires checks that the other class is initialized
            DCHECK_GE(ssbIndex, 0);
            // May do runtime call so everything to home locations.
            oatFlushAllRegs(cUnit);
            // Using fixed register to sync with possible call to runtime
            // support
            rMethod = r1;
            oatLockTemp(cUnit, rMethod);
            loadCurrMethodDirect(cUnit, rMethod);
            rBase = r0;
            oatLockTemp(cUnit, rBase);
            loadWordDisp(cUnit, rMethod,
                         Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                         rBase);
            loadWordDisp(cUnit, rBase,
                         Array::DataOffset().Int32Value() + sizeof(int32_t*) * ssbIndex,
                         rBase);
            // rBase now points at appropriate static storage base (Class*)
            // or NULL if not initialized. Check for NULL and call helper if NULL.
            // TUNING: fast path should fall through
            MipsLIR* branchOver = opCmpImmBranchCC(cUnit, kMipsCondNe, rBase, 0);
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
            loadConstant(cUnit, r0, ssbIndex);
            callRuntimeHelper(cUnit, rLR);
            MipsLIR* skipTarget = newLIR0(cUnit, kPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            branchOver->generic.target = (LIR*)skipTarget;
        }
        // rBase now holds static storage base
        oatFreeTemp(cUnit, rMethod);
        rlDest = isLongOrDouble ? oatGetDestWide(cUnit, mir, 0, 1)
                                : oatGetDest(cUnit, mir, 0);
        RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
        if (isVolatile) {
            oatGenMemBarrier(cUnit, kSY);
        }
        if (isLongOrDouble) {
            loadBaseDispWide(cUnit, NULL, rBase, fieldOffset, rlResult.lowReg,
                             rlResult.highReg, INVALID_SREG);
        } else {
            loadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
        }
        oatFreeTemp(cUnit, rBase);
        if (isLongOrDouble) {
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            storeValue(cUnit, rlDest, rlResult);
        }
    } else {
        oatFlushAllRegs(cUnit);  // Everything to home locations
        int getterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pGet64Static) :
            (isObject ? OFFSETOF_MEMBER(Thread, pGetObjStatic)
             : OFFSETOF_MEMBER(Thread, pGet32Static));
        loadWordDisp(cUnit, rSELF, getterOffset, rLR);
        loadConstant(cUnit, r0, fieldIdx);
        callRuntimeHelper(cUnit, rLR);
        if (isLongOrDouble) {
            RegLocation rlResult = oatGetReturnWide(cUnit);
            storeValueWide(cUnit, rlDest, rlResult);
        } else {
            RegLocation rlResult = oatGetReturn(cUnit);
            storeValue(cUnit, rlDest, rlResult);
        }
    }
#endif
}
474
// State-machine step for emitting one instruction of an invoke sequence.
// Returns the advanced state, or -1 when the sequence is exhausted.
typedef int (*NextCallInsn)(CompilationUnit*, MIR*, int, uint32_t dexIdx,
                            uint32_t methodIdx);
477
/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 * Currently unimplemented for MIPS; the #if 0 body is the ARM
 * reference implementation (ARM register names r0/rLR).
 */
STATIC int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
                          int state, uint32_t dexIdx, uint32_t unused)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    switch(state) {
    case 0:  // Get the current Method* [sets r0]
        loadCurrMethodDirect(cUnit, r0);
        break;
    case 1:  // Get method->code_and_direct_methods_
        loadWordDisp(cUnit, r0,
            Method::GetDexCacheCodeAndDirectMethodsOffset().Int32Value(),
            r0);
        break;
    case 2:  // Grab target method* and target code_
        loadWordDisp(cUnit, r0,
            CodeAndDirectMethods::CodeOffsetInBytes(dexIdx), rLR);
        loadWordDisp(cUnit, r0,
            CodeAndDirectMethods::MethodOffsetInBytes(dexIdx), r0);
        break;
    default:
        return -1;
    }
#endif
    return state + 1;
}
508
/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use rLR as a temp prior to target address loading
 * Note also that we'll load the first argument ("this") into
 * r1 here rather than the standard loadArgRegs.
 * Currently unimplemented for MIPS; the #if 0 body is the ARM
 * reference implementation.
 */
STATIC int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
                         int state, uint32_t dexIdx, uint32_t methodIdx)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    RegLocation rlArg;
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time.
     */
    switch(state) {
    case 0:  // Get "this" [set r1]
        rlArg = oatGetSrc(cUnit, mir, 0);
        loadValueDirectFixed(cUnit, rlArg, r1);
        break;
    case 1:  // Is "this" null? [use r1]
        genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
        // get this->klass_ [use r1, set rLR]
        loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
        break;
    case 2:  // Get this->klass_->vtable [usr rLR, set rLR]
        loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
        break;
    case 3:  // Get target method [use rLR, set r0]
        loadWordDisp(cUnit, rLR, (methodIdx * 4) +
                     Array::DataOffset().Int32Value(), r0);
        break;
    case 4:  // Get the target compiled code address [uses r0, sets rLR]
        loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
        break;
    default:
        return -1;
    }
#endif
    return state + 1;
}
552
/*
 * Interleave launch code for INVOKE_SUPER. See comments
 * for nextVCallInsn.
 * Currently unimplemented for MIPS; the #if 0 body is the ARM
 * reference implementation.
 */
STATIC int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
                             int state, uint32_t dexIdx, uint32_t methodIdx)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time. Note also that this path assumes
     * that the check to verify that the target method index falls
     * within the size of the super's vtable has been done at compile-time.
     */
    RegLocation rlArg;
    switch(state) {
    case 0:  // Get current Method* [set r0]
        loadCurrMethodDirect(cUnit, r0);
        // Load "this" [set r1]
        rlArg = oatGetSrc(cUnit, mir, 0);
        loadValueDirectFixed(cUnit, rlArg, r1);
        // Get method->declaring_class_ [use r0, set rLR]
        loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
                     rLR);
        // Is "this" null? [use r1]
        genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
        break;
    case 1:  // Get method->declaring_class_->super_class [usr rLR, set rLR]
        loadWordDisp(cUnit, rLR, Class::SuperClassOffset().Int32Value(),
                     rLR);
        break;
    case 2:  // Get ...->super_class_->vtable [u/s rLR]
        loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
        break;
    case 3:  // Get target method [use rLR, set r0]
        loadWordDisp(cUnit, rLR, (methodIdx * 4) +
                     Array::DataOffset().Int32Value(), r0);
        break;
    case 4:  // Get the target compiled code address [uses r0, sets rLR]
        loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
        break;
    default:
        return -1;
    }
#endif
    return state + 1;
}
601
/*
 * Shared slow-path invoke step: when the callee cannot be fully resolved
 * at compile time, bounce through the runtime trampoline at the given
 * Thread-relative offset. Single-step state machine (state 0 emits the
 * trampoline load; any other state terminates with -1).
 * Currently unimplemented for MIPS; the #if 0 body is the ARM
 * reference implementation.
 */
STATIC int nextInvokeInsnSP(CompilationUnit* cUnit, MIR* mir, int trampoline,
                            int state, uint32_t dexIdx, uint32_t methodIdx)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    /*
     * This handles the case in which the base method is not fully
     * resolved at compile time, we bail to a runtime helper.
     */
    if (state == 0) {
        // Load trampoline target
        loadWordDisp(cUnit, rSELF, trampoline, rLR);
        // Load r0 with method index
        loadConstant(cUnit, r0, dexIdx);
        return 1;
    }
#endif
    return -1;
}
621
622STATIC int nextStaticCallInsnSP(CompilationUnit* cUnit, MIR* mir,
623 int state, uint32_t dexIdx, uint32_t methodIdx)
624{
625 int trampoline = OFFSETOF_MEMBER(Thread, pInvokeStaticTrampolineWithAccessCheck);
626 return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
627}
628
629STATIC int nextDirectCallInsnSP(CompilationUnit* cUnit, MIR* mir,
630 int state, uint32_t dexIdx, uint32_t methodIdx)
631{
632 int trampoline = OFFSETOF_MEMBER(Thread, pInvokeDirectTrampolineWithAccessCheck);
633 return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
634}
635
636STATIC int nextSuperCallInsnSP(CompilationUnit* cUnit, MIR* mir,
637 int state, uint32_t dexIdx, uint32_t methodIdx)
638{
639 int trampoline = OFFSETOF_MEMBER(Thread, pInvokeSuperTrampolineWithAccessCheck);
640 return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
641}
642
643STATIC int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir,
644 int state, uint32_t dexIdx, uint32_t methodIdx)
645{
646 int trampoline = OFFSETOF_MEMBER(Thread, pInvokeVirtualTrampolineWithAccessCheck);
647 return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
648}
649
650/*
651 * All invoke-interface calls bounce off of art_invoke_interface_trampoline,
652 * which will locate the target and continue on via a tail call.
653 */
654STATIC int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir,
655 int state, uint32_t dexIdx, uint32_t unused)
656{
657 int trampoline = OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampoline);
658 return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
659}
660
661STATIC int nextInterfaceCallInsnWithAccessCheck(CompilationUnit* cUnit,
662 MIR* mir, int state,
663 uint32_t dexIdx,
664 uint32_t unused)
665{
666 int trampoline = OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampolineWithAccessCheck);
667 return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
668}
669
/*
 * Load call arguments into the argument registers, interleaving invoke
 * state-machine steps after each load.  Returns the advanced call state.
 * Currently unimplemented for MIPS; the #if 0 body is the ARM
 * reference implementation (args in r1..r3).
 */
STATIC int loadArgRegs(CompilationUnit* cUnit, MIR* mir,
                       DecodedInstruction* dInsn, int callState,
                       NextCallInsn nextCallInsn, uint32_t dexIdx,
                       uint32_t methodIdx, bool skipThis)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int nextReg = r1;
    int nextArg = 0;
    if (skipThis) {
        nextReg++;
        nextArg++;
    }
    for (; (nextReg <= r3) && (nextArg < mir->ssaRep->numUses); nextReg++) {
        RegLocation rlArg = oatGetRawSrc(cUnit, mir, nextArg++);
        rlArg = oatUpdateRawLoc(cUnit, rlArg);
        if (rlArg.wide && (nextReg <= r2)) {
            loadValueDirectWideFixed(cUnit, rlArg, nextReg, nextReg + 1);
            nextReg++;
            nextArg++;
        } else {
            rlArg.wide = false;
            loadValueDirectFixed(cUnit, rlArg, nextReg);
        }
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
    }
#endif
    return callState;
}
699
/*
 * Load up to 5 arguments, the first three of which will be in
 * r1 .. r3. On entry r0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 * Currently unimplemented for MIPS; the #if 0 body is the ARM
 * reference implementation.
 */
STATIC int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
                                DecodedInstruction* dInsn, int callState,
                                MipsLIR** pcrLabel, NextCallInsn nextCallInsn,
                                uint32_t dexIdx, uint32_t methodIdx,
                                bool skipThis)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    RegLocation rlArg;

    /* If no arguments, just return */
    if (dInsn->vA == 0)
        return callState;

    callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);

    DCHECK_LE(dInsn->vA, 5U);
    if (dInsn->vA > 3) {
        uint32_t nextUse = 3;
        // Detect special case of wide arg spanning arg3/arg4
        RegLocation rlUse0 = oatGetRawSrc(cUnit, mir, 0);
        RegLocation rlUse1 = oatGetRawSrc(cUnit, mir, 1);
        RegLocation rlUse2 = oatGetRawSrc(cUnit, mir, 2);
        if (((!rlUse0.wide && !rlUse1.wide) || rlUse0.wide) &&
            rlUse2.wide) {
            int reg;
            // Wide spans, we need the 2nd half of uses[2].
            rlArg = oatUpdateLocWide(cUnit, rlUse2);
            if (rlArg.location == kLocPhysReg) {
                reg = rlArg.highReg;
            } else {
                // r2 & r3 can safely be used here
                reg = r3;
                loadWordDisp(cUnit, rSP,
                             oatSRegOffset(cUnit, rlArg.sRegLow) + 4, reg);
                callState = nextCallInsn(cUnit, mir, callState, dexIdx,
                                         methodIdx);
            }
            // NOTE(review): these two stores target the same outs slot when
            // nextUse == 3 ((3+1)*4 == 16) — looks like a duplicated store;
            // confirm intent before porting.
            storeBaseDisp(cUnit, rSP, (nextUse + 1) * 4, reg, kWord);
            storeBaseDisp(cUnit, rSP, 16 /* (3+1)*4 */, reg, kWord);
            callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
            nextUse++;
        }
        // Loop through the rest
        while (nextUse < dInsn->vA) {
            int lowReg;
            int highReg;
            rlArg = oatGetRawSrc(cUnit, mir, nextUse);
            rlArg = oatUpdateRawLoc(cUnit, rlArg);
            if (rlArg.location == kLocPhysReg) {
                lowReg = rlArg.lowReg;
                highReg = rlArg.highReg;
            } else {
                lowReg = r2;
                highReg = r3;
                if (rlArg.wide) {
                    loadValueDirectWideFixed(cUnit, rlArg, lowReg, highReg);
                } else {
                    loadValueDirectFixed(cUnit, rlArg, lowReg);
                }
                callState = nextCallInsn(cUnit, mir, callState, dexIdx,
                                         methodIdx);
            }
            int outsOffset = (nextUse + 1) * 4;
            if (rlArg.wide) {
                storeBaseDispWide(cUnit, rSP, outsOffset, lowReg, highReg);
                nextUse += 2;
            } else {
                storeWordDisp(cUnit, rSP, outsOffset, lowReg);
                nextUse++;
            }
            callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        }
    }

    callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
                            dexIdx, methodIdx, skipThis);

    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
    }
#endif
    return callState;
}
791
792/*
793 * May have 0+ arguments (also used for jumbo). Note that
794 * source virtual registers may be in physical registers, so may
795 * need to be flushed to home location before copying. This
796 * applies to arg3 and above (see below).
797 *
798 * Two general strategies:
799 * If < 20 arguments
800 * Pass args 3-18 using vldm/vstm block copy
801 * Pass arg0, arg1 & arg2 in r1-r3
802 * If 20+ arguments
803 * Pass args arg19+ using memcpy block copy
804 * Pass arg0, arg1 & arg2 in r1-r3
805 *
806 */
/*
 * Generate code to marshal a range-form (or jumbo) invoke's arguments.
 *
 * Currently an unimplemented stub on MIPS: it logs a fatal error and
 * returns callState unchanged.  The code under "#if 0" is the ARM
 * implementation kept as a porting reference; note it still uses ARM
 * register names (r0-r3, rLR) and Thumb2 opcodes (kThumb2Vldms/Vstms).
 */
STATIC int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
                              DecodedInstruction* dInsn, int callState,
                              MipsLIR** pcrLabel, NextCallInsn nextCallInsn,
                              uint32_t dexIdx, uint32_t methodIdx,
                              bool skipThis)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int firstArg = dInsn->vC;
    int numArgs = dInsn->vA;

    // If we can treat it as non-range (Jumbo ops will use range form)
    if (numArgs <= 5)
        return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
                                    nextCallInsn, dexIdx, methodIdx,
                                    skipThis);
    /*
     * Make sure range list doesn't span the break between in normal
     * Dalvik vRegs and the ins.
     */
    int highestArg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
    int boundaryReg = cUnit->numDalvikRegisters - cUnit->numIns;
    if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
        LOG(FATAL) << "Argument list spanned locals & args";
    }

    /*
     * First load the non-register arguments.  Both forms expect all
     * of the source arguments to be in their home frame location, so
     * scan the sReg names and flush any that have been promoted to
     * frame backing storage.
     */
    // Scan the rest of the args - if in physReg flush to memory
    for (int nextArg = 0; nextArg < numArgs;) {
        RegLocation loc = oatGetRawSrc(cUnit, mir, nextArg);
        if (loc.wide) {
            loc = oatUpdateLocWide(cUnit, loc);
            // Wide args at position 2+ live on the stack; flush if promoted.
            if ((nextArg >= 2) && (loc.location == kLocPhysReg)) {
                storeBaseDispWide(cUnit, rSP,
                                  oatSRegOffset(cUnit, loc.sRegLow),
                                  loc.lowReg, loc.highReg);
            }
            nextArg += 2;
        } else {
            loc = oatUpdateLoc(cUnit, loc);
            // Narrow args at position 3+ live on the stack; flush if promoted.
            if ((nextArg >= 3) && (loc.location == kLocPhysReg)) {
                storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
                              loc.lowReg, kWord);
            }
            nextArg++;
        }
    }

    int startOffset = oatSRegOffset(cUnit,
        cUnit->regLocation[mir->ssaRep->uses[3]].sRegLow);
    int outsOffset = 4 /* Method* */ + (3 * 4);
    if (numArgs >= 20) {
        // Generate memcpy
        opRegRegImm(cUnit, kOpAdd, r0, rSP, outsOffset);
        opRegRegImm(cUnit, kOpAdd, r1, rSP, startOffset);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pMemcpy), rLR);
        loadConstant(cUnit, r2, (numArgs - 3) * 4);
        callRuntimeHelper(cUnit, rLR);
        // Restore Method*
        loadCurrMethodDirect(cUnit, r0);
    } else {
        // Use vldm/vstm pair using r3 as a temp
        int regsLeft = std::min(numArgs - 3, 16);
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, startOffset);
        MipsLIR* ld = newLIR3(cUnit, kThumb2Vldms, r3, fr0, regsLeft);
        //TUNING: loosen barrier
        ld->defMask = ENCODE_ALL;
        setMemRefType(ld, true /* isLoad */, kDalvikReg);
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, 4 /* Method* */ + (3 * 4));
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
        MipsLIR* st = newLIR3(cUnit, kThumb2Vstms, r3, fr0, regsLeft);
        setMemRefType(st, false /* isLoad */, kDalvikReg);
        st->defMask = ENCODE_ALL;
        callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
    }

    callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
                            dexIdx, methodIdx, skipThis);

    callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
    }
#endif
    return callState;
}
900
// Debugging routine - if null target, branch to DebugMe
/*
 * Unimplemented MIPS stub.  The disabled reference code compares rLR
 * against 0 and, only when it is null, replaces it with the address of
 * the pDebugMe helper before the indirect call elsewhere.
 */
STATIC void genShowTarget(CompilationUnit* cUnit)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    // Skip the load below when rLR already holds a non-null target.
    MipsLIR* branchOver = opCmpImmBranch(cUnit, kMipsCondNe, rLR, 0);
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pDebugMe), rLR);
    MipsLIR* target = newLIR0(cUnit, kPseudoTargetLabel);
    target->defMask = -1;
    branchOver->generic.target = (LIR*)target;
#endif
}
914
/*
 * Emit a runtime call that throws a VerificationError built from the
 * instruction's vA/vB operands.  Unimplemented MIPS stub; the disabled
 * body is the ARM reference implementation.
 */
STATIC void genThrowVerificationError(CompilationUnit* cUnit, MIR* mir)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    loadWordDisp(cUnit, rSELF,
        OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode), rLR);
    loadConstant(cUnit, r0, mir->dalvikInsn.vA);
    loadConstant(cUnit, r1, mir->dalvikInsn.vB);
    callRuntimeHelper(cUnit, rLR);
#endif
}
926
/*
 * Compare two int values and branch on the condition implied by the
 * Dalvik IF_* opcode: the taken edge gets the conditional branch and
 * the fall-through edge gets an explicit unconditional branch.
 * Unimplemented MIPS stub; disabled body is the ARM-style reference.
 */
STATIC void genCompareAndBranch(CompilationUnit* cUnit, BasicBlock* bb,
                                MIR* mir, RegLocation rlSrc1,
                                RegLocation rlSrc2, MipsLIR* labelList)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    MipsConditionCode cond;
    // Force both operands into core registers before comparing.
    rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
    rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
    opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
    Opcode opcode = mir->dalvikInsn.opcode;
    switch(opcode) {
        case OP_IF_EQ:
            cond = kMipsCondEq;
            break;
        case OP_IF_NE:
            cond = kMipsCondNe;
            break;
        case OP_IF_LT:
            cond = kMipsCondLt;
            break;
        case OP_IF_GE:
            cond = kMipsCondGe;
            break;
        case OP_IF_GT:
            cond = kMipsCondGt;
            break;
        case OP_IF_LE:
            cond = kMipsCondLe;
            break;
        default:
            cond = (MipsConditionCode)0;
            LOG(FATAL) << "Unexpected opcode " << (int)opcode;
    }
    genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
    genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
#endif
}
965
/*
 * Compare one int value against zero and branch on the condition
 * implied by the Dalvik IF_*Z opcode; taken edge gets the conditional
 * branch, fall-through is an explicit unconditional branch.
 * Unimplemented MIPS stub; disabled body is the ARM-style reference.
 */
STATIC void genCompareZeroAndBranch(CompilationUnit* cUnit, BasicBlock* bb,
                                    MIR* mir, RegLocation rlSrc,
                                    MipsLIR* labelList)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    MipsConditionCode cond;
    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
    opRegImm(cUnit, kOpCmp, rlSrc.lowReg, 0);
    Opcode opcode = mir->dalvikInsn.opcode;
    switch(opcode) {
        case OP_IF_EQZ:
            cond = kMipsCondEq;
            break;
        case OP_IF_NEZ:
            cond = kMipsCondNe;
            break;
        case OP_IF_LTZ:
            cond = kMipsCondLt;
            break;
        case OP_IF_GEZ:
            cond = kMipsCondGe;
            break;
        case OP_IF_GTZ:
            cond = kMipsCondGt;
            break;
        case OP_IF_LEZ:
            cond = kMipsCondLe;
            break;
        default:
            cond = (MipsConditionCode)0;
            LOG(FATAL) << "Unexpected opcode " << (int)opcode;
    }
    genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
    genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
#endif
}
1003
/*
 * int-to-long widening: move/load the 32-bit source into the result's
 * low register and fill the high register with the sign bits via an
 * arithmetic shift right by 31.  Unimplemented MIPS stub; disabled
 * body is the ARM reference.
 */
STATIC void genIntToLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                         RegLocation rlSrc)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    if (rlSrc.location == kLocPhysReg) {
        genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
    } else {
        loadValueDirect(cUnit, rlSrc, rlResult.lowReg);
    }
    // Sign-extend into the high word.
    opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
                rlResult.lowReg, 31);
    storeValueWide(cUnit, rlDest, rlResult);
#endif
}
1020
/*
 * Narrowing int conversions (int-to-byte/short/char), each implemented
 * as a single register-to-register extension op.  Unimplemented MIPS
 * stub; disabled body is the ARM reference.
 */
STATIC void genIntNarrowing(CompilationUnit* cUnit, MIR* mir,
                            RegLocation rlDest, RegLocation rlSrc)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
    RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
    OpKind op = kOpInvalid;
    switch(mir->dalvikInsn.opcode) {
        case OP_INT_TO_BYTE:
            op = kOp2Byte;
            break;
        case OP_INT_TO_SHORT:
            op = kOp2Short;
            break;
        case OP_INT_TO_CHAR:
            op = kOp2Char;
            break;
        default:
            // NOTE(review): op remains kOpInvalid here but is still passed
            // to opRegReg below (LOG(ERROR) does not abort) - confirm intent.
            LOG(ERROR) << "Bad int conversion type";
    }
    opRegReg(cUnit, op, rlResult.lowReg, rlSrc.lowReg);
    storeValue(cUnit, rlDest, rlResult);
#endif
}
1046
/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame.  Perform initial
 * assignment of promoted arguments.
 */
STATIC void flushIns(CompilationUnit* cUnit)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    if (cUnit->numIns == 0)
        return;
    // ARM reference: ins arrive in r1..r3; any remainder arrives on
    // the stack at the caller-established frame locations.
    int firstArgReg = r1;
    int lastArgReg = r3;
    int startVReg = cUnit->numDalvikRegisters - cUnit->numIns;
    /*
     * Arguments passed in registers should be flushed
     * to their backing locations in the frame for now.
     * Also, we need to do initial assignment for promoted
     * arguments.  NOTE: an older version of dx had an issue
     * in which it would reuse static method argument registers.
     * This could result in the same Dalvik virtual register
     * being promoted to both core and fp regs.  In those
     * cases, copy argument to both.  This will be uncommon
     * enough that it isn't worth attempting to optimize.
     */
    for (int i = 0; i < cUnit->numIns; i++) {
        PromotionMap vMap = cUnit->promotionMap[startVReg + i];
        if (i <= (lastArgReg - firstArgReg)) {
            // If arriving in register
            if (vMap.coreLocation == kLocPhysReg) {
                genRegCopy(cUnit, vMap.coreReg, firstArgReg + i);
            }
            if (vMap.fpLocation == kLocPhysReg) {
                genRegCopy(cUnit, vMap.fpReg, firstArgReg + i);
            }
            // Also put a copy in memory in case we're partially promoted
            storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
                          firstArgReg + i, kWord);
        } else {
            // If arriving in frame & promoted
            if (vMap.coreLocation == kLocPhysReg) {
                loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
                             vMap.coreReg);
            }
            if (vMap.fpLocation == kLocPhysReg) {
                loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
                             vMap.fpReg);
            }
        }
    }
#endif
}
1099
/*
 * Emit the method prologue: reserve the argument registers, perform the
 * stack-overflow check (skipped for small leaf frames), spill callee
 * saves, establish the frame, store the Method*, flush ins, and emit
 * the optional debugger-entry callout.  Unimplemented MIPS stub; the
 * disabled body is the ARM reference (Thumb2 push opcodes, r0-r3/r12).
 */
STATIC void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
    /*
     * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
     * mechanism know so it doesn't try to use any of them when
     * expanding the frame or flushing.  This leaves the utility
     * code with a single temp: r12.  This should be enough.
     */
    oatLockTemp(cUnit, r0);
    oatLockTemp(cUnit, r1);
    oatLockTemp(cUnit, r2);
    oatLockTemp(cUnit, r3);

    /*
     * We can safely skip the stack overflow check if we're
     * a leaf *and* our frame size < fudge factor.
     */
    bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
                              ((size_t)cUnit->frameSize <
                              Thread::kStackOverflowReservedBytes));
    newLIR0(cUnit, kPseudoMethodEntry);
    if (!skipOverflowCheck) {
        /* Load stack limit */
        loadWordDisp(cUnit, rSELF,
                     Thread::StackEndOffset().Int32Value(), r12);
    }
    /* Spill core callee saves */
    newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
    /* Need to spill any FP regs? */
    if (cUnit->numFPSpills) {
        /*
         * NOTE: fp spills are a little different from core spills in that
         * they are pushed as a contiguous block.  When promoting from
         * the fp set, we must allocate all singles from s16..highest-promoted
         */
        newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
    }
    if (!skipOverflowCheck) {
        // Compute the new SP into rLR first so the overflow check can
        // run against the stack limit before SP is committed.
        opRegRegImm(cUnit, kOpSub, rLR, rSP,
                    cUnit->frameSize - (spillCount * 4));
        genRegRegCheck(cUnit, kMipsCondCc, rLR, r12, NULL,
                       kMipsThrowStackOverflow);
        genRegCopy(cUnit, rSP, rLR);         // Establish stack
    } else {
        opRegImm(cUnit, kOpSub, rSP,
                 cUnit->frameSize - (spillCount * 4));
    }
    // Store Method* (arriving in r0) at the base of the new frame.
    storeBaseDisp(cUnit, rSP, 0, r0, kWord);
    flushIns(cUnit);

    if (cUnit->genDebugger) {
        // Refresh update debugger callout
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode), rSUSPEND);
        genDebuggerUpdate(cUnit, DEBUGGER_METHOD_ENTRY);
    }

    oatFreeTemp(cUnit, r0);
    oatFreeTemp(cUnit, r1);
    oatFreeTemp(cUnit, r2);
    oatFreeTemp(cUnit, r3);
#endif
}
1166
/*
 * Emit the method epilogue: optional debugger-exit callout, pop the
 * frame, restore callee saves, and return.  Unimplemented MIPS stub;
 * the disabled body is the ARM reference, including the ARM-only
 * trick of popping the saved LR directly into PC to return.
 */
STATIC void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
    /*
     * In the exit path, r0/r1 are live - make sure they aren't
     * allocated by the register utilities as temps.
     */
    oatLockTemp(cUnit, r0);
    oatLockTemp(cUnit, r1);

    newLIR0(cUnit, kPseudoMethodExit);
    /* If we're compiling for the debugger, generate an update callout */
    if (cUnit->genDebugger) {
        genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
    }
    opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (spillCount * 4));
    /* Need to restore any FP callee saves? */
    if (cUnit->numFPSpills) {
        newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
    }
    if (cUnit->coreSpillMask & (1 << rLR)) {
        /* Unspill rLR to rPC */
        cUnit->coreSpillMask &= ~(1 << rLR);
        cUnit->coreSpillMask |= (1 << rPC);
    }
    newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
    if (!(cUnit->coreSpillMask & (1 << rPC))) {
        /* We didn't pop to rPC, so must do a bv rLR */
        newLIR1(cUnit, kThumbBx, rLR);
    }
#endif
}
1201
1202/*
1203 * Nop any unconditional branches that go to the next instruction.
1204 * Note: new redundant branches may be inserted later, and we'll
1205 * use a check in final instruction assembly to nop those out.
1206 */
1207void removeRedundantBranches(CompilationUnit* cUnit)
1208{
1209 UNIMPLEMENTED(FATAL) << "Needs mips version";
1210#if 0
1211 MipsLIR* thisLIR;
1212
1213 for (thisLIR = (MipsLIR*) cUnit->firstLIRInsn;
1214 thisLIR != (MipsLIR*) cUnit->lastLIRInsn;
1215 thisLIR = NEXT_LIR(thisLIR)) {
1216
1217 /* Branch to the next instruction */
1218 if ((thisLIR->opcode == kThumbBUncond) ||
1219 (thisLIR->opcode == kThumb2BUncond)) {
1220 MipsLIR* nextLIR = thisLIR;
1221
1222 while (true) {
1223 nextLIR = NEXT_LIR(nextLIR);
1224
1225 /*
1226 * Is the branch target the next instruction?
1227 */
1228 if (nextLIR == (MipsLIR*) thisLIR->generic.target) {
1229 thisLIR->flags.isNop = true;
1230 break;
1231 }
1232
1233 /*
1234 * Found real useful stuff between the branch and the target.
1235 * Need to explicitly check the lastLIRInsn here because it
1236 * might be the last real instruction.
1237 */
1238 if (!isPseudoOpcode(nextLIR->opcode) ||
1239 (nextLIR = (MipsLIR*) cUnit->lastLIRInsn))
1240 break;
1241 }
1242 }
1243 }
1244#endif
1245}
1246
/*
 * Materialize the deferred suspend-check launchpads: for each recorded
 * label, emit the call to the pTestSuspendFromCode helper and branch
 * back to the resume point.  Unimplemented MIPS stub; the disabled
 * body is the ARM reference.
 */
STATIC void handleSuspendLaunchpads(CompilationUnit *cUnit)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    MipsLIR** suspendLabel =
        (MipsLIR **) cUnit->suspendLaunchpads.elemList;
    int numElems = cUnit->suspendLaunchpads.numUsed;

    for (int i = 0; i < numElems; i++) {
        /* TUNING: move suspend count load into helper */
        MipsLIR* lab = suspendLabel[i];
        // operands[0] holds the resume label, operands[1] the Dalvik offset.
        MipsLIR* resumeLab = (MipsLIR*)lab->operands[0];
        cUnit->currentDalvikOffset = lab->operands[1];
        oatAppendLIR(cUnit, (LIR *)lab);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pTestSuspendFromCode), rLR);
        if (!cUnit->genDebugger) {
            // use rSUSPEND for suspend count
            loadWordDisp(cUnit, rSELF,
                         Thread::SuspendCountOffset().Int32Value(), rSUSPEND);
        }
        opReg(cUnit, kOpBlx, rLR);
        if ( cUnit->genDebugger) {
            // use rSUSPEND for update debugger
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode), rSUSPEND);
        }
        genUnconditionalBranch(cUnit, resumeLab);
    }
#endif
}
1278
/*
 * Materialize the deferred exception-throw launchpads: for each
 * recorded label, marshal the saved operands into argument registers
 * and call the matching Thread throw entrypoint.  Unimplemented MIPS
 * stub; the disabled body is the ARM reference.
 */
STATIC void handleThrowLaunchpads(CompilationUnit *cUnit)
{
    UNIMPLEMENTED(FATAL) << "Needs mips version";
#if 0
    MipsLIR** throwLabel =
        (MipsLIR **) cUnit->throwLaunchpads.elemList;
    int numElems = cUnit->throwLaunchpads.numUsed;
    int i;

    for (i = 0; i < numElems; i++) {
        MipsLIR* lab = throwLabel[i];
        // operands[0] = throw kind, [1] = Dalvik offset, [2]/[3] = payload.
        cUnit->currentDalvikOffset = lab->operands[1];
        oatAppendLIR(cUnit, (LIR *)lab);
        int funcOffset = 0;
        int v1 = lab->operands[2];
        int v2 = lab->operands[3];
        switch(lab->operands[0]) {
            case kMipsThrowNullPointer:
                funcOffset = OFFSETOF_MEMBER(Thread, pThrowNullPointerFromCode);
                break;
            case kMipsThrowArrayBounds:
                // Move v1->r0 and v2->r1, ordering the copies (with r12
                // as scratch) so aliasing operands aren't clobbered.
                if (v2 != r0) {
                    genRegCopy(cUnit, r0, v1);
                    genRegCopy(cUnit, r1, v2);
                } else {
                    if (v1 == r1) {
                        genRegCopy(cUnit, r12, v1);
                        genRegCopy(cUnit, r1, v2);
                        genRegCopy(cUnit, r0, r12);
                    } else {
                        genRegCopy(cUnit, r1, v2);
                        genRegCopy(cUnit, r0, v1);
                    }
                }
                funcOffset = OFFSETOF_MEMBER(Thread, pThrowArrayBoundsFromCode);
                break;
            case kMipsThrowDivZero:
                funcOffset = OFFSETOF_MEMBER(Thread, pThrowDivZeroFromCode);
                break;
            case kMipsThrowVerificationError:
                loadConstant(cUnit, r0, v1);
                loadConstant(cUnit, r1, v2);
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode);
                break;
            case kMipsThrowNegArraySize:
                genRegCopy(cUnit, r0, v1);
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowNegArraySizeFromCode);
                break;
            case kMipsThrowNoSuchMethod:
                genRegCopy(cUnit, r0, v1);
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowNoSuchMethodFromCode);
                break;
            case kMipsThrowStackOverflow:
                funcOffset =
                    OFFSETOF_MEMBER(Thread, pThrowStackOverflowFromCode);
                // Restore stack alignment
                opRegImm(cUnit, kOpAdd, rSP,
                         (cUnit->numCoreSpills + cUnit->numFPSpills) * 4);
                break;
            default:
                LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
        }
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        callRuntimeHelper(cUnit, rLR);
    }
#endif
}
1349
1350/* Common initialization routine for an architecture family */
1351bool oatArchInit()
1352{
1353 int i;
1354
1355 for (i = 0; i < kMipsLast; i++) {
1356 if (EncodingMap[i].opcode != i) {
1357 LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
1358 " is wrong: expecting " << i << ", seeing " <<
1359 (int)EncodingMap[i].opcode;
1360 }
1361 }
1362
1363 return oatArchVariantInit();
1364}
1365
/* Needed by the Assembler */
/*
 * Thin forwarding wrapper: delegates computation of a LIR instruction's
 * use/def resource masks to the target-specific setupResourceMasks().
 */
void oatSetupResourceMasks(MipsLIR* lir)
{
    setupResourceMasks(lir);
}
1371
1372} // namespace art