blob: ea0272172cafdca8749a93858353f51eec25a828 [file] [log] [blame]
buzbee67bf8852011-08-17 17:51:35 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
buzbee67bf8852011-08-17 17:51:35 -070017static const RegLocation badLoc = {kLocDalvikFrame, 0, 0, INVALID_REG,
18 INVALID_REG, INVALID_SREG, 0,
19 kLocDalvikFrame, INVALID_REG, INVALID_REG,
20 INVALID_OFFSET};
21static const RegLocation retLoc = LOC_DALVIK_RETURN_VAL;
22static const RegLocation retLocWide = LOC_DALVIK_RETURN_VAL_WIDE;
23
buzbeedfd3d702011-08-28 12:56:51 -070024/*
25 * Let helper function take care of everything. Will call
26 * Array::AllocFromCode(type_idx, method, count);
27 * Note: AllocFromCode will handle checks for errNegativeArraySize.
28 */
buzbee67bf8852011-08-17 17:51:35 -070029static void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
30 RegLocation rlSrc)
31{
buzbeedfd3d702011-08-28 12:56:51 -070032 oatFlushAllRegs(cUnit); /* Everything to home location */
33 loadWordDisp(cUnit, rSELF,
34 OFFSETOF_MEMBER(Thread, pAllocFromCode), rLR);
35 loadCurrMethodDirect(cUnit, r1); // arg1 <- Method*
36 loadConstant(cUnit, r0, mir->dalvikInsn.vC); // arg0 <- type_id
37 loadValueDirectFixed(cUnit, rlSrc, r2); // arg2 <- count
38 opReg(cUnit, kOpBlx, rLR);
39 oatClobberCallRegs(cUnit);
40 RegLocation rlResult = oatGetReturn(cUnit);
41 storeValue(cUnit, rlDest, rlResult);
buzbee67bf8852011-08-17 17:51:35 -070042}
43
44/*
45 * Similar to genNewArray, but with post-allocation initialization.
46 * Verifier guarantees we're dealing with an array class. Current
47 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
48 * Current code also throws internal unimp if not 'L', '[' or 'I'.
49 */
50static void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
51{
52 DecodedInstruction* dInsn = &mir->dalvikInsn;
53 int elems;
buzbeedfd3d702011-08-28 12:56:51 -070054 int typeId;
buzbee67bf8852011-08-17 17:51:35 -070055 if (isRange) {
56 elems = dInsn->vA;
buzbeedfd3d702011-08-28 12:56:51 -070057 typeId = dInsn->vB;
buzbee67bf8852011-08-17 17:51:35 -070058 } else {
59 elems = dInsn->vB;
buzbeedfd3d702011-08-28 12:56:51 -070060 typeId = dInsn->vC;
buzbee67bf8852011-08-17 17:51:35 -070061 }
buzbeedfd3d702011-08-28 12:56:51 -070062 oatFlushAllRegs(cUnit); /* Everything to home location */
63 // TODO: Alloc variant that checks types (see header comment) */
64 UNIMPLEMENTED(WARNING) << "Need AllocFromCode variant w/ extra checks";
65 loadWordDisp(cUnit, rSELF,
66 OFFSETOF_MEMBER(Thread, pAllocFromCode), rLR);
67 loadCurrMethodDirect(cUnit, r1); // arg1 <- Method*
68 loadConstant(cUnit, r0, typeId); // arg0 <- type_id
69 loadConstant(cUnit, r2, elems); // arg2 <- count
70 opReg(cUnit, kOpBlx, rLR);
buzbee67bf8852011-08-17 17:51:35 -070071 /*
buzbeedfd3d702011-08-28 12:56:51 -070072 * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
73 * return region. Because AllocFromCode placed the new array
74 * in r0, we'll just lock it into place. When debugger support is
75 * added, it may be necessary to additionally copy all return
76 * values to a home location in thread-local storage
buzbee67bf8852011-08-17 17:51:35 -070077 */
buzbee67bf8852011-08-17 17:51:35 -070078 oatLockTemp(cUnit, r0);
buzbeedfd3d702011-08-28 12:56:51 -070079
buzbee67bf8852011-08-17 17:51:35 -070080 // Having a range of 0 is legal
81 if (isRange && (dInsn->vA > 0)) {
82 /*
83 * Bit of ugliness here. We're going generate a mem copy loop
84 * on the register range, but it is possible that some regs
85 * in the range have been promoted. This is unlikely, but
86 * before generating the copy, we'll just force a flush
87 * of any regs in the source range that have been promoted to
88 * home location.
89 */
90 for (unsigned int i = 0; i < dInsn->vA; i++) {
91 RegLocation loc = oatUpdateLoc(cUnit,
92 oatGetSrc(cUnit, mir, i));
93 if (loc.location == kLocPhysReg) {
94 storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
95 }
96 }
97 /*
98 * TUNING note: generated code here could be much improved, but
99 * this is an uncommon operation and isn't especially performance
100 * critical.
101 */
102 int rSrc = oatAllocTemp(cUnit);
103 int rDst = oatAllocTemp(cUnit);
104 int rIdx = oatAllocTemp(cUnit);
105 int rVal = rLR; // Using a lot of temps, rLR is known free here
106 // Set up source pointer
107 RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
108 opRegRegImm(cUnit, kOpAdd, rSrc, rSP, rlFirst.spOffset);
109 // Set up the target pointer
110 opRegRegImm(cUnit, kOpAdd, rDst, r0,
buzbeec143c552011-08-20 17:38:58 -0700111 Array::DataOffset().Int32Value());
buzbee67bf8852011-08-17 17:51:35 -0700112 // Set up the loop counter (known to be > 0)
113 loadConstant(cUnit, rIdx, dInsn->vA);
114 // Generate the copy loop. Going backwards for convenience
115 ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
116 target->defMask = ENCODE_ALL;
117 // Copy next element
118 loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
119 storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
120 // Use setflags encoding here
121 newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
122 ArmLIR* branch = opCondBranch(cUnit, kArmCondNe);
123 branch->generic.target = (LIR*)target;
124 } else if (!isRange) {
125 // TUNING: interleave
126 for (unsigned int i = 0; i < dInsn->vA; i++) {
127 RegLocation rlArg = loadValue(cUnit,
128 oatGetSrc(cUnit, mir, i), kCoreReg);
buzbeec143c552011-08-20 17:38:58 -0700129 storeBaseDisp(cUnit, r0,
130 Array::DataOffset().Int32Value() +
buzbee67bf8852011-08-17 17:51:35 -0700131 i * 4, rlArg.lowReg, kWord);
132 // If the loadValue caused a temp to be allocated, free it
133 if (oatIsTemp(cUnit, rlArg.lowReg)) {
134 oatFreeTemp(cUnit, rlArg.lowReg);
135 }
136 }
137 }
138}
139
140static void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
141{
buzbeee1931742011-08-28 21:15:53 -0700142 bool slow_path = true;
143 bool isObject = ((mir->dalvikInsn.opcode == OP_SPUT_OBJECT) ||
144 (mir->dalvikInsn.opcode == OP_SPUT_OBJECT_VOLATILE));
145 UNIMPLEMENTED(WARNING) << "Implement sput fast path";
146 int funcOffset;
147 if (slow_path) {
148 if (isObject) {
149 funcOffset = OFFSETOF_MEMBER(Thread, pSetObjStatic);
150 } else {
151 funcOffset = OFFSETOF_MEMBER(Thread, pSet32Static);
152 }
153 oatFlushAllRegs(cUnit);
154 loadWordDisp(cUnit, rSELF, funcOffset, rLR);
155 loadConstant(cUnit, r0, mir->dalvikInsn.vB);
156 loadCurrMethodDirect(cUnit, r1);
157 loadValueDirect(cUnit, rlSrc, r2);
158 opReg(cUnit, kOpBlx, rLR);
159 oatClobberCallRegs(cUnit);
160 } else {
161 UNIMPLEMENTED(FATAL) << "Must update for new world";
buzbeec143c552011-08-20 17:38:58 -0700162#if 0
buzbee67bf8852011-08-17 17:51:35 -0700163 int valOffset = OFFSETOF_MEMBER(StaticField, value);
164 int tReg = oatAllocTemp(cUnit);
165 int objHead;
166 bool isVolatile;
167 bool isSputObject;
168 const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
169 mir->meta.calleeMethod : cUnit->method;
170 void* fieldPtr = (void*)
171 (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
172 Opcode opcode = mir->dalvikInsn.opcode;
173
174 if (fieldPtr == NULL) {
175 // FIXME: need to handle this case for oat();
176 UNIMPLEMENTED(FATAL);
177 }
178
179#if ANDROID_SMP != 0
180 isVolatile = (opcode == OP_SPUT_VOLATILE) ||
181 (opcode == OP_SPUT_VOLATILE_JUMBO) ||
182 (opcode == OP_SPUT_OBJECT_VOLATILE) ||
183 (opcode == OP_SPUT_OBJECT_VOLATILE_JUMBO);
buzbeec143c552011-08-20 17:38:58 -0700184 assert(isVolatile == artIsVolatileField((Field *) fieldPtr));
buzbee67bf8852011-08-17 17:51:35 -0700185#else
buzbeec143c552011-08-20 17:38:58 -0700186 isVolatile = artIsVolatileField((Field *) fieldPtr);
buzbee67bf8852011-08-17 17:51:35 -0700187#endif
188
189 isSputObject = (opcode == OP_SPUT_OBJECT) ||
190 (opcode == OP_SPUT_OBJECT_VOLATILE);
191
192 rlSrc = oatGetSrc(cUnit, mir, 0);
193 rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
194 loadConstant(cUnit, tReg, (int) fieldPtr);
195 if (isSputObject) {
196 objHead = oatAllocTemp(cUnit);
197 loadWordDisp(cUnit, tReg, OFFSETOF_MEMBER(Field, clazz), objHead);
198 }
199 storeWordDisp(cUnit, tReg, valOffset ,rlSrc.lowReg);
200 oatFreeTemp(cUnit, tReg);
201 if (isVolatile) {
202 oatGenMemBarrier(cUnit, kSY);
203 }
204 if (isSputObject) {
205 /* NOTE: marking card based sfield->clazz */
206 markGCCard(cUnit, rlSrc.lowReg, objHead);
207 oatFreeTemp(cUnit, objHead);
208 }
buzbeec143c552011-08-20 17:38:58 -0700209#endif
buzbeee1931742011-08-28 21:15:53 -0700210 }
buzbee67bf8852011-08-17 17:51:35 -0700211}
212
213static void genSputWide(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
214{
buzbeee1931742011-08-28 21:15:53 -0700215 bool slow_path = true;
216 UNIMPLEMENTED(WARNING) << "Implement sput-wide fast path";
217 int funcOffset;
218 if (slow_path) {
219 funcOffset = OFFSETOF_MEMBER(Thread, pSet64Static);
220 oatFlushAllRegs(cUnit);
221 loadWordDisp(cUnit, rSELF, funcOffset, rLR);
222 loadConstant(cUnit, r0, mir->dalvikInsn.vB);
223 loadCurrMethodDirect(cUnit, r1);
224 loadValueDirectWideFixed(cUnit, rlSrc, r2, r3);
225 opReg(cUnit, kOpBlx, rLR);
226 oatClobberCallRegs(cUnit);
227 } else {
228 UNIMPLEMENTED(FATAL) << "Must update for new world";
buzbeec143c552011-08-20 17:38:58 -0700229#if 0
buzbee67bf8852011-08-17 17:51:35 -0700230 int tReg = oatAllocTemp(cUnit);
231 int valOffset = OFFSETOF_MEMBER(StaticField, value);
232 const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
233 mir->meta.calleeMethod : cUnit->method;
234 void* fieldPtr = (void*)
235 (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
236
237 if (fieldPtr == NULL) {
238 // FIXME: need to handle this case for oat();
239 UNIMPLEMENTED(FATAL);
240 }
241
242 rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
243 rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
244 loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
245
246 storePair(cUnit, tReg, rlSrc.lowReg, rlSrc.highReg);
buzbeec143c552011-08-20 17:38:58 -0700247#endif
buzbeee1931742011-08-28 21:15:53 -0700248 }
buzbee67bf8852011-08-17 17:51:35 -0700249}
250
251
252
253static void genSgetWide(CompilationUnit* cUnit, MIR* mir,
254 RegLocation rlResult, RegLocation rlDest)
255{
buzbeee1931742011-08-28 21:15:53 -0700256 bool slow_path = true;
257 UNIMPLEMENTED(WARNING) << "Implement sget-wide fast path";
258 int funcOffset;
259 if (slow_path) {
260 funcOffset = OFFSETOF_MEMBER(Thread, pGet64Static);
261 oatFlushAllRegs(cUnit);
262 loadWordDisp(cUnit, rSELF, funcOffset, rLR);
263 loadConstant(cUnit, r0, mir->dalvikInsn.vB);
264 loadCurrMethodDirect(cUnit, r1);
265 opReg(cUnit, kOpBlx, rLR);
266 RegLocation rlResult = oatGetReturnWide(cUnit);
267 storeValueWide(cUnit, rlDest, rlResult);
268 } else {
269 UNIMPLEMENTED(FATAL) << "Must update for new world";
buzbeec143c552011-08-20 17:38:58 -0700270#if 0
buzbee67bf8852011-08-17 17:51:35 -0700271 int valOffset = OFFSETOF_MEMBER(StaticField, value);
272 const Method *method = (mir->OptimizationFlags & MIR_CALLEE) ?
273 mir->meta.calleeMethod : cUnit->method;
274 void* fieldPtr = (void*)
275 (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
276
277 if (fieldPtr == NULL) {
278 // FIXME: need to handle this case for oat();
279 UNIMPLEMENTED(FATAL);
280 }
281
282 int tReg = oatAllocTemp(cUnit);
283 rlDest = oatGetDestWide(cUnit, mir, 0, 1);
284 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
285 loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
286
287 loadPair(cUnit, tReg, rlResult.lowReg, rlResult.highReg);
288
289 storeValueWide(cUnit, rlDest, rlResult);
buzbeec143c552011-08-20 17:38:58 -0700290#endif
buzbeee1931742011-08-28 21:15:53 -0700291 }
buzbee67bf8852011-08-17 17:51:35 -0700292}
293
294static void genSget(CompilationUnit* cUnit, MIR* mir,
295 RegLocation rlResult, RegLocation rlDest)
296{
buzbeee1931742011-08-28 21:15:53 -0700297 bool slow_path = true;
298 bool isObject = ((mir->dalvikInsn.opcode == OP_SGET_OBJECT) ||
299 (mir->dalvikInsn.opcode == OP_SGET_OBJECT_VOLATILE));
300 UNIMPLEMENTED(WARNING) << "Implement sget fast path";
301 int funcOffset;
302 if (slow_path) {
303 if (isObject) {
304 funcOffset = OFFSETOF_MEMBER(Thread, pGetObjStatic);
305 } else {
306 funcOffset = OFFSETOF_MEMBER(Thread, pGet32Static);
307 }
308 oatFlushAllRegs(cUnit);
309 loadWordDisp(cUnit, rSELF, funcOffset, rLR);
310 loadConstant(cUnit, r0, mir->dalvikInsn.vB);
311 loadCurrMethodDirect(cUnit, r1);
312 opReg(cUnit, kOpBlx, rLR);
313 RegLocation rlResult = oatGetReturn(cUnit);
314 storeValue(cUnit, rlDest, rlResult);
315 } else {
316 UNIMPLEMENTED(FATAL) << "Must update for new world";
buzbeec143c552011-08-20 17:38:58 -0700317#if 0
buzbee67bf8852011-08-17 17:51:35 -0700318 int valOffset = OFFSETOF_MEMBER(StaticField, value);
319 int tReg = oatAllocTemp(cUnit);
320 bool isVolatile;
321 const Method *method = cUnit->method;
322 void* fieldPtr = (void*)
323 (method->clazz->pDvmDex->pResFields[mir->dalvikInsn.vB]);
324
325 if (fieldPtr == NULL) {
326 // FIXME: need to handle this case for oat();
327 UNIMPLEMENTED(FATAL);
328 }
329
330 /*
331 * On SMP systems, Dalvik opcodes found to be referencing
332 * volatile fields are rewritten to their _VOLATILE variant.
333 * However, this does not happen on non-SMP systems. The compiler
334 * still needs to know about volatility to avoid unsafe
335 * optimizations so we determine volatility based on either
336 * the opcode or the field access flags.
337 */
338#if ANDROID_SMP != 0
339 Opcode opcode = mir->dalvikInsn.opcode;
340 isVolatile = (opcode == OP_SGET_VOLATILE) ||
341 (opcode == OP_SGET_OBJECT_VOLATILE);
buzbeec143c552011-08-20 17:38:58 -0700342 assert(isVolatile == artIsVolatileField((Field *) fieldPtr));
buzbee67bf8852011-08-17 17:51:35 -0700343#else
buzbeec143c552011-08-20 17:38:58 -0700344 isVolatile = artIsVolatileField((Field *) fieldPtr);
buzbee67bf8852011-08-17 17:51:35 -0700345#endif
346
347 rlDest = oatGetDest(cUnit, mir, 0);
348 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
349 loadConstant(cUnit, tReg, (int) fieldPtr + valOffset);
350
351 if (isVolatile) {
352 oatGenMemBarrier(cUnit, kSY);
353 }
354 loadWordDisp(cUnit, tReg, 0, rlResult.lowReg);
355
356 storeValue(cUnit, rlDest, rlResult);
buzbeec143c552011-08-20 17:38:58 -0700357#endif
buzbeee1931742011-08-28 21:15:53 -0700358 }
buzbee67bf8852011-08-17 17:51:35 -0700359}
360
361typedef int (*NextCallInsn)(CompilationUnit*, MIR*, DecodedInstruction*, int);
362
363/*
364 * Bit of a hack here - in leiu of a real scheduling pass,
365 * emit the next instruction in static & direct invoke sequences.
366 */
367static int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
368 DecodedInstruction* dInsn, int state)
369{
buzbeec143c552011-08-20 17:38:58 -0700370 UNIMPLEMENTED(FATAL) << "Update with new cache model";
371#if 0
buzbee67bf8852011-08-17 17:51:35 -0700372 switch(state) {
373 case 0: // Get the current Method* [sets r0]
buzbeedfd3d702011-08-28 12:56:51 -0700374 loadCurrMethodDirect(cUnit, r0);
buzbee67bf8852011-08-17 17:51:35 -0700375 break;
376 case 1: // Get the pResMethods pointer [uses r0, sets r0]
buzbeec143c552011-08-20 17:38:58 -0700377 UNIMPLEMENTED(FATAL) << "Update with new cache";
buzbee67bf8852011-08-17 17:51:35 -0700378 loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, pResMethods),
379 r0, kWord, INVALID_SREG);
380 break;
381 case 2: // Get the target Method* [uses r0, sets r0]
382 loadBaseDisp(cUnit, mir, r0, dInsn->vB * 4, r0,
383 kWord, INVALID_SREG);
384 break;
385 case 3: // Get the target compiled code address [uses r0, sets rLR]
386 loadBaseDisp(cUnit, mir, r0,
387 OFFSETOF_MEMBER(Method, compiledInsns), rLR,
388 kWord, INVALID_SREG);
389 break;
390 default:
391 return -1;
392 }
buzbeec143c552011-08-20 17:38:58 -0700393#endif
buzbee67bf8852011-08-17 17:51:35 -0700394 return state + 1;
395}
396
buzbeec5ef0462011-08-25 18:44:49 -0700397// Slow path static & direct invoke launch sequence
398static int nextSDCallInsnSP(CompilationUnit* cUnit, MIR* mir,
399 DecodedInstruction* dInsn, int state)
400{
401 switch(state) {
402 case 0: // Get the current Method* [sets r0]
buzbeedfd3d702011-08-28 12:56:51 -0700403 loadCurrMethodDirect(cUnit, r0);
buzbeec5ef0462011-08-25 18:44:49 -0700404 break;
405 case 1: // Get the current Method->DeclaringClass() [sets r0]
406 loadBaseDisp(cUnit, mir, r0,
407 OFFSETOF_MEMBER(art::Method, declaring_class_),
408 r0, kWord, INVALID_SREG);
409 break;
410 case 2: // Method->DeclaringClass()->GetDexCache() [sets r0]
411 loadBaseDisp(cUnit, mir, r0,
412 OFFSETOF_MEMBER(art::Class, dex_cache_), r0, kWord,
413 INVALID_SREG);
414 break;
415 case 3: // Method->DeclaringClass()->GetDexCache()->methodsObjectArr
buzbee5cd21802011-08-26 10:40:14 -0700416 loadBaseDisp(cUnit, mir, r0,
Brian Carlstrom1caa2c22011-08-28 13:02:33 -0700417 art::DexCache::ResolvedMethodsOffset().Int32Value(), r0,
buzbee5cd21802011-08-26 10:40:14 -0700418 kWord, INVALID_SREG);
buzbeec5ef0462011-08-25 18:44:49 -0700419 break;
420 case 4: // Skip past the object header
421 opRegImm(cUnit, kOpAdd, r0, art::Array::DataOffset().Int32Value());
422 break;
423 case 5: // Get the target Method* [uses r0, sets r0]
424 loadBaseDisp(cUnit, mir, r0, dInsn->vB * 4, r0,
425 kWord, INVALID_SREG);
426 break;
427 case 6: // Get the target compiled code address [uses r0, sets rLR]
428 loadBaseDisp(cUnit, mir, r0, art::Method::GetCodeOffset(), rLR,
429 kWord, INVALID_SREG);
430 break;
431 default:
432 return -1;
433 }
434 return state + 1;
435}
436
buzbee67bf8852011-08-17 17:51:35 -0700437/*
438 * Bit of a hack here - in leiu of a real scheduling pass,
439 * emit the next instruction in a virtual invoke sequence.
440 * We can use rLR as a temp prior to target address loading
441 * Note also that we'll load the first argument ("this") into
442 * r1 here rather than the standard loadArgRegs.
443 */
444static int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
445 DecodedInstruction* dInsn, int state)
446{
buzbeec143c552011-08-20 17:38:58 -0700447 UNIMPLEMENTED(FATAL) << "Update with new cache model";
448#if 0
buzbee67bf8852011-08-17 17:51:35 -0700449 RegLocation rlArg;
450 switch(state) {
451 case 0: // Get the current Method* [set r0]
buzbeedfd3d702011-08-28 12:56:51 -0700452 loadCurrMethodDirect(cUnit, r0);
buzbee67bf8852011-08-17 17:51:35 -0700453 // Load "this" [set r1]
454 rlArg = oatGetSrc(cUnit, mir, 0);
455 loadValueDirectFixed(cUnit, rlArg, r1);
456 break;
457 case 1: // Get the pResMethods pointer [use r0, set r12]
458 loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, pResMethods),
459 r12, kWord, INVALID_SREG);
460 // Is "this" null? [use r1]
461 genNullCheck(cUnit, oatSSASrc(mir,0), r1,
462 mir->offset, NULL);
463 break;
464 case 2: // Get the base Method* [use r12, set r0]
465 loadBaseDisp(cUnit, mir, r12, dInsn->vB * 4, r0,
466 kWord, INVALID_SREG);
467 // get this->clazz [use r1, set rLR]
468 loadBaseDisp(cUnit, mir, r1, OFFSETOF_MEMBER(Object, clazz), rLR,
469 kWord, INVALID_SREG);
470 break;
471 case 3: // Get the method index [use r0, set r12]
472 loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, methodIndex),
473 r12, kUnsignedHalf, INVALID_SREG);
474 // get this->clazz->vtable [use rLR, set rLR]
475 loadBaseDisp(cUnit, mir, rLR,
buzbeec143c552011-08-20 17:38:58 -0700476 OFFSETOF_MEMBER(Class, vtable), rLR, kWord,
buzbee67bf8852011-08-17 17:51:35 -0700477 INVALID_SREG);
478 break;
479 case 4: // get target Method* [use rLR, use r12, set r0]
480 loadBaseIndexed(cUnit, rLR, r12, r0, 2, kWord);
481 break;
482 case 5: // Get the target compiled code address [use r0, set rLR]
buzbeec143c552011-08-20 17:38:58 -0700483 UNIMPLEMENTED(FATAL) << "Update with new cache";
buzbee67bf8852011-08-17 17:51:35 -0700484 loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, compiledInsns),
485 rLR, kWord, INVALID_SREG);
486 break;
487 default:
488 return -1;
489 }
buzbeec143c552011-08-20 17:38:58 -0700490#endif
buzbee67bf8852011-08-17 17:51:35 -0700491 return state + 1;
492}
493
buzbee7b1b86d2011-08-26 18:59:10 -0700494// Slow path sequence for virtual calls
495static int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir,
496 DecodedInstruction* dInsn, int state)
497{
498 RegLocation rlArg;
499 switch(state) {
500 case 0: // Get the current Method* [sets r0]
buzbeedfd3d702011-08-28 12:56:51 -0700501 loadCurrMethodDirect(cUnit, r0);
buzbee7b1b86d2011-08-26 18:59:10 -0700502 break;
503 case 1: // Get the current Method->DeclaringClass() [uses/sets r0]
504 loadBaseDisp(cUnit, mir, r0,
505 OFFSETOF_MEMBER(art::Method, declaring_class_),
506 r0, kWord, INVALID_SREG);
507 break;
508 case 2: // Method->DeclaringClass()->GetDexCache() [uses/sets r0]
509 loadBaseDisp(cUnit, mir, r0,
510 OFFSETOF_MEMBER(art::Class, dex_cache_), r0, kWord,
511 INVALID_SREG);
512 break;
513 case 3: // ...()->GetDexCache()->methodsObjectArr [uses/sets r0]
514 loadBaseDisp(cUnit, mir, r0,
Brian Carlstrom1caa2c22011-08-28 13:02:33 -0700515 art::DexCache::ResolvedMethodsOffset().Int32Value(), r0,
buzbee7b1b86d2011-08-26 18:59:10 -0700516 kWord, INVALID_SREG);
517 // Load "this" [set r1]
518 rlArg = oatGetSrc(cUnit, mir, 0);
519 loadValueDirectFixed(cUnit, rlArg, r1);
520 // Skip past the object header
521 opRegImm(cUnit, kOpAdd, r0, art::Array::DataOffset().Int32Value());
522 break;
523 case 4:
524 // Is "this" null? [use r1]
525 genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir->offset, NULL);
526 // get this->clazz [use r1, set rLR]
527 loadBaseDisp(cUnit, mir, r1, OFFSETOF_MEMBER(Object, klass_), rLR,
528 kWord, INVALID_SREG);
529 // Get the base Method* [uses r0, sets r0]
530 loadBaseDisp(cUnit, mir, r0, dInsn->vB * 4, r0,
531 kWord, INVALID_SREG);
532 // get this->clazz->vtable [use rLR, set rLR]
533 loadBaseDisp(cUnit, mir, rLR,
534 OFFSETOF_MEMBER(Class, vtable_), rLR, kWord,
535 INVALID_SREG);
536 // Get the method index [use r0, set r12]
537 loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, method_index_),
538 r12, kUnsignedHalf, INVALID_SREG);
539 // Skip past the object header
540 opRegImm(cUnit, kOpAdd, rLR, art::Array::DataOffset().Int32Value());
541 // Get target Method*
542 loadBaseIndexed(cUnit, rLR, r12, r0, 2, kWord);
543 break;
544 case 5: // Get the target compiled code address [uses r0, sets rLR]
545 loadBaseDisp(cUnit, mir, r0, art::Method::GetCodeOffset(), rLR,
546 kWord, INVALID_SREG);
547 break;
548 default:
549 return -1;
550 }
551 return state + 1;
552}
553
buzbee67bf8852011-08-17 17:51:35 -0700554/* Load up to 3 arguments in r1..r3 */
555static int loadArgRegs(CompilationUnit* cUnit, MIR* mir,
556 DecodedInstruction* dInsn, int callState,
557 int *args, NextCallInsn nextCallInsn)
558{
559 for (int i = 0; i < 3; i++) {
560 if (args[i] != INVALID_REG) {
561 RegLocation rlArg = oatGetSrc(cUnit, mir, i);
562 loadValueDirectFixed(cUnit, rlArg, r1 + i);
563 callState = nextCallInsn(cUnit, mir, dInsn, callState);
564 }
565 }
566 return callState;
567}
568
569/*
570 * Interleave launch code for INVOKE_INTERFACE. The target is
571 * identified using artFindInterfaceMethodInCache(class, ref, method, dex)
572 * Note that we'll have to reload "this" following the helper call.
573 *
574 * FIXME: do we need to have artFindInterfaceMethodInCache return
575 * a NULL if not found so we can throw exception here? Otherwise,
576 * may need to pass some additional info to allow the helper function
577 * to throw on its own.
578 */
579static int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir,
580 DecodedInstruction* dInsn, int state)
581{
buzbeec143c552011-08-20 17:38:58 -0700582 UNIMPLEMENTED(FATAL) << "Update with new cache model";
583#if 0
buzbee67bf8852011-08-17 17:51:35 -0700584 RegLocation rlArg;
585 switch(state) {
586 case 0:
587 // Load "this" [set r12]
588 rlArg = oatGetSrc(cUnit, mir, 0);
589 loadValueDirectFixed(cUnit, rlArg, r12);
590 // Get the current Method* [set arg2]
buzbeedfd3d702011-08-28 12:56:51 -0700591 loadCurrMethodDirect(cUnit, r2);
buzbee67bf8852011-08-17 17:51:35 -0700592 // Is "this" null? [use r12]
593 genNullCheck(cUnit, oatSSASrc(mir,0), r12,
594 mir->offset, NULL);
595 // Get curMethod->clazz [set arg3]
596 loadBaseDisp(cUnit, mir, r2, OFFSETOF_MEMBER(Method, clazz),
597 r3, kWord, INVALID_SREG);
598 // Load this->class [usr r12, set arg0]
buzbeec143c552011-08-20 17:38:58 -0700599 loadBaseDisp(cUnit, mir, r12, OFFSETOF_MEMBER(Class, clazz),
buzbee67bf8852011-08-17 17:51:35 -0700600 r3, kWord, INVALID_SREG);
601 // Load address of helper function
602 loadBaseDisp(cUnit, mir, rSELF,
603 OFFSETOF_MEMBER(Thread, pArtFindInterfaceMethodInCache),
604 rLR, kWord, INVALID_SREG);
605 // Get dvmDex
buzbeec143c552011-08-20 17:38:58 -0700606 loadBaseDisp(cUnit, mir, r3, OFFSETOF_MEMBER(Class, pDvmDex),
buzbee67bf8852011-08-17 17:51:35 -0700607 r3, kWord, INVALID_SREG);
608 // Load ref [set arg1]
609 loadConstant(cUnit, r1, dInsn->vB);
610 // Call out to helper, target Method returned in ret0
611 newLIR1(cUnit, kThumbBlxR, rLR);
612 break;
613 case 1: // Get the target compiled code address [use r0, set rLR]
614 loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, compiledInsns),
615 rLR, kWord, INVALID_SREG);
616 default:
617 return -1;
618 }
buzbeec143c552011-08-20 17:38:58 -0700619#endif
buzbee67bf8852011-08-17 17:51:35 -0700620 return state + 1;
621}
622
623
624/*
625 * Interleave launch code for INVOKE_SUPER. See comments
626 * for nextVCallIns.
627 */
628static int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
629 DecodedInstruction* dInsn, int state)
630{
buzbeec143c552011-08-20 17:38:58 -0700631 UNIMPLEMENTED(FATAL) << "Update with new cache model";
632#if 0
buzbee67bf8852011-08-17 17:51:35 -0700633 RegLocation rlArg;
634 switch(state) {
635 case 0:
636 // Get the current Method* [set r0]
buzbeedfd3d702011-08-28 12:56:51 -0700637 loadCurrMethodDirect(cUnit, r0);
buzbee67bf8852011-08-17 17:51:35 -0700638 // Load "this" [set r1]
639 rlArg = oatGetSrc(cUnit, mir, 0);
640 loadValueDirectFixed(cUnit, rlArg, r1);
641 // Get method->clazz [use r0, set r12]
642 loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, clazz),
643 r12, kWord, INVALID_SREG);
644 // Get pResmethods [use r0, set rLR]
645 loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, pResMethods),
646 rLR, kWord, INVALID_SREG);
647 // Get clazz->super [use r12, set r12]
buzbeec143c552011-08-20 17:38:58 -0700648 loadBaseDisp(cUnit, mir, r12, OFFSETOF_MEMBER(Class, super),
buzbee67bf8852011-08-17 17:51:35 -0700649 r12, kWord, INVALID_SREG);
650 // Get base method [use rLR, set r0]
651 loadBaseDisp(cUnit, mir, rLR, dInsn->vB * 4, r0,
652 kWord, INVALID_SREG);
653 // Is "this" null? [use r1]
654 genNullCheck(cUnit, oatSSASrc(mir,0), r1,
655 mir->offset, NULL);
656 // Get methodIndex [use r0, set rLR]
657 loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, methodIndex),
658 rLR, kUnsignedHalf, INVALID_SREG);
659 // Get vtableCount [use r12, set r0]
660 loadBaseDisp(cUnit, mir, r12,
buzbeec143c552011-08-20 17:38:58 -0700661 OFFSETOF_MEMBER(Class, vtableCount),
buzbee67bf8852011-08-17 17:51:35 -0700662 r0, kWord, INVALID_SREG);
663 // Compare method index w/ vtable count [use r12, use rLR]
664 genRegRegCheck(cUnit, kArmCondGe, rLR, r0, mir->offset, NULL);
665 // get target Method* [use rLR, use r12, set r0]
666 loadBaseIndexed(cUnit, r0, r12, rLR, 2, kWord);
667 case 1: // Get the target compiled code address [use r0, set rLR]
668 loadBaseDisp(cUnit, mir, r0, OFFSETOF_MEMBER(Method, compiledInsns),
669 rLR, kWord, INVALID_SREG);
670 default:
671 return -1;
672 }
buzbeec143c552011-08-20 17:38:58 -0700673#endif
buzbee67bf8852011-08-17 17:51:35 -0700674 return state + 1;
675}
676
677/*
678 * Load up to 5 arguments, the first three of which will be in
679 * r1 .. r3. On entry r0 contains the current method pointer,
680 * and as part of the load sequence, it must be replaced with
681 * the target method pointer. Note, this may also be called
682 * for "range" variants if the number of arguments is 5 or fewer.
683 */
684static int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
685 DecodedInstruction* dInsn, int callState,
686 ArmLIR** pcrLabel, bool isRange,
687 NextCallInsn nextCallInsn)
688{
689 RegLocation rlArg;
690 int registerArgs[3];
691
692 /* If no arguments, just return */
693 if (dInsn->vA == 0)
694 return callState;
695
696 oatLockAllTemps(cUnit);
697 callState = nextCallInsn(cUnit, mir, dInsn, callState);
698
699 /*
700 * Load frame arguments arg4 & arg5 first. Coded a little odd to
701 * pre-schedule the method pointer target.
702 */
703 for (unsigned int i=3; i < dInsn->vA; i++) {
704 int reg;
705 int arg = (isRange) ? dInsn->vC + i : i;
706 rlArg = oatUpdateLoc(cUnit, oatGetSrc(cUnit, mir, arg));
707 if (rlArg.location == kLocPhysReg) {
708 reg = rlArg.lowReg;
709 } else {
710 reg = r1;
711 loadValueDirectFixed(cUnit, rlArg, r1);
712 callState = nextCallInsn(cUnit, mir, dInsn, callState);
713 }
714 storeBaseDisp(cUnit, rSP, (i + 1) * 4, reg, kWord);
715 callState = nextCallInsn(cUnit, mir, dInsn, callState);
716 }
717
718 /* Load register arguments r1..r3 */
719 for (unsigned int i = 0; i < 3; i++) {
720 if (i < dInsn->vA)
721 registerArgs[i] = (isRange) ? dInsn->vC + i : i;
722 else
723 registerArgs[i] = INVALID_REG;
724 }
725 callState = loadArgRegs(cUnit, mir, dInsn, callState, registerArgs,
726 nextCallInsn);
727
728 // Load direct & need a "this" null check?
729 if (pcrLabel) {
730 *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1,
731 mir->offset, NULL);
732 }
733 return callState;
734}
735
736/*
737 * May have 0+ arguments (also used for jumbo). Note that
738 * source virtual registers may be in physical registers, so may
739 * need to be flushed to home location before copying. This
740 * applies to arg3 and above (see below).
741 *
742 * Two general strategies:
743 * If < 20 arguments
744 * Pass args 3-18 using vldm/vstm block copy
745 * Pass arg0, arg1 & arg2 in r1-r3
746 * If 20+ arguments
747 * Pass args arg19+ using memcpy block copy
748 * Pass arg0, arg1 & arg2 in r1-r3
749 *
750 */
751static int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
752 DecodedInstruction* dInsn, int callState,
753 ArmLIR** pcrLabel, NextCallInsn nextCallInsn)
754{
755 int firstArg = dInsn->vC;
756 int numArgs = dInsn->vA;
757
758 // If we can treat it as non-range (Jumbo ops will use range form)
759 if (numArgs <= 5)
760 return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
761 true, nextCallInsn);
762 /*
763 * Make sure range list doesn't span the break between in normal
764 * Dalvik vRegs and the ins.
765 */
766 int highestVreg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
buzbeec143c552011-08-20 17:38:58 -0700767 if (highestVreg >= cUnit->method->num_registers_ -
768 cUnit->method->num_ins_) {
buzbee67bf8852011-08-17 17:51:35 -0700769 LOG(FATAL) << "Wide argument spanned locals & args";
770 }
771
772 /*
773 * First load the non-register arguments. Both forms expect all
774 * of the source arguments to be in their home frame location, so
775 * scan the sReg names and flush any that have been promoted to
776 * frame backing storage.
777 */
778 // Scan the rest of the args - if in physReg flush to memory
779 for (int i = 4; i < numArgs; i++) {
780 RegLocation loc = oatUpdateLoc(cUnit,
781 oatGetSrc(cUnit, mir, i));
782 if (loc.location == kLocPhysReg) { // TUNING: if dirty?
783 storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
784 callState = nextCallInsn(cUnit, mir, dInsn, callState);
785 }
786 }
787
788 int startOffset = cUnit->regLocation[mir->ssaRep->uses[3]].spOffset;
789 int outsOffset = 4 /* Method* */ + (3 * 4);
790 if (numArgs >= 20) {
791 // Generate memcpy, but first make sure all of
792 opRegRegImm(cUnit, kOpAdd, r0, rSP, startOffset);
793 opRegRegImm(cUnit, kOpAdd, r1, rSP, outsOffset);
794 loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pMemcpy), rLR);
795 loadConstant(cUnit, r2, (numArgs - 3) * 4);
796 newLIR1(cUnit, kThumbBlxR, rLR);
797 } else {
798 // Use vldm/vstm pair using r3 as a temp
buzbeec143c552011-08-20 17:38:58 -0700799 int regsLeft = std::min(numArgs - 3, 16);
buzbee67bf8852011-08-17 17:51:35 -0700800 callState = nextCallInsn(cUnit, mir, dInsn, callState);
801 opRegRegImm(cUnit, kOpAdd, r3, rSP, startOffset);
802 newLIR3(cUnit, kThumb2Vldms, r3, fr0 & FP_REG_MASK, regsLeft);
803 callState = nextCallInsn(cUnit, mir, dInsn, callState);
804 opRegRegImm(cUnit, kOpAdd, r3, rSP, 4 /* Method* */ + (3 * 4));
805 callState = nextCallInsn(cUnit, mir, dInsn, callState);
806 newLIR3(cUnit, kThumb2Vstms, r3, fr0 & FP_REG_MASK, regsLeft);
807 callState = nextCallInsn(cUnit, mir, dInsn, callState);
808 }
809
810 // Handle the 1st 3 in r1, r2 & r3
811 for (unsigned int i = 0; i < dInsn->vA && i < 3; i++) {
812 RegLocation loc = oatGetSrc(cUnit, mir, firstArg + i);
813 loadValueDirectFixed(cUnit, loc, r1 + i);
814 callState = nextCallInsn(cUnit, mir, dInsn, callState);
815 }
816
817 // Finally, deal with the register arguments
818 // We'll be using fixed registers here
819 oatLockAllTemps(cUnit);
820 callState = nextCallInsn(cUnit, mir, dInsn, callState);
821 return callState;
822}
823
824static void genInvokeStatic(CompilationUnit* cUnit, MIR* mir)
825{
826 DecodedInstruction* dInsn = &mir->dalvikInsn;
827 int callState = 0;
buzbeec5ef0462011-08-25 18:44:49 -0700828 int fastPath = false; // TODO: set based on resolution results
829
830 NextCallInsn nextCallInsn = fastPath ? nextSDCallInsn : nextSDCallInsnSP;
831
buzbee67bf8852011-08-17 17:51:35 -0700832 if (mir->dalvikInsn.opcode == OP_INVOKE_STATIC) {
833 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, NULL,
buzbeec5ef0462011-08-25 18:44:49 -0700834 false, nextCallInsn);
buzbee67bf8852011-08-17 17:51:35 -0700835 } else {
836 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, NULL,
buzbeec5ef0462011-08-25 18:44:49 -0700837 nextCallInsn);
buzbee67bf8852011-08-17 17:51:35 -0700838 }
839 // Finish up any of the call sequence not interleaved in arg loading
840 while (callState >= 0) {
buzbeec5ef0462011-08-25 18:44:49 -0700841 callState = nextCallInsn(cUnit, mir, dInsn, callState);
buzbee67bf8852011-08-17 17:51:35 -0700842 }
843 newLIR1(cUnit, kThumbBlxR, rLR);
844}
845
846static void genInvokeDirect(CompilationUnit* cUnit, MIR* mir)
847{
848 DecodedInstruction* dInsn = &mir->dalvikInsn;
849 int callState = 0;
850 ArmLIR* nullCk;
buzbee7b1b86d2011-08-26 18:59:10 -0700851 int fastPath = false; // TODO: set based on resolution results
852
853 NextCallInsn nextCallInsn = fastPath ? nextSDCallInsn : nextSDCallInsnSP;
buzbee67bf8852011-08-17 17:51:35 -0700854 if (mir->dalvikInsn.opcode == OP_INVOKE_DIRECT)
855 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee7b1b86d2011-08-26 18:59:10 -0700856 false, nextCallInsn);
buzbee67bf8852011-08-17 17:51:35 -0700857 else
858 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee7b1b86d2011-08-26 18:59:10 -0700859 nextCallInsn);
buzbee67bf8852011-08-17 17:51:35 -0700860 // Finish up any of the call sequence not interleaved in arg loading
861 while (callState >= 0) {
buzbee7b1b86d2011-08-26 18:59:10 -0700862 callState = nextCallInsn(cUnit, mir, dInsn, callState);
buzbee67bf8852011-08-17 17:51:35 -0700863 }
864 newLIR1(cUnit, kThumbBlxR, rLR);
865}
866
867static void genInvokeInterface(CompilationUnit* cUnit, MIR* mir)
868{
869 DecodedInstruction* dInsn = &mir->dalvikInsn;
870 int callState = 0;
871 ArmLIR* nullCk;
872 /* Note: must call nextInterfaceCallInsn() prior to 1st argument load */
873 callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState);
874 if (mir->dalvikInsn.opcode == OP_INVOKE_INTERFACE)
875 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
876 false, nextInterfaceCallInsn);
877 else
878 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
879 nextInterfaceCallInsn);
880 // Finish up any of the call sequence not interleaved in arg loading
881 while (callState >= 0) {
882 callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState);
883 }
884 newLIR1(cUnit, kThumbBlxR, rLR);
885}
886
887static void genInvokeSuper(CompilationUnit* cUnit, MIR* mir)
888{
889 DecodedInstruction* dInsn = &mir->dalvikInsn;
890 int callState = 0;
891 ArmLIR* nullCk;
892// FIXME - redundantly loading arg0/r1 ("this")
893 if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER)
894 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
895 false, nextSuperCallInsn);
896 else
897 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
898 nextSuperCallInsn);
899 // Finish up any of the call sequence not interleaved in arg loading
900 while (callState >= 0) {
901 callState = nextSuperCallInsn(cUnit, mir, dInsn, callState);
902 }
903 newLIR1(cUnit, kThumbBlxR, rLR);
904}
905
906static void genInvokeVirtual(CompilationUnit* cUnit, MIR* mir)
907{
908 DecodedInstruction* dInsn = &mir->dalvikInsn;
909 int callState = 0;
910 ArmLIR* nullCk;
buzbee7b1b86d2011-08-26 18:59:10 -0700911 int fastPath = false; // TODO: set based on resolution results
912
913 NextCallInsn nextCallInsn = fastPath ? nextVCallInsn : nextVCallInsnSP;
914 // TODO - redundantly loading arg0/r1 ("this")
buzbee67bf8852011-08-17 17:51:35 -0700915 if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL)
916 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee7b1b86d2011-08-26 18:59:10 -0700917 false, nextCallInsn);
buzbee67bf8852011-08-17 17:51:35 -0700918 else
919 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee7b1b86d2011-08-26 18:59:10 -0700920 nextCallInsn);
buzbee67bf8852011-08-17 17:51:35 -0700921 // Finish up any of the call sequence not interleaved in arg loading
922 while (callState >= 0) {
buzbee7b1b86d2011-08-26 18:59:10 -0700923 callState = nextCallInsn(cUnit, mir, dInsn, callState);
buzbee67bf8852011-08-17 17:51:35 -0700924 }
925 newLIR1(cUnit, kThumbBlxR, rLR);
926}
927
928// TODO: break out the case handlers. Might make it easier to support x86
929static bool compileDalvikInstruction(CompilationUnit* cUnit, MIR* mir,
930 BasicBlock* bb, ArmLIR* labelList)
931{
932 bool res = false; // Assume success
933 RegLocation rlSrc[3];
934 RegLocation rlDest = badLoc;
935 RegLocation rlResult = badLoc;
936 Opcode opcode = mir->dalvikInsn.opcode;
937
938 /* Prep Src and Dest locations */
939 int nextSreg = 0;
940 int nextLoc = 0;
941 int attrs = oatDataFlowAttributes[opcode];
942 rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
943 if (attrs & DF_UA) {
944 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
945 nextSreg++;
946 } else if (attrs & DF_UA_WIDE) {
947 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
948 nextSreg + 1);
949 nextSreg+= 2;
950 }
951 if (attrs & DF_UB) {
952 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
953 nextSreg++;
954 } else if (attrs & DF_UB_WIDE) {
955 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
956 nextSreg + 1);
957 nextSreg+= 2;
958 }
959 if (attrs & DF_UC) {
960 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
961 } else if (attrs & DF_UC_WIDE) {
962 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
963 nextSreg + 1);
964 }
965 if (attrs & DF_DA) {
966 rlDest = oatGetDest(cUnit, mir, 0);
967 } else if (attrs & DF_DA_WIDE) {
968 rlDest = oatGetDestWide(cUnit, mir, 0, 1);
969 }
970
971 switch(opcode) {
972 case OP_NOP:
973 break;
974
975 case OP_MOVE_EXCEPTION:
976 int exOffset;
977 int resetReg;
buzbeec143c552011-08-20 17:38:58 -0700978 exOffset = Thread::ExceptionOffset().Int32Value();
buzbee67bf8852011-08-17 17:51:35 -0700979 resetReg = oatAllocTemp(cUnit);
980 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
981 loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
982 loadConstant(cUnit, resetReg, 0);
983 storeWordDisp(cUnit, rSELF, exOffset, resetReg);
984 storeValue(cUnit, rlDest, rlResult);
985 break;
986
987 case OP_RETURN_VOID:
988 break;
989
990 case OP_RETURN:
991 case OP_RETURN_OBJECT:
992 storeValue(cUnit, retLoc, rlSrc[0]);
993 break;
994
995 case OP_RETURN_WIDE:
996 rlDest = retLocWide;
997 rlDest.fp = rlSrc[0].fp;
998 storeValueWide(cUnit, rlDest, rlSrc[0]);
999 break;
1000
1001 case OP_MOVE_RESULT_WIDE:
1002 if (mir->OptimizationFlags & MIR_INLINED)
1003 break; // Nop - combined w/ previous invoke
1004 /*
1005 * Somewhat hacky here. Because we're now passing
1006 * return values in registers, we have to let the
1007 * register allocation utilities know that the return
1008 * registers are live and may not be used for address
1009 * formation in storeValueWide.
1010 */
1011 assert(retLocWide.lowReg == r0);
1012 assert(retLocWide.lowReg == r1);
1013 oatLockTemp(cUnit, retLocWide.lowReg);
1014 oatLockTemp(cUnit, retLocWide.highReg);
1015 storeValueWide(cUnit, rlDest, retLocWide);
1016 oatFreeTemp(cUnit, retLocWide.lowReg);
1017 oatFreeTemp(cUnit, retLocWide.highReg);
1018 break;
1019
1020 case OP_MOVE_RESULT:
1021 case OP_MOVE_RESULT_OBJECT:
1022 if (mir->OptimizationFlags & MIR_INLINED)
1023 break; // Nop - combined w/ previous invoke
1024 /* See comment for OP_MOVE_RESULT_WIDE */
1025 assert(retLoc.lowReg == r0);
1026 oatLockTemp(cUnit, retLoc.lowReg);
1027 storeValue(cUnit, rlDest, retLoc);
1028 oatFreeTemp(cUnit, retLoc.lowReg);
1029 break;
1030
1031 case OP_MOVE:
1032 case OP_MOVE_OBJECT:
1033 case OP_MOVE_16:
1034 case OP_MOVE_OBJECT_16:
1035 case OP_MOVE_FROM16:
1036 case OP_MOVE_OBJECT_FROM16:
1037 storeValue(cUnit, rlDest, rlSrc[0]);
1038 break;
1039
1040 case OP_MOVE_WIDE:
1041 case OP_MOVE_WIDE_16:
1042 case OP_MOVE_WIDE_FROM16:
1043 storeValueWide(cUnit, rlDest, rlSrc[0]);
1044 break;
1045
1046 case OP_CONST:
1047 case OP_CONST_4:
1048 case OP_CONST_16:
1049 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1050 loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
1051 storeValue(cUnit, rlDest, rlResult);
1052 break;
1053
1054 case OP_CONST_HIGH16:
1055 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1056 loadConstantNoClobber(cUnit, rlResult.lowReg,
1057 mir->dalvikInsn.vB << 16);
1058 storeValue(cUnit, rlDest, rlResult);
1059 break;
1060
1061 case OP_CONST_WIDE_16:
1062 case OP_CONST_WIDE_32:
1063 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1064 loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
1065 //TUNING: do high separately to avoid load dependency
1066 opRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
1067 storeValueWide(cUnit, rlDest, rlResult);
1068 break;
1069
1070 case OP_CONST_WIDE:
1071 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1072 loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
buzbee54330722011-08-23 16:46:55 -07001073 mir->dalvikInsn.vB_wide & 0xffffffff,
1074 (mir->dalvikInsn.vB_wide >> 32) & 0xffffffff);
buzbee3ea4ec52011-08-22 17:37:19 -07001075 storeValueWide(cUnit, rlDest, rlResult);
buzbee67bf8852011-08-17 17:51:35 -07001076 break;
1077
1078 case OP_CONST_WIDE_HIGH16:
1079 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1080 loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
1081 0, mir->dalvikInsn.vB << 16);
buzbee7b1b86d2011-08-26 18:59:10 -07001082 storeValueWide(cUnit, rlDest, rlResult);
buzbee67bf8852011-08-17 17:51:35 -07001083 break;
1084
1085 case OP_MONITOR_ENTER:
1086 genMonitorEnter(cUnit, mir, rlSrc[0]);
1087 break;
1088
1089 case OP_MONITOR_EXIT:
1090 genMonitorExit(cUnit, mir, rlSrc[0]);
1091 break;
1092
1093 case OP_CHECK_CAST:
1094 genCheckCast(cUnit, mir, rlSrc[0]);
1095 break;
1096
1097 case OP_INSTANCE_OF:
1098 genInstanceof(cUnit, mir, rlDest, rlSrc[0]);
1099 break;
1100
1101 case OP_NEW_INSTANCE:
1102 genNewInstance(cUnit, mir, rlDest);
1103 break;
1104
1105 case OP_THROW:
1106 genThrow(cUnit, mir, rlSrc[0]);
1107 break;
1108
1109 case OP_ARRAY_LENGTH:
1110 int lenOffset;
buzbeec143c552011-08-20 17:38:58 -07001111 lenOffset = Array::LengthOffset().Int32Value();
buzbee7b1b86d2011-08-26 18:59:10 -07001112 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
buzbee67bf8852011-08-17 17:51:35 -07001113 genNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg,
1114 mir->offset, NULL);
1115 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1116 loadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset,
1117 rlResult.lowReg);
1118 storeValue(cUnit, rlDest, rlResult);
1119 break;
1120
1121 case OP_CONST_STRING:
1122 case OP_CONST_STRING_JUMBO:
1123 genConstString(cUnit, mir, rlDest, rlSrc[0]);
1124 break;
1125
1126 case OP_CONST_CLASS:
1127 genConstClass(cUnit, mir, rlDest, rlSrc[0]);
1128 break;
1129
1130 case OP_FILL_ARRAY_DATA:
1131 genFillArrayData(cUnit, mir, rlSrc[0]);
1132 break;
1133
1134 case OP_FILLED_NEW_ARRAY:
1135 genFilledNewArray(cUnit, mir, false /* not range */);
1136 break;
1137
1138 case OP_FILLED_NEW_ARRAY_RANGE:
1139 genFilledNewArray(cUnit, mir, true /* range */);
1140 break;
1141
1142 case OP_NEW_ARRAY:
1143 genNewArray(cUnit, mir, rlDest, rlSrc[0]);
1144 break;
1145
1146 case OP_GOTO:
1147 case OP_GOTO_16:
1148 case OP_GOTO_32:
1149 // TUNING: add MIR flag to disable when unnecessary
1150 bool backwardBranch;
1151 backwardBranch = (bb->taken->startOffset <= mir->offset);
1152 if (backwardBranch) {
1153 genSuspendPoll(cUnit, mir);
1154 }
1155 genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
1156 break;
1157
1158 case OP_PACKED_SWITCH:
1159 genPackedSwitch(cUnit, mir, rlSrc[0]);
1160 break;
1161
1162 case OP_SPARSE_SWITCH:
1163 genSparseSwitch(cUnit, mir, rlSrc[0]);
1164 break;
1165
1166 case OP_CMPL_FLOAT:
1167 case OP_CMPG_FLOAT:
1168 case OP_CMPL_DOUBLE:
1169 case OP_CMPG_DOUBLE:
1170 res = genCmpFP(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1171 break;
1172
1173 case OP_CMP_LONG:
1174 genCmpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1175 break;
1176
1177 case OP_IF_EQ:
1178 case OP_IF_NE:
1179 case OP_IF_LT:
1180 case OP_IF_GE:
1181 case OP_IF_GT:
1182 case OP_IF_LE: {
1183 bool backwardBranch;
1184 ArmConditionCode cond;
1185 backwardBranch = (bb->taken->startOffset <= mir->offset);
1186 if (backwardBranch) {
1187 genSuspendPoll(cUnit, mir);
1188 }
1189 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1190 rlSrc[1] = loadValue(cUnit, rlSrc[1], kCoreReg);
1191 opRegReg(cUnit, kOpCmp, rlSrc[0].lowReg, rlSrc[1].lowReg);
1192 switch(opcode) {
1193 case OP_IF_EQ:
1194 cond = kArmCondEq;
1195 break;
1196 case OP_IF_NE:
1197 cond = kArmCondNe;
1198 break;
1199 case OP_IF_LT:
1200 cond = kArmCondLt;
1201 break;
1202 case OP_IF_GE:
1203 cond = kArmCondGe;
1204 break;
1205 case OP_IF_GT:
1206 cond = kArmCondGt;
1207 break;
1208 case OP_IF_LE:
1209 cond = kArmCondLe;
1210 break;
1211 default:
1212 cond = (ArmConditionCode)0;
1213 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1214 }
1215 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1216 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1217 break;
1218 }
1219
1220 case OP_IF_EQZ:
1221 case OP_IF_NEZ:
1222 case OP_IF_LTZ:
1223 case OP_IF_GEZ:
1224 case OP_IF_GTZ:
1225 case OP_IF_LEZ: {
1226 bool backwardBranch;
1227 ArmConditionCode cond;
1228 backwardBranch = (bb->taken->startOffset <= mir->offset);
1229 if (backwardBranch) {
1230 genSuspendPoll(cUnit, mir);
1231 }
1232 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1233 opRegImm(cUnit, kOpCmp, rlSrc[0].lowReg, 0);
1234 switch(opcode) {
1235 case OP_IF_EQZ:
1236 cond = kArmCondEq;
1237 break;
1238 case OP_IF_NEZ:
1239 cond = kArmCondNe;
1240 break;
1241 case OP_IF_LTZ:
1242 cond = kArmCondLt;
1243 break;
1244 case OP_IF_GEZ:
1245 cond = kArmCondGe;
1246 break;
1247 case OP_IF_GTZ:
1248 cond = kArmCondGt;
1249 break;
1250 case OP_IF_LEZ:
1251 cond = kArmCondLe;
1252 break;
1253 default:
1254 cond = (ArmConditionCode)0;
1255 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1256 }
1257 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1258 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1259 break;
1260 }
1261
1262 case OP_AGET_WIDE:
1263 genArrayGet(cUnit, mir, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
1264 break;
1265 case OP_AGET:
1266 case OP_AGET_OBJECT:
1267 genArrayGet(cUnit, mir, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
1268 break;
1269 case OP_AGET_BOOLEAN:
1270 genArrayGet(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1],
1271 rlDest, 0);
1272 break;
1273 case OP_AGET_BYTE:
1274 genArrayGet(cUnit, mir, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
1275 break;
1276 case OP_AGET_CHAR:
1277 genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1],
1278 rlDest, 1);
1279 break;
1280 case OP_AGET_SHORT:
1281 genArrayGet(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
1282 break;
1283 case OP_APUT_WIDE:
1284 genArrayPut(cUnit, mir, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
1285 break;
1286 case OP_APUT:
1287 genArrayPut(cUnit, mir, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
1288 break;
1289 case OP_APUT_OBJECT:
buzbeec143c552011-08-20 17:38:58 -07001290 genArrayPut(cUnit, mir, rlSrc[1], rlSrc[2], rlSrc[0], 2);
buzbee67bf8852011-08-17 17:51:35 -07001291 break;
1292 case OP_APUT_SHORT:
1293 case OP_APUT_CHAR:
1294 genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc[1], rlSrc[2],
1295 rlSrc[0], 1);
1296 break;
1297 case OP_APUT_BYTE:
1298 case OP_APUT_BOOLEAN:
1299 genArrayPut(cUnit, mir, kUnsignedByte, rlSrc[1], rlSrc[2],
1300 rlSrc[0], 0);
1301 break;
1302
1303 case OP_IGET_WIDE:
1304 case OP_IGET_WIDE_VOLATILE:
1305 genIGetWideX(cUnit, mir, rlDest, rlSrc[0]);
1306 break;
1307
1308 case OP_IGET:
1309 case OP_IGET_VOLATILE:
1310 case OP_IGET_OBJECT:
1311 case OP_IGET_OBJECT_VOLATILE:
1312 genIGetX(cUnit, mir, kWord, rlDest, rlSrc[0]);
1313 break;
1314
1315 case OP_IGET_BOOLEAN:
1316 case OP_IGET_BYTE:
1317 genIGetX(cUnit, mir, kUnsignedByte, rlDest, rlSrc[0]);
1318 break;
1319
1320 case OP_IGET_CHAR:
1321 genIGetX(cUnit, mir, kUnsignedHalf, rlDest, rlSrc[0]);
1322 break;
1323
1324 case OP_IGET_SHORT:
1325 genIGetX(cUnit, mir, kSignedHalf, rlDest, rlSrc[0]);
1326 break;
1327
1328 case OP_IPUT_WIDE:
1329 case OP_IPUT_WIDE_VOLATILE:
1330 genIPutWideX(cUnit, mir, rlSrc[0], rlSrc[1]);
1331 break;
1332
1333 case OP_IPUT_OBJECT:
1334 case OP_IPUT_OBJECT_VOLATILE:
1335 genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], true);
1336 break;
1337
1338 case OP_IPUT:
1339 case OP_IPUT_VOLATILE:
1340 genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false);
1341 break;
1342
1343 case OP_IPUT_BOOLEAN:
1344 case OP_IPUT_BYTE:
1345 genIPutX(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], false);
1346 break;
1347
1348 case OP_IPUT_CHAR:
1349 genIPutX(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], false);
1350 break;
1351
1352 case OP_IPUT_SHORT:
1353 genIPutX(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], false);
1354 break;
1355
1356 case OP_SGET:
1357 case OP_SGET_OBJECT:
1358 case OP_SGET_BOOLEAN:
1359 case OP_SGET_BYTE:
1360 case OP_SGET_CHAR:
1361 case OP_SGET_SHORT:
1362 genSget(cUnit, mir, rlResult, rlDest);
1363 break;
1364
1365 case OP_SGET_WIDE:
1366 genSgetWide(cUnit, mir, rlResult, rlDest);
1367 break;
1368
1369 case OP_SPUT:
1370 case OP_SPUT_OBJECT:
1371 case OP_SPUT_BOOLEAN:
1372 case OP_SPUT_BYTE:
1373 case OP_SPUT_CHAR:
1374 case OP_SPUT_SHORT:
1375 genSput(cUnit, mir, rlSrc[0]);
1376 break;
1377
1378 case OP_SPUT_WIDE:
1379 genSputWide(cUnit, mir, rlSrc[0]);
1380 break;
1381
1382 case OP_INVOKE_STATIC_RANGE:
1383 case OP_INVOKE_STATIC:
1384 genInvokeStatic(cUnit, mir);
1385 break;
1386
1387 case OP_INVOKE_DIRECT:
1388 case OP_INVOKE_DIRECT_RANGE:
1389 genInvokeDirect(cUnit, mir);
1390 break;
1391
1392 case OP_INVOKE_VIRTUAL:
1393 case OP_INVOKE_VIRTUAL_RANGE:
1394 genInvokeVirtual(cUnit, mir);
1395 break;
1396
1397 case OP_INVOKE_SUPER:
1398 case OP_INVOKE_SUPER_RANGE:
1399 genInvokeSuper(cUnit, mir);
1400 break;
1401
1402 case OP_INVOKE_INTERFACE:
1403 case OP_INVOKE_INTERFACE_RANGE:
1404 genInvokeInterface(cUnit, mir);
1405 break;
1406
1407 case OP_NEG_INT:
1408 case OP_NOT_INT:
1409 res = genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1410 break;
1411
1412 case OP_NEG_LONG:
1413 case OP_NOT_LONG:
1414 res = genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1415 break;
1416
1417 case OP_NEG_FLOAT:
1418 res = genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1419 break;
1420
1421 case OP_NEG_DOUBLE:
1422 res = genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1423 break;
1424
1425 case OP_INT_TO_LONG:
1426 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1427 if (rlSrc[0].location == kLocPhysReg) {
1428 genRegCopy(cUnit, rlResult.lowReg, rlSrc[0].lowReg);
1429 } else {
1430 loadValueDirect(cUnit, rlSrc[0], rlResult.lowReg);
1431 }
1432 opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
1433 rlResult.lowReg, 31);
1434 storeValueWide(cUnit, rlDest, rlResult);
1435 break;
1436
1437 case OP_LONG_TO_INT:
1438 rlSrc[0] = oatUpdateLocWide(cUnit, rlSrc[0]);
1439 rlSrc[0] = oatWideToNarrow(cUnit, rlSrc[0]);
1440 storeValue(cUnit, rlDest, rlSrc[0]);
1441 break;
1442
1443 case OP_INT_TO_BYTE:
1444 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            opRegReg(cUnit, kOp2Byte, rlResult.lowReg, rlSrc[0].lowReg);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_INT_TO_SHORT:
            rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            opRegReg(cUnit, kOp2Short, rlResult.lowReg, rlSrc[0].lowReg);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_INT_TO_CHAR:
            rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            opRegReg(cUnit, kOp2Char, rlResult.lowReg, rlSrc[0].lowReg);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_INT_TO_FLOAT:
        case OP_INT_TO_DOUBLE:
        case OP_LONG_TO_FLOAT:
        case OP_LONG_TO_DOUBLE:
        case OP_FLOAT_TO_INT:
        case OP_FLOAT_TO_LONG:
        case OP_FLOAT_TO_DOUBLE:
        case OP_DOUBLE_TO_INT:
        case OP_DOUBLE_TO_LONG:
        case OP_DOUBLE_TO_FLOAT:
            genConversion(cUnit, mir);
            break;

        case OP_ADD_INT:
        case OP_SUB_INT:
        case OP_MUL_INT:
        case OP_DIV_INT:
        case OP_REM_INT:
        case OP_AND_INT:
        case OP_OR_INT:
        case OP_XOR_INT:
        case OP_SHL_INT:
        case OP_SHR_INT:
        case OP_USHR_INT:
        case OP_ADD_INT_2ADDR:
        case OP_SUB_INT_2ADDR:
        case OP_MUL_INT_2ADDR:
        case OP_DIV_INT_2ADDR:
        case OP_REM_INT_2ADDR:
        case OP_AND_INT_2ADDR:
        case OP_OR_INT_2ADDR:
        case OP_XOR_INT_2ADDR:
        case OP_SHL_INT_2ADDR:
        case OP_SHR_INT_2ADDR:
        case OP_USHR_INT_2ADDR:
            genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
            break;

        case OP_ADD_LONG:
        case OP_SUB_LONG:
        case OP_MUL_LONG:
        case OP_DIV_LONG:
        case OP_REM_LONG:
        case OP_AND_LONG:
        case OP_OR_LONG:
        case OP_XOR_LONG:
        case OP_ADD_LONG_2ADDR:
        case OP_SUB_LONG_2ADDR:
        case OP_MUL_LONG_2ADDR:
        case OP_DIV_LONG_2ADDR:
        case OP_REM_LONG_2ADDR:
        case OP_AND_LONG_2ADDR:
        case OP_OR_LONG_2ADDR:
        case OP_XOR_LONG_2ADDR:
            genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
            break;

        case OP_SHL_LONG:
        case OP_SHR_LONG:
        case OP_USHR_LONG:
        case OP_SHL_LONG_2ADDR:
        case OP_SHR_LONG_2ADDR:
        case OP_USHR_LONG_2ADDR:
            genShiftOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
            break;

        case OP_ADD_FLOAT:
        case OP_SUB_FLOAT:
        case OP_MUL_FLOAT:
        case OP_DIV_FLOAT:
        case OP_REM_FLOAT:
        case OP_ADD_FLOAT_2ADDR:
        case OP_SUB_FLOAT_2ADDR:
        case OP_MUL_FLOAT_2ADDR:
        case OP_DIV_FLOAT_2ADDR:
        case OP_REM_FLOAT_2ADDR:
            genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
            break;

        case OP_ADD_DOUBLE:
        case OP_SUB_DOUBLE:
        case OP_MUL_DOUBLE:
        case OP_DIV_DOUBLE:
        case OP_REM_DOUBLE:
        case OP_ADD_DOUBLE_2ADDR:
        case OP_SUB_DOUBLE_2ADDR:
        case OP_MUL_DOUBLE_2ADDR:
        case OP_DIV_DOUBLE_2ADDR:
        case OP_REM_DOUBLE_2ADDR:
            genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
            break;

        case OP_RSUB_INT:
        case OP_ADD_INT_LIT16:
        case OP_MUL_INT_LIT16:
        case OP_DIV_INT_LIT16:
        case OP_REM_INT_LIT16:
        case OP_AND_INT_LIT16:
        case OP_OR_INT_LIT16:
        case OP_XOR_INT_LIT16:
        case OP_ADD_INT_LIT8:
        case OP_RSUB_INT_LIT8:
        case OP_MUL_INT_LIT8:
        case OP_DIV_INT_LIT8:
        case OP_REM_INT_LIT8:
        case OP_AND_INT_LIT8:
        case OP_OR_INT_LIT8:
        case OP_XOR_INT_LIT8:
        case OP_SHL_INT_LIT8:
        case OP_SHR_INT_LIT8:
        case OP_USHR_INT_LIT8:
            genArithOpIntLit(cUnit, mir, rlDest, rlSrc[0], mir->dalvikInsn.vC);
            break;

        default:
            res = true;
    }
    return res;
}

static const char *extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
    "kMirOpPhi",
    "kMirOpNullNRangeUpCheck",
    "kMirOpNullNRangeDownCheck",
    "kMirOpLowerBound",
    "kMirOpPunt",
    "kMirOpCheckInlinePrediction",
};
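
/*
 * Note: handleExtendedMethodMIR() below indexes this table with
 * (opcode - kMirOpFirst), so the entries above must stay in the same
 * order as the ExtendedMIROpcode enumeration.
 */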

/* Extended MIR instructions like PHI */
static void handleExtendedMethodMIR(CompilationUnit* cUnit, MIR* mir)
{
    int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
    char* msg = (char*)oatNew(strlen(extendedMIROpNames[opOffset]) + 1, false);
    strcpy(msg, extendedMIROpNames[opOffset]);
    ArmLIR* op = newLIR1(cUnit, kArmPseudoExtended, (int) msg);

    switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
        case kMirOpPhi: {
            char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
            op->flags.isNop = true;
            newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
            break;
        }
        default:
            break;
    }
}
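
/*
 * Rough sketch of the LIR produced above for a Phi node (pseudo ops only,
 * no machine code; the exact SSA string comes from oatGetSSAString and the
 * format shown here is illustrative):
 *
 *     kArmPseudoExtended  "kMirOpPhi"           <- marked isNop
 *     kArmPseudoSSARep    "s5 <- phi(s1, s3)"
 */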

/*
 * Flush any ins that are passed in registers to their home locations in
 * the frame, and copy/load any ins that have been promoted into physical
 * registers.
 */
static void flushIns(CompilationUnit* cUnit)
{
    if (cUnit->method->num_ins_ == 0)
        return;
    int inRegs = (cUnit->method->num_ins_ > 2) ? 3 : cUnit->method->num_ins_;
    int startReg = r1;
    int startLoc = cUnit->method->num_registers_ - cUnit->method->num_ins_;
    for (int i = 0; i < inRegs; i++) {
        RegLocation loc = cUnit->regLocation[startLoc + i];
        // TUNING: be smarter about flushing ins to frame
        storeBaseDisp(cUnit, rSP, loc.spOffset, startReg + i, kWord);
        if (loc.location == kLocPhysReg) {
            genRegCopy(cUnit, loc.lowReg, startReg + i);
        }
    }

    // Handle special case of wide argument half in regs, half in frame
    if (inRegs == 3) {
        RegLocation loc = cUnit->regLocation[startLoc + 2];
        if (loc.wide && loc.location == kLocPhysReg) {
            // Load the other half of the arg into the promoted pair
            loadBaseDisp(cUnit, NULL, rSP, loc.spOffset + 4,
                         loc.highReg, kWord, INVALID_SREG);
            inRegs++;
        }
    }

    // Now, do initial assignment of all promoted arguments passed in frame
    for (int i = inRegs; i < cUnit->method->num_ins_;) {
        RegLocation loc = cUnit->regLocation[startLoc + i];
        if (loc.fpLocation == kLocPhysReg) {
            loc.location = kLocPhysReg;
            loc.fp = true;
            loc.lowReg = loc.fpLowReg;
            loc.highReg = loc.fpHighReg;
        }
        if (loc.location == kLocPhysReg) {
            if (loc.wide) {
                loadBaseDispWide(cUnit, NULL, rSP, loc.spOffset,
                                 loc.lowReg, loc.highReg, INVALID_SREG);
                i++;
            } else {
                loadBaseDisp(cUnit, NULL, rSP, loc.spOffset,
                             loc.lowReg, kWord, INVALID_SREG);
            }
        }
        i++;
    }
}
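
/*
 * Illustrative only (actual frame offsets depend on the method): for a
 * method with three or more ins, the first loop above emits roughly
 *
 *     str  r1, [sp, #<in0 home offset>]
 *     str  r2, [sp, #<in1 home offset>]
 *     str  r3, [sp, #<in2 home offset>]
 *
 * followed by register copies/loads for any ins that were promoted to
 * physical registers.
 */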

/* Handle the content in each basic block */
static bool methodBlockCodeGen(CompilationUnit* cUnit, BasicBlock* bb)
{
    MIR* mir;
    ArmLIR* labelList = (ArmLIR*) cUnit->blockLabelList;
    int blockId = bb->id;

    cUnit->curBlock = bb;
    labelList[blockId].operands[0] = bb->startOffset;

    /* Insert the block label */
    labelList[blockId].opcode = kArmPseudoNormalBlockLabel;
    oatAppendLIR(cUnit, (LIR*) &labelList[blockId]);

    oatClobberAllRegs(cUnit);
    oatResetNullCheck(cUnit);

    ArmLIR* headLIR = NULL;

    if (bb->blockType == kEntryBlock) {
        /*
         * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
         * mechanism know so it doesn't try to use any of them when
         * expanding the frame or flushing.  This leaves the utility
         * code with a single temp: r12.  This should be enough.
         */
        oatLockTemp(cUnit, r0);
        oatLockTemp(cUnit, r1);
        oatLockTemp(cUnit, r2);
        oatLockTemp(cUnit, r3);
        newLIR0(cUnit, kArmPseudoMethodEntry);
        /* Spill core callee saves */
        newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
        /* Need to spill any FP regs? */
        if (cUnit->numFPSpills) {
            newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
        }
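        /*
         * Adjust sp for the rest of the frame; the spill area
         * (numSpills * 4 bytes) is assumed to have been allocated by the
         * push/vpush above.
         */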
        opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
        storeBaseDisp(cUnit, rSP, 0, r0, kWord);
        flushIns(cUnit);
        oatFreeTemp(cUnit, r0);
        oatFreeTemp(cUnit, r1);
        oatFreeTemp(cUnit, r2);
        oatFreeTemp(cUnit, r3);
    } else if (bb->blockType == kExitBlock) {
        newLIR0(cUnit, kArmPseudoMethodExit);
        opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
        /* Need to restore any FP callee saves? */
        if (cUnit->numFPSpills) {
            newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
        }
        if (cUnit->coreSpillMask & (1 << rLR)) {
            /* Unspill rLR to rPC */
            cUnit->coreSpillMask &= ~(1 << rLR);
            cUnit->coreSpillMask |= (1 << rPC);
        }
        newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
        if (!(cUnit->coreSpillMask & (1 << rPC))) {
            /* We didn't pop to rPC, so must do a bx rLR */
            newLIR1(cUnit, kThumbBx, rLR);
        }
    }
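
    /*
     * Rough shape of the code the entry/exit blocks generate (illustrative
     * only; the actual spill mask, FP spills, and frame size vary per
     * method):
     *
     *   prologue:  push  {core callee saves, lr}
     *              [vpush {fp callee saves}]
     *              sub   sp, #(frameSize - numSpills * 4)
     *              str   r0, [sp]        ; Method*
     *              <flushIns>
     *
     *   epilogue:  add   sp, #(frameSize - numSpills * 4)
     *              [vpop  {fp callee saves}]
     *              pop   {core callee saves, pc}   ; or pop {...}; bx lr
     */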

    for (mir = bb->firstMIRInsn; mir; mir = mir->next) {

        oatResetRegPool(cUnit);
        if (cUnit->disableOpt & (1 << kTrackLiveTemps)) {
            oatClobberAllRegs(cUnit);
        }

        if (cUnit->disableOpt & (1 << kSuppressLoads)) {
            oatResetDefTracking(cUnit);
        }

        if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
            handleExtendedMethodMIR(cUnit, mir);
            continue;
        }

        cUnit->currentDalvikOffset = mir->offset;

        Opcode dalvikOpcode = mir->dalvikInsn.opcode;
        InstructionFormat dalvikFormat =
            dexGetFormatFromOpcode(dalvikOpcode);

        ArmLIR* boundaryLIR;

        /* Mark the beginning of a Dalvik instruction for line tracking */
        boundaryLIR = newLIR1(cUnit, kArmPseudoDalvikByteCodeBoundary,
                              (int) oatGetDalvikDisassembly(
                              &mir->dalvikInsn, ""));
        /* Remember the first LIR for this block */
        if (headLIR == NULL) {
            headLIR = boundaryLIR;
            /* Set the first boundaryLIR as a scheduling barrier */
            headLIR->defMask = ENCODE_ALL;
        }

        /* Don't generate the SSA annotation unless verbose mode is on */
        if (cUnit->printMe && mir->ssaRep) {
            char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
            newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
        }

        bool notHandled = compileDalvikInstruction(cUnit, mir, bb, labelList);

        if (notHandled) {
            char buf[100];
            snprintf(buf, 100, "%#06x: Opcode %#x (%s) / Fmt %d not handled",
                     mir->offset,
                     dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
                     dalvikFormat);
            LOG(FATAL) << buf;
        }
    }

    if (headLIR) {
        /*
         * Eliminate redundant loads/stores and delay stores into later
         * slots
         */
        oatApplyLocalOptimizations(cUnit, (LIR*) headLIR,
                                   cUnit->lastLIRInsn);

        /*
         * Generate an unconditional branch to the fallthrough block.
         */
        if (bb->fallThrough) {
            genUnconditionalBranch(cUnit,
                                   &labelList[bb->fallThrough->id]);
        }
    }
    return false;
}

/*
 * Nop any unconditional branches that go to the next instruction.
 * Note: new redundant branches may be inserted later, and we'll
 * use a check in final instruction assembly to nop those out.
 */
void removeRedundantBranches(CompilationUnit* cUnit)
{
    ArmLIR* thisLIR;

    for (thisLIR = (ArmLIR*) cUnit->firstLIRInsn;
         thisLIR != (ArmLIR*) cUnit->lastLIRInsn;
         thisLIR = NEXT_LIR(thisLIR)) {

        /* Branch to the next instruction */
        if ((thisLIR->opcode == kThumbBUncond) ||
            (thisLIR->opcode == kThumb2BUncond)) {
            ArmLIR* nextLIR = thisLIR;

            while (true) {
                nextLIR = NEXT_LIR(nextLIR);

                /*
                 * Is the branch target the next instruction?
                 */
                if (nextLIR == (ArmLIR*) thisLIR->generic.target) {
                    thisLIR->flags.isNop = true;
                    break;
                }

                /*
                 * Found real useful stuff between the branch and the target.
                 * Need to explicitly check the lastLIRInsn here because it
                 * might be the last real instruction.
                 */
                if (!isPseudoOpcode(nextLIR->opcode) ||
                    (nextLIR == (ArmLIR*) cUnit->lastLIRInsn))
                    break;
            }
        }
    }
}
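
/*
 * Illustrative example of what this pass removes (pseudo ops such as block
 * labels and Dalvik-offset markers do not count as "useful stuff"):
 *
 *       b       L_next              ; flagged isNop, dropped at assembly time
 *       kArmPseudoNormalBlockLabel  ; pseudo op, skipped by the scan
 *   L_next:
 *       ...
 */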

void oatMethodMIR2LIR(CompilationUnit* cUnit)
{
    /* Used to hold the labels of each block */
    cUnit->blockLabelList =
        (void *) oatNew(sizeof(ArmLIR) * cUnit->numBlocks, true);

    oatDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen,
                                  kPreOrderDFSTraversal, false /* Iterative */);
    removeRedundantBranches(cUnit);
}

/* Common initialization routine for an architecture family */
bool oatArchInit()
{
    int i;

    for (i = 0; i < kArmLast; i++) {
        if (EncodingMap[i].opcode != i) {
            LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
                " is wrong: expecting " << i << ", seeing " <<
                (int)EncodingMap[i].opcode;
        }
    }

    return oatArchVariantInit();
}
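
/*
 * Note: the sanity check above relies on each EncodingMap entry storing its
 * own opcode, i.e. EncodingMap[op].opcode == op for every op, so the table
 * must be declared in exact enumeration order.
 */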

/* Needed by the Assembler */
void oatSetupResourceMasks(ArmLIR* lir)
{
    setupResourceMasks(lir);
}

/* Needed by the ld/st optimizations */
ArmLIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
{
    return genRegCopyNoInsert(cUnit, rDest, rSrc);
}

/* Needed by the register allocator */
ArmLIR* oatRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
{
    return genRegCopy(cUnit, rDest, rSrc);
}

/* Needed by the register allocator */
void oatRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
                    int srcLo, int srcHi)
{
    genRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
}

void oatFlushRegImpl(CompilationUnit* cUnit, int rBase,
                     int displacement, int rSrc, OpSize size)
{
    storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
}

void oatFlushRegWideImpl(CompilationUnit* cUnit, int rBase,
                         int displacement, int rSrcLo, int rSrcHi)
{
    storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
}