buzbee67bf8852011-08-17 17:51:35 -07001/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
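/*
 * Fixed RegLocations used throughout this file: badLoc is a sentinel/"invalid"
 * placeholder, while retLoc and retLocWide describe the standard Dalvik
 * return-value locations (see the LOC_DALVIK_RETURN_VAL* macros).
 */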
buzbee67bf8852011-08-17 17:51:35 -070017static const RegLocation badLoc = {kLocDalvikFrame, 0, 0, INVALID_REG,
18 INVALID_REG, INVALID_SREG, 0,
19 kLocDalvikFrame, INVALID_REG, INVALID_REG,
20 INVALID_OFFSET};
21static const RegLocation retLoc = LOC_DALVIK_RETURN_VAL;
22static const RegLocation retLocWide = LOC_DALVIK_RETURN_VAL_WIDE;
23
buzbeedfd3d702011-08-28 12:56:51 -070024/*
25 * Let helper function take care of everything. Will call
26 * Array::AllocFromCode(type_idx, method, count);
27 * Note: AllocFromCode will handle checks for errNegativeArraySize.
28 */
buzbee67bf8852011-08-17 17:51:35 -070029static void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
30 RegLocation rlSrc)
31{
buzbeedfd3d702011-08-28 12:56:51 -070032 oatFlushAllRegs(cUnit); /* Everything to home location */
33 loadWordDisp(cUnit, rSELF,
34 OFFSETOF_MEMBER(Thread, pAllocFromCode), rLR);
35 loadCurrMethodDirect(cUnit, r1); // arg1 <- Method*
36 loadConstant(cUnit, r0, mir->dalvikInsn.vC); // arg0 <- type_id
37 loadValueDirectFixed(cUnit, rlSrc, r2); // arg2 <- count
38 opReg(cUnit, kOpBlx, rLR);
39 oatClobberCallRegs(cUnit);
40 RegLocation rlResult = oatGetReturn(cUnit);
41 storeValue(cUnit, rlDest, rlResult);
buzbee67bf8852011-08-17 17:51:35 -070042}
43
44/*
45 * Similar to genNewArray, but with post-allocation initialization.
 46 * The verifier guarantees we're dealing with an array class. The current
 47 * code throws a runtime exception ("bad Filled array req") for 'D' and 'J',
 48 * and an internal "unimplemented" error if the type is not 'L', '[' or 'I'.
49 */
50static void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
51{
52 DecodedInstruction* dInsn = &mir->dalvikInsn;
53 int elems;
buzbeedfd3d702011-08-28 12:56:51 -070054 int typeId;
buzbee67bf8852011-08-17 17:51:35 -070055 if (isRange) {
56 elems = dInsn->vA;
buzbeedfd3d702011-08-28 12:56:51 -070057 typeId = dInsn->vB;
buzbee67bf8852011-08-17 17:51:35 -070058 } else {
59 elems = dInsn->vB;
buzbeedfd3d702011-08-28 12:56:51 -070060 typeId = dInsn->vC;
buzbee67bf8852011-08-17 17:51:35 -070061 }
buzbeedfd3d702011-08-28 12:56:51 -070062 oatFlushAllRegs(cUnit); /* Everything to home location */
buzbeedfd3d702011-08-28 12:56:51 -070063 loadWordDisp(cUnit, rSELF,
buzbee1da522d2011-09-04 11:22:20 -070064 OFFSETOF_MEMBER(Thread, pCheckAndAllocFromCode), rLR);
buzbeedfd3d702011-08-28 12:56:51 -070065 loadCurrMethodDirect(cUnit, r1); // arg1 <- Method*
66 loadConstant(cUnit, r0, typeId); // arg0 <- type_id
67 loadConstant(cUnit, r2, elems); // arg2 <- count
68 opReg(cUnit, kOpBlx, rLR);
buzbee67bf8852011-08-17 17:51:35 -070069 /*
buzbeedfd3d702011-08-28 12:56:51 -070070 * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
71 * return region. Because AllocFromCode placed the new array
72 * in r0, we'll just lock it into place. When debugger support is
73 * added, it may be necessary to additionally copy all return
74 * values to a home location in thread-local storage
buzbee67bf8852011-08-17 17:51:35 -070075 */
buzbee67bf8852011-08-17 17:51:35 -070076 oatLockTemp(cUnit, r0);
buzbeedfd3d702011-08-28 12:56:51 -070077
buzbee67bf8852011-08-17 17:51:35 -070078 // Having a range of 0 is legal
79 if (isRange && (dInsn->vA > 0)) {
80 /*
 81 * Bit of ugliness here. We're going to generate a mem copy loop
 82 * on the register range, but it is possible that some regs
 83 * in the range have been promoted. This is unlikely, but
 84 * before generating the copy we'll force any promoted regs in
 85 * the source range to be flushed back to their home (frame)
 86 * locations.
87 */
88 for (unsigned int i = 0; i < dInsn->vA; i++) {
89 RegLocation loc = oatUpdateLoc(cUnit,
90 oatGetSrc(cUnit, mir, i));
91 if (loc.location == kLocPhysReg) {
92 storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
93 }
94 }
95 /*
96 * TUNING note: generated code here could be much improved, but
97 * this is an uncommon operation and isn't especially performance
98 * critical.
99 */
100 int rSrc = oatAllocTemp(cUnit);
101 int rDst = oatAllocTemp(cUnit);
102 int rIdx = oatAllocTemp(cUnit);
103 int rVal = rLR; // Using a lot of temps, rLR is known free here
104 // Set up source pointer
105 RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
106 opRegRegImm(cUnit, kOpAdd, rSrc, rSP, rlFirst.spOffset);
107 // Set up the target pointer
108 opRegRegImm(cUnit, kOpAdd, rDst, r0,
buzbeec143c552011-08-20 17:38:58 -0700109 Array::DataOffset().Int32Value());
buzbee67bf8852011-08-17 17:51:35 -0700110 // Set up the loop counter (known to be > 0)
111 loadConstant(cUnit, rIdx, dInsn->vA);
112 // Generate the copy loop. Going backwards for convenience
113 ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
114 target->defMask = ENCODE_ALL;
115 // Copy next element
116 loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
117 storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
118 // Use setflags encoding here
119 newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
120 ArmLIR* branch = opCondBranch(cUnit, kArmCondNe);
121 branch->generic.target = (LIR*)target;
122 } else if (!isRange) {
123 // TUNING: interleave
124 for (unsigned int i = 0; i < dInsn->vA; i++) {
125 RegLocation rlArg = loadValue(cUnit,
126 oatGetSrc(cUnit, mir, i), kCoreReg);
buzbeec143c552011-08-20 17:38:58 -0700127 storeBaseDisp(cUnit, r0,
128 Array::DataOffset().Int32Value() +
buzbee67bf8852011-08-17 17:51:35 -0700129 i * 4, rlArg.lowReg, kWord);
130 // If the loadValue caused a temp to be allocated, free it
131 if (oatIsTemp(cUnit, rlArg.lowReg)) {
132 oatFreeTemp(cUnit, rlArg.lowReg);
133 }
134 }
135 }
136}
137
138static void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
139{
buzbeee1931742011-08-28 21:15:53 -0700140 bool isObject = ((mir->dalvikInsn.opcode == OP_SPUT_OBJECT) ||
141 (mir->dalvikInsn.opcode == OP_SPUT_OBJECT_VOLATILE));
buzbee1da522d2011-09-04 11:22:20 -0700142 int fieldIdx = mir->dalvikInsn.vB;
143 Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
144 if (field == NULL) {
145 // Slow path
146 int funcOffset = isObject ? OFFSETOF_MEMBER(Thread, pSetObjStatic)
147 : OFFSETOF_MEMBER(Thread, pSet32Static);
buzbeee1931742011-08-28 21:15:53 -0700148 oatFlushAllRegs(cUnit);
149 loadWordDisp(cUnit, rSELF, funcOffset, rLR);
150 loadConstant(cUnit, r0, mir->dalvikInsn.vB);
151 loadCurrMethodDirect(cUnit, r1);
152 loadValueDirect(cUnit, rlSrc, r2);
153 opReg(cUnit, kOpBlx, rLR);
154 oatClobberCallRegs(cUnit);
155 } else {
buzbee1da522d2011-09-04 11:22:20 -0700156 // fast path
157 int fieldOffset = field->GetOffset().Int32Value();
158 art::ClassLinker* class_linker = art::Runtime::Current()->
159 GetClassLinker();
160 const art::DexFile& dex_file = class_linker->
161 FindDexFile(field->GetDeclaringClass()->GetDexCache());
162 const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
163 int typeIdx = field_id.class_idx_;
164 // Using fixed register to sync with slow path
165 int rMethod = r1;
166 oatLockTemp(cUnit, rMethod);
167 loadCurrMethodDirect(cUnit, rMethod);
168 int rBase = r0;
169 oatLockTemp(cUnit, rBase);
170 loadWordDisp(cUnit, rMethod,
171 Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
172 rBase);
173 loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
174 sizeof(int32_t*)* typeIdx, rBase);
175 // TUNING: fast path should fall through
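        // rBase now holds the declaring class's static storage pointer from the
        // dex cache; a null entry means that class hasn't been initialized yet.
        // In that case call pInitializeStaticStorage(type_idx), which should
        // leave the initialized storage pointer in r0 (== rBase).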
176 ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
177 loadWordDisp(cUnit, rSELF,
178 OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
179 loadConstant(cUnit, r0, typeIdx);
180 opReg(cUnit, kOpBlx, rLR);
181 ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
182 skipTarget->defMask = ENCODE_ALL;
183 branchOver->generic.target = (LIR*)skipTarget;
184 rlSrc = oatGetSrc(cUnit, mir, 0);
185 rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
186 storeWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
buzbee67bf8852011-08-17 17:51:35 -0700187#if ANDROID_SMP != 0
buzbee1da522d2011-09-04 11:22:20 -0700188 if (field->IsVolatile()) {
189 oatGenMemBarrier(cUnit, kSY);
190 }
buzbee67bf8852011-08-17 17:51:35 -0700191#endif
buzbee1da522d2011-09-04 11:22:20 -0700192 if (isObject) {
193 markGCCard(cUnit, rlSrc.lowReg, rBase);
194 }
195 oatFreeTemp(cUnit, rBase);
buzbeee1931742011-08-28 21:15:53 -0700196 }
buzbee67bf8852011-08-17 17:51:35 -0700197}
198
199static void genSputWide(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
200{
buzbee1da522d2011-09-04 11:22:20 -0700201 int fieldIdx = mir->dalvikInsn.vB;
202 Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
203 if (field == NULL) {
buzbeee1931742011-08-28 21:15:53 -0700204 oatFlushAllRegs(cUnit);
buzbee1da522d2011-09-04 11:22:20 -0700205 loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pSet64Static), rLR);
buzbeee1931742011-08-28 21:15:53 -0700206 loadConstant(cUnit, r0, mir->dalvikInsn.vB);
207 loadCurrMethodDirect(cUnit, r1);
208 loadValueDirectWideFixed(cUnit, rlSrc, r2, r3);
209 opReg(cUnit, kOpBlx, rLR);
210 oatClobberCallRegs(cUnit);
211 } else {
buzbee1da522d2011-09-04 11:22:20 -0700212 // fast path
213 int fieldOffset = field->GetOffset().Int32Value();
214 art::ClassLinker* class_linker = art::Runtime::Current()->
215 GetClassLinker();
216 const art::DexFile& dex_file = class_linker->
217 FindDexFile(field->GetDeclaringClass()->GetDexCache());
218 const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
219 int typeIdx = field_id.class_idx_;
220 // Using fixed register to sync with slow path
221 int rMethod = r1;
222 oatLockTemp(cUnit, rMethod);
223 loadCurrMethodDirect(cUnit, r1);
224 int rBase = r0;
225 oatLockTemp(cUnit, rBase);
226 loadWordDisp(cUnit, rMethod,
227 Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
228 rBase);
229 loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
230 sizeof(int32_t*)* typeIdx, rBase);
231 // TUNING: fast path should fall through
232 ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
233 loadWordDisp(cUnit, rSELF,
234 OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
235 loadConstant(cUnit, r0, typeIdx);
236 opReg(cUnit, kOpBlx, rLR);
237 ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
238 skipTarget->defMask = ENCODE_ALL;
239 branchOver->generic.target = (LIR*)skipTarget;
240 rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
241 rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
242 storeBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
243 rlSrc.highReg);
244#if ANDROID_SMP != 0
245 if (field->IsVolatile()) {
246 oatGenMemBarrier(cUnit, kSY);
247 }
buzbeec143c552011-08-20 17:38:58 -0700248#endif
buzbee1da522d2011-09-04 11:22:20 -0700249 oatFreeTemp(cUnit, rBase);
buzbeee1931742011-08-28 21:15:53 -0700250 }
buzbee67bf8852011-08-17 17:51:35 -0700251}
252
253
buzbee67bf8852011-08-17 17:51:35 -0700254static void genSgetWide(CompilationUnit* cUnit, MIR* mir,
255 RegLocation rlResult, RegLocation rlDest)
256{
buzbee1da522d2011-09-04 11:22:20 -0700257 int fieldIdx = mir->dalvikInsn.vB;
258 Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
259 if (field == NULL) {
buzbeee1931742011-08-28 21:15:53 -0700260 oatFlushAllRegs(cUnit);
buzbee1da522d2011-09-04 11:22:20 -0700261 loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pGet64Static), rLR);
buzbeee1931742011-08-28 21:15:53 -0700262 loadConstant(cUnit, r0, mir->dalvikInsn.vB);
263 loadCurrMethodDirect(cUnit, r1);
264 opReg(cUnit, kOpBlx, rLR);
265 RegLocation rlResult = oatGetReturnWide(cUnit);
266 storeValueWide(cUnit, rlDest, rlResult);
267 } else {
buzbee1da522d2011-09-04 11:22:20 -0700268 // Fast path
269 int fieldOffset = field->GetOffset().Int32Value();
270 art::ClassLinker* class_linker = art::Runtime::Current()->
271 GetClassLinker();
272 const art::DexFile& dex_file = class_linker->
273 FindDexFile(field->GetDeclaringClass()->GetDexCache());
274 const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
275 int typeIdx = field_id.class_idx_;
276 // Using fixed register to sync with slow path
277 int rMethod = r1;
278 oatLockTemp(cUnit, rMethod);
279 loadCurrMethodDirect(cUnit, rMethod);
280 int rBase = r0;
281 oatLockTemp(cUnit, rBase);
282 loadWordDisp(cUnit, rMethod,
283 Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
284 rBase);
285 loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
286 sizeof(int32_t*)* typeIdx, rBase);
287 // TUNING: fast path should fall through
288 ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
289 loadWordDisp(cUnit, rSELF,
290 OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
291 loadConstant(cUnit, r0, typeIdx);
292 opReg(cUnit, kOpBlx, rLR);
293 ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
294 skipTarget->defMask = ENCODE_ALL;
295 branchOver->generic.target = (LIR*)skipTarget;
296 rlDest = oatGetDestWide(cUnit, mir, 0, 1);
297 RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
298#if ANDROID_SMP != 0
299 if (field->IsVolatile()) {
300 oatGenMemBarrier(cUnit, kSY);
301 }
buzbeec143c552011-08-20 17:38:58 -0700302#endif
buzbee1da522d2011-09-04 11:22:20 -0700303 loadBaseDispWide(cUnit, NULL, rBase, fieldOffset, rlResult.lowReg,
304 rlResult.highReg, INVALID_SREG);
305 oatFreeTemp(cUnit, rBase);
306 storeValueWide(cUnit, rlDest, rlResult);
buzbeee1931742011-08-28 21:15:53 -0700307 }
buzbee67bf8852011-08-17 17:51:35 -0700308}
309
310static void genSget(CompilationUnit* cUnit, MIR* mir,
311 RegLocation rlResult, RegLocation rlDest)
312{
buzbee1da522d2011-09-04 11:22:20 -0700313 int fieldIdx = mir->dalvikInsn.vB;
314 Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
buzbeee1931742011-08-28 21:15:53 -0700315 bool isObject = ((mir->dalvikInsn.opcode == OP_SGET_OBJECT) ||
316 (mir->dalvikInsn.opcode == OP_SGET_OBJECT_VOLATILE));
buzbee1da522d2011-09-04 11:22:20 -0700317 if (field == NULL) {
318 // Slow path
319 int funcOffset = isObject ? OFFSETOF_MEMBER(Thread, pGetObjStatic)
320 : OFFSETOF_MEMBER(Thread, pGet32Static);
buzbeee1931742011-08-28 21:15:53 -0700321 oatFlushAllRegs(cUnit);
322 loadWordDisp(cUnit, rSELF, funcOffset, rLR);
323 loadConstant(cUnit, r0, mir->dalvikInsn.vB);
324 loadCurrMethodDirect(cUnit, r1);
325 opReg(cUnit, kOpBlx, rLR);
326 RegLocation rlResult = oatGetReturn(cUnit);
327 storeValue(cUnit, rlDest, rlResult);
328 } else {
buzbee1da522d2011-09-04 11:22:20 -0700329 // Fast path
330 int fieldOffset = field->GetOffset().Int32Value();
331 art::ClassLinker* class_linker = art::Runtime::Current()->
332 GetClassLinker();
333 const art::DexFile& dex_file = class_linker->
334 FindDexFile(field->GetDeclaringClass()->GetDexCache());
335 const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
336 int typeIdx = field_id.class_idx_;
337 // Using fixed register to sync with slow path
338 int rMethod = r1;
339 oatLockTemp(cUnit, rMethod);
340 loadCurrMethodDirect(cUnit, rMethod);
341 int rBase = r0;
342 oatLockTemp(cUnit, rBase);
343 loadWordDisp(cUnit, rMethod,
344 Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
345 rBase);
346 loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
347 sizeof(int32_t*)* typeIdx, rBase);
348 // TUNING: fast path should fall through
349 ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
350 loadWordDisp(cUnit, rSELF,
351 OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
352 loadConstant(cUnit, r0, typeIdx);
353 opReg(cUnit, kOpBlx, rLR);
354 ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
355 skipTarget->defMask = ENCODE_ALL;
356 branchOver->generic.target = (LIR*)skipTarget;
357 rlDest = oatGetDest(cUnit, mir, 0);
358 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
buzbee67bf8852011-08-17 17:51:35 -0700359#if ANDROID_SMP != 0
buzbee1da522d2011-09-04 11:22:20 -0700360 if (field->IsVolatile()) {
361 oatGenMemBarrier(cUnit, kSY);
362 }
buzbee67bf8852011-08-17 17:51:35 -0700363#endif
buzbee1da522d2011-09-04 11:22:20 -0700364 loadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
365 oatFreeTemp(cUnit, rBase);
366 storeValue(cUnit, rlDest, rlResult);
buzbeee1931742011-08-28 21:15:53 -0700367 }
buzbee67bf8852011-08-17 17:51:35 -0700368}
369
buzbee561227c2011-09-02 15:28:19 -0700370typedef int (*NextCallInsn)(CompilationUnit*, MIR*, DecodedInstruction*, int,
371 ArmLIR*);
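/*
 * A NextCallInsn callback emits the next instruction of an invoke launch
 * sequence for the given state and returns the next state; it returns -1 once
 * the sequence is complete (the drivers below loop "while (callState >= 0)").
 */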
buzbee67bf8852011-08-17 17:51:35 -0700372
373/*
374 * Bit of a hack here - in lieu of a real scheduling pass,
375 * emit the next instruction in static & direct invoke sequences.
376 */
377static int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
buzbee561227c2011-09-02 15:28:19 -0700378 DecodedInstruction* dInsn, int state,
379 ArmLIR* rollback)
buzbee67bf8852011-08-17 17:51:35 -0700380{
buzbee561227c2011-09-02 15:28:19 -0700381 DCHECK(rollback == NULL);
382 uint32_t idx = dInsn->vB;
buzbee67bf8852011-08-17 17:51:35 -0700383 switch(state) {
384 case 0: // Get the current Method* [sets r0]
buzbeedfd3d702011-08-28 12:56:51 -0700385 loadCurrMethodDirect(cUnit, r0);
buzbee67bf8852011-08-17 17:51:35 -0700386 break;
buzbee561227c2011-09-02 15:28:19 -0700387 case 1: // Get method->code_and_direct_methods_
388 loadWordDisp(cUnit, r0,
389 Method::GetDexCacheCodeAndDirectMethodsOffset().Int32Value(),
390 r0);
buzbee67bf8852011-08-17 17:51:35 -0700391 break;
buzbee561227c2011-09-02 15:28:19 -0700392 case 2: // Grab target method* and target code_
393 loadWordDisp(cUnit, r0,
394 art::CodeAndDirectMethods::CodeOffsetInBytes(idx), rLR);
395 loadWordDisp(cUnit, r0,
396 art::CodeAndDirectMethods::MethodOffsetInBytes(idx), r0);
buzbeec5ef0462011-08-25 18:44:49 -0700397 break;
398 default:
399 return -1;
400 }
401 return state + 1;
402}
403
buzbee67bf8852011-08-17 17:51:35 -0700404/*
405 * Bit of a hack here - in lieu of a real scheduling pass,
406 * emit the next instruction in a virtual invoke sequence.
407 * We can use rLR as a temp prior to target address loading
408 * Note also that we'll load the first argument ("this") into
409 * r1 here rather than the standard loadArgRegs.
410 */
411static int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
buzbee561227c2011-09-02 15:28:19 -0700412 DecodedInstruction* dInsn, int state,
413 ArmLIR* rollback)
buzbee67bf8852011-08-17 17:51:35 -0700414{
buzbee561227c2011-09-02 15:28:19 -0700415 DCHECK(rollback == NULL);
buzbee67bf8852011-08-17 17:51:35 -0700416 RegLocation rlArg;
buzbee561227c2011-09-02 15:28:19 -0700417 /*
418 * This is the fast path in which the target virtual method is
419 * fully resolved at compile time.
420 */
421 Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
422 Get(dInsn->vB);
423 CHECK(baseMethod != NULL);
424 uint32_t target_idx = baseMethod->GetMethodIndex();
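    // For a virtual method, method_index_ is its slot in the declaring class's
    // vtable, so it can be used directly to index this->klass_->vtable below.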
buzbee67bf8852011-08-17 17:51:35 -0700425 switch(state) {
buzbee561227c2011-09-02 15:28:19 -0700426 case 0: // Get "this" [set r1]
buzbee67bf8852011-08-17 17:51:35 -0700427 rlArg = oatGetSrc(cUnit, mir, 0);
428 loadValueDirectFixed(cUnit, rlArg, r1);
429 break;
buzbee561227c2011-09-02 15:28:19 -0700430 case 1: // Is "this" null? [use r1]
431 genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir->offset, NULL);
432 // get this->klass_ [use r1, set rLR]
433 loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
buzbee67bf8852011-08-17 17:51:35 -0700434 break;
buzbee561227c2011-09-02 15:28:19 -0700435 case 2: // Get this->klass_->vtable [usr rLR, set rLR]
436 loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
buzbee67bf8852011-08-17 17:51:35 -0700437 break;
buzbee561227c2011-09-02 15:28:19 -0700438 case 3: // Get target method [use rLR, set r0]
439 loadWordDisp(cUnit, rLR, (target_idx * 4) +
440 art::Array::DataOffset().Int32Value(), r0);
441 break;
442 case 4: // Get the target compiled code address [uses r0, sets rLR]
443 loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
buzbee67bf8852011-08-17 17:51:35 -0700444 break;
445 default:
446 return -1;
447 }
448 return state + 1;
449}
450
buzbee7b1b86d2011-08-26 18:59:10 -0700451static int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir,
buzbee561227c2011-09-02 15:28:19 -0700452 DecodedInstruction* dInsn, int state,
453 ArmLIR* rollback)
buzbee7b1b86d2011-08-26 18:59:10 -0700454{
buzbee561227c2011-09-02 15:28:19 -0700455 DCHECK(rollback != NULL);
buzbee7b1b86d2011-08-26 18:59:10 -0700456 RegLocation rlArg;
buzbee561227c2011-09-02 15:28:19 -0700457 ArmLIR* skipBranch;
458 ArmLIR* skipTarget;
459 /*
 460 * This handles the case in which the base method is not fully
 461 * resolved at compile time: we must generate code to test for
 462 * resolution at run time and, if the method is still unresolved,
 463 * bail to the slow path so the runtime can fill in the tables.
 464 * In that case we'll restart at the beginning of the sequence.
465 */
buzbee7b1b86d2011-08-26 18:59:10 -0700466 switch(state) {
467 case 0: // Get the current Method* [sets r0]
buzbeedfd3d702011-08-28 12:56:51 -0700468 loadCurrMethodDirect(cUnit, r0);
buzbee7b1b86d2011-08-26 18:59:10 -0700469 break;
buzbee561227c2011-09-02 15:28:19 -0700470 case 1: // Get method->dex_cache_resolved_methods_
471 loadWordDisp(cUnit, r0,
472 Method::GetDexCacheResolvedMethodsOffset().Int32Value(), rLR);
buzbee7b1b86d2011-08-26 18:59:10 -0700473 break;
buzbee561227c2011-09-02 15:28:19 -0700474 case 2: // method->dex_cache_resolved_methods_->Get(method_idx)
475 loadWordDisp(cUnit, rLR, (dInsn->vB * 4) +
476 art::Array::DataOffset().Int32Value(), rLR);
buzbee7b1b86d2011-08-26 18:59:10 -0700477 break;
buzbee561227c2011-09-02 15:28:19 -0700478 case 3: // Resolved?
479 skipBranch = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
480 // Slowest path, bail to helper, rollback and retry
481 loadWordDisp(cUnit, rSELF,
482 OFFSETOF_MEMBER(Thread, pResolveMethodFromCode), rLR);
483 loadConstant(cUnit, r1, dInsn->vB);
484 newLIR1(cUnit, kThumbBlxR, rLR);
485 genUnconditionalBranch(cUnit, rollback);
486 // Resume normal slow path
487 skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
488 skipTarget->defMask = ENCODE_ALL;
489 skipBranch->generic.target = (LIR*)skipTarget;
buzbee4a3164f2011-09-03 11:25:10 -0700490 // Get base_method->method_index [usr rLR, set r0]
buzbee561227c2011-09-02 15:28:19 -0700491 loadBaseDisp(cUnit, mir, rLR,
492 Method::GetMethodIndexOffset().Int32Value(), r0,
493 kUnsignedHalf, INVALID_SREG);
buzbee7b1b86d2011-08-26 18:59:10 -0700494 // Load "this" [set r1]
495 rlArg = oatGetSrc(cUnit, mir, 0);
496 loadValueDirectFixed(cUnit, rlArg, r1);
buzbee7b1b86d2011-08-26 18:59:10 -0700497 break;
498 case 4:
499 // Is "this" null? [use r1]
500 genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir->offset, NULL);
501 // get this->clazz [use r1, set rLR]
buzbee561227c2011-09-02 15:28:19 -0700502 loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
buzbee7b1b86d2011-08-26 18:59:10 -0700503 break;
buzbee561227c2011-09-02 15:28:19 -0700504 case 5:
505 // get this->klass_->vtable_ [usr rLR, set rLR]
506 loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
507 DCHECK((art::Array::DataOffset().Int32Value() & 0x3) == 0);
508 // While the vtable_ load is in flight, fold the array object header size into method_index_
509 opRegImm(cUnit, kOpAdd, r0,
510 art::Array::DataOffset().Int32Value() / 4);
511 // Get target Method*
512 loadBaseIndexed(cUnit, rLR, r0, r0, 2, kWord);
513 break;
514 case 6: // Get the target compiled code address [uses r0, sets rLR]
515 loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
buzbee7b1b86d2011-08-26 18:59:10 -0700516 break;
517 default:
518 return -1;
519 }
520 return state + 1;
521}
522
buzbee67bf8852011-08-17 17:51:35 -0700523/* Load up to 3 arguments in r1..r3 */
524static int loadArgRegs(CompilationUnit* cUnit, MIR* mir,
525 DecodedInstruction* dInsn, int callState,
buzbee561227c2011-09-02 15:28:19 -0700526 int *args, NextCallInsn nextCallInsn, ArmLIR* rollback)
buzbee67bf8852011-08-17 17:51:35 -0700527{
528 for (int i = 0; i < 3; i++) {
529 if (args[i] != INVALID_REG) {
530 RegLocation rlArg = oatGetSrc(cUnit, mir, i);
buzbee1b4c8592011-08-31 10:43:51 -0700531 // Arguments are treated as a series of untyped 32-bit values.
532 rlArg.wide = false;
buzbee67bf8852011-08-17 17:51:35 -0700533 loadValueDirectFixed(cUnit, rlArg, r1 + i);
buzbee561227c2011-09-02 15:28:19 -0700534 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700535 }
536 }
537 return callState;
538}
539
buzbee4a3164f2011-09-03 11:25:10 -0700540// Interleave launch code for INVOKE_INTERFACE.
buzbee67bf8852011-08-17 17:51:35 -0700541static int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir,
buzbee561227c2011-09-02 15:28:19 -0700542 DecodedInstruction* dInsn, int state,
543 ArmLIR* rollback)
buzbee67bf8852011-08-17 17:51:35 -0700544{
buzbee67bf8852011-08-17 17:51:35 -0700545 switch(state) {
buzbee4a3164f2011-09-03 11:25:10 -0700546 case 0: // Load trampoline target
547 loadWordDisp(cUnit, rSELF,
548 OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampoline),
549 rLR);
550 // Load r0 with method index
551 loadConstant(cUnit, r0, dInsn->vB);
buzbee67bf8852011-08-17 17:51:35 -0700552 break;
buzbee67bf8852011-08-17 17:51:35 -0700553 default:
554 return -1;
555 }
556 return state + 1;
557}
558
buzbee67bf8852011-08-17 17:51:35 -0700559/*
560 * Interleave launch code for INVOKE_SUPER. See comments
561 * for nextVCallIns.
562 */
563static int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
buzbee561227c2011-09-02 15:28:19 -0700564 DecodedInstruction* dInsn, int state,
565 ArmLIR* rollback)
buzbee67bf8852011-08-17 17:51:35 -0700566{
buzbee4a3164f2011-09-03 11:25:10 -0700567 DCHECK(rollback == NULL);
buzbee67bf8852011-08-17 17:51:35 -0700568 RegLocation rlArg;
buzbee4a3164f2011-09-03 11:25:10 -0700569 /*
570 * This is the fast path in which the target virtual method is
571 * fully resolved at compile time. Note also that this path assumes
572 * that the check to verify that the target method index falls
573 * within the size of the super's vtable has been done at compile-time.
574 */
575 Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
576 Get(dInsn->vB);
577 CHECK(baseMethod != NULL);
578 Class* superClass = cUnit->method->GetDeclaringClass()->GetSuperClass();
579 CHECK(superClass != NULL);
580 int32_t target_idx = baseMethod->GetMethodIndex();
581 CHECK(superClass->GetVTable()->GetLength() > target_idx);
582 Method* targetMethod = superClass->GetVTable()->Get(target_idx);
583 CHECK(targetMethod != NULL);
buzbee67bf8852011-08-17 17:51:35 -0700584 switch(state) {
buzbee4a3164f2011-09-03 11:25:10 -0700585 case 0: // Get current Method* [set r0]
buzbeedfd3d702011-08-28 12:56:51 -0700586 loadCurrMethodDirect(cUnit, r0);
buzbee67bf8852011-08-17 17:51:35 -0700587 // Load "this" [set r1]
588 rlArg = oatGetSrc(cUnit, mir, 0);
589 loadValueDirectFixed(cUnit, rlArg, r1);
buzbee4a3164f2011-09-03 11:25:10 -0700590 // Get method->declaring_class_ [use r0, set rLR]
591 loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
592 rLR);
buzbee67bf8852011-08-17 17:51:35 -0700593 // Is "this" null? [use r1]
594 genNullCheck(cUnit, oatSSASrc(mir,0), r1,
595 mir->offset, NULL);
buzbee4a3164f2011-09-03 11:25:10 -0700596 break;
597 case 1: // Get method->declaring_class_->super_class [usr rLR, set rLR]
598 loadWordDisp(cUnit, rLR, Class::SuperClassOffset().Int32Value(),
599 rLR);
600 break;
601 case 2: // Get ...->super_class_->vtable [u/s rLR]
602 loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
603 break;
604 case 3: // Get target method [use rLR, set r0]
605 loadWordDisp(cUnit, rLR, (target_idx * 4) +
606 art::Array::DataOffset().Int32Value(), r0);
607 break;
608 case 4: // Get the target compiled code address [uses r0, sets rLR]
609 loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
610 break;
buzbee67bf8852011-08-17 17:51:35 -0700611 default:
612 return -1;
613 }
buzbee4a3164f2011-09-03 11:25:10 -0700614 return state + 1;
615}
616
617/* Slow-path version of nextSuperCallInsn */
618static int nextSuperCallInsnSP(CompilationUnit* cUnit, MIR* mir,
619 DecodedInstruction* dInsn, int state,
620 ArmLIR* rollback)
621{
622 DCHECK(rollback != NULL);
623 RegLocation rlArg;
624 ArmLIR* skipBranch;
625 ArmLIR* skipTarget;
626 int tReg;
627 /*
 628 * This handles the case in which the base method is not fully
 629 * resolved at compile time: we must generate code to test for
 630 * resolution at run time and, if the method is still unresolved,
 631 * bail to the slow path so the runtime can fill in the tables.
 632 * In that case we'll restart at the beginning of the sequence.
633 */
634 switch(state) {
635 case 0: // Get the current Method* [sets r0]
636 loadCurrMethodDirect(cUnit, r0);
637 break;
638 case 1: // Get method->dex_cache_resolved_methods_ [usr r0, set rLR]
639 loadWordDisp(cUnit, r0,
640 Method::GetDexCacheResolvedMethodsOffset().Int32Value(), rLR);
641 break;
642 case 2: // method->dex_cache_resolved_methods_->Get(meth_idx) [u/s rLR]
643 loadWordDisp(cUnit, rLR, (dInsn->vB * 4) +
644 art::Array::DataOffset().Int32Value(), rLR);
645 break;
646 case 3: // Resolved?
647 skipBranch = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
648 // Slowest path, bail to helper, rollback and retry
649 loadWordDisp(cUnit, rSELF,
650 OFFSETOF_MEMBER(Thread, pResolveMethodFromCode), rLR);
651 loadConstant(cUnit, r1, dInsn->vB);
652 newLIR1(cUnit, kThumbBlxR, rLR);
653 genUnconditionalBranch(cUnit, rollback);
654 // Resume normal slow path
655 skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
656 skipTarget->defMask = ENCODE_ALL;
657 skipBranch->generic.target = (LIR*)skipTarget;
658 // Get base_method->method_index [usr rLR, set rLR]
659 loadBaseDisp(cUnit, mir, rLR,
660 Method::GetMethodIndexOffset().Int32Value(), rLR,
661 kUnsignedHalf, INVALID_SREG);
662 // Load "this" [set r1]
663 rlArg = oatGetSrc(cUnit, mir, 0);
664 loadValueDirectFixed(cUnit, rlArg, r1);
665 // Load curMethod->declaring_class_ [uses r0, sets r0]
666 loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
 667 r0);
            break;
668 case 4: // Get method->declaring_class_->super_class [usr r0, set r0]
669 loadWordDisp(cUnit, r0, Class::SuperClassOffset().Int32Value(), r0);
670 break;
671 case 5: // Get ...->super_class_->vtable [u/s r0]
672 loadWordDisp(cUnit, r0, Class::VTableOffset().Int32Value(), r0);
673 // While the vtable_ load is in flight, fold the array object header size into method_index_
674 opRegImm(cUnit, kOpAdd, rLR,
675 art::Array::DataOffset().Int32Value() / 4);
676 if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
677 // Range check, throw NSM on failure
678 tReg = oatAllocTemp(cUnit);
679 loadWordDisp(cUnit, r0, art::Array::LengthOffset().Int32Value(),
680 tReg);
681 genBoundsCheck(cUnit, tReg, rLR, mir->offset, NULL);
682 oatFreeTemp(cUnit, tReg);
683 }
684 // Get target Method*
685 loadBaseIndexed(cUnit, r0, r0, rLR, 2, kWord);
686 break;
687 case 6: // Get the target compiled code address [uses r0, sets rLR]
688 loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
689 break;
690 default:
691 return -1;
692 }
buzbee67bf8852011-08-17 17:51:35 -0700693 return state + 1;
694}
695
696/*
697 * Load up to 5 arguments, the first three of which will be in
698 * r1 .. r3. On entry r0 contains the current method pointer,
699 * and as part of the load sequence, it must be replaced with
700 * the target method pointer. Note, this may also be called
701 * for "range" variants if the number of arguments is 5 or fewer.
702 */
703static int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
704 DecodedInstruction* dInsn, int callState,
705 ArmLIR** pcrLabel, bool isRange,
buzbee1da522d2011-09-04 11:22:20 -0700706 NextCallInsn nextCallInsn, ArmLIR* rollback,
707 bool skipThis)
buzbee67bf8852011-08-17 17:51:35 -0700708{
709 RegLocation rlArg;
710 int registerArgs[3];
711
712 /* If no arguments, just return */
713 if (dInsn->vA == 0)
714 return callState;
715
buzbee2e748f32011-08-29 21:02:19 -0700716 oatLockCallTemps(cUnit);
buzbee561227c2011-09-02 15:28:19 -0700717 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700718
719 /*
720 * Load frame arguments arg4 & arg5 first. Coded a little oddly to
721 * pre-schedule the method pointer target.
722 */
723 for (unsigned int i=3; i < dInsn->vA; i++) {
724 int reg;
725 int arg = (isRange) ? dInsn->vC + i : i;
726 rlArg = oatUpdateLoc(cUnit, oatGetSrc(cUnit, mir, arg));
727 if (rlArg.location == kLocPhysReg) {
728 reg = rlArg.lowReg;
729 } else {
730 reg = r1;
731 loadValueDirectFixed(cUnit, rlArg, r1);
buzbee561227c2011-09-02 15:28:19 -0700732 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700733 }
734 storeBaseDisp(cUnit, rSP, (i + 1) * 4, reg, kWord);
buzbee561227c2011-09-02 15:28:19 -0700735 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700736 }
737
738 /* Load register arguments r1..r3 */
buzbee1da522d2011-09-04 11:22:20 -0700739 for (unsigned int i = skipThis ? 1 : 0; i < 3; i++) {
buzbee67bf8852011-08-17 17:51:35 -0700740 if (i < dInsn->vA)
741 registerArgs[i] = (isRange) ? dInsn->vC + i : i;
742 else
743 registerArgs[i] = INVALID_REG;
744 }
745 callState = loadArgRegs(cUnit, mir, dInsn, callState, registerArgs,
buzbee561227c2011-09-02 15:28:19 -0700746 nextCallInsn, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700747
748 // Load direct & need a "this" null check?
749 if (pcrLabel) {
750 *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1,
751 mir->offset, NULL);
752 }
753 return callState;
754}
755
756/*
757 * May have 0+ arguments (also used for jumbo). Note that
758 * source virtual registers may be in physical registers, so may
759 * need to be flushed to home location before copying. This
760 * applies to arg3 and above (see below).
761 *
762 * Two general strategies:
763 * If < 20 arguments
764 * Pass args 3-18 using vldm/vstm block copy
765 * Pass arg0, arg1 & arg2 in r1-r3
766 * If 20+ arguments
767 * Pass args 3+ using memcpy block copy
768 * Pass arg0, arg1 & arg2 in r1-r3
769 *
770 */
771static int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
772 DecodedInstruction* dInsn, int callState,
buzbee561227c2011-09-02 15:28:19 -0700773 ArmLIR** pcrLabel, NextCallInsn nextCallInsn,
buzbee1da522d2011-09-04 11:22:20 -0700774 ArmLIR* rollback, bool skipThis)
buzbee67bf8852011-08-17 17:51:35 -0700775{
776 int firstArg = dInsn->vC;
777 int numArgs = dInsn->vA;
778
779 // If we can treat it as non-range (Jumbo ops will use range form)
780 if (numArgs <= 5)
781 return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
buzbee1da522d2011-09-04 11:22:20 -0700782 true, nextCallInsn, rollback, skipThis);
buzbee67bf8852011-08-17 17:51:35 -0700783 /*
784 * Make sure the range list doesn't span the break between the normal
785 * Dalvik vRegs and the ins.
786 */
buzbee1b4c8592011-08-31 10:43:51 -0700787 int highestArg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -0700788 int boundaryReg = cUnit->method->NumRegisters() - cUnit->method->NumIns();
buzbee1b4c8592011-08-31 10:43:51 -0700789 if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
790 LOG(FATAL) << "Argument list spanned locals & args";
buzbee67bf8852011-08-17 17:51:35 -0700791 }
792
793 /*
794 * First load the non-register arguments. Both forms expect all
795 * of the source arguments to be in their home frame location, so
796 * scan the sReg names and flush any that have been promoted to
797 * frame backing storage.
798 */
799 // Scan the rest of the args - if in physReg flush to memory
800 for (int i = 4; i < numArgs; i++) {
buzbee1b4c8592011-08-31 10:43:51 -0700801 RegLocation loc = oatGetSrc(cUnit, mir, i);
buzbee1b4c8592011-08-31 10:43:51 -0700802 if (loc.wide) {
803 loc = oatUpdateLocWide(cUnit, loc);
804 if (loc.location == kLocPhysReg) { // TUNING: if dirty?
805 storeBaseDispWide(cUnit, rSP, loc.spOffset, loc.lowReg,
806 loc.highReg);
buzbee561227c2011-09-02 15:28:19 -0700807 callState = nextCallInsn(cUnit, mir, dInsn, callState,
808 rollback);
buzbee1b4c8592011-08-31 10:43:51 -0700809 }
810 } else {
811 loc = oatUpdateLoc(cUnit, loc);
812 if (loc.location == kLocPhysReg) { // TUNING: if dirty?
813 storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
buzbee561227c2011-09-02 15:28:19 -0700814 callState = nextCallInsn(cUnit, mir, dInsn, callState,
815 rollback);
buzbee1b4c8592011-08-31 10:43:51 -0700816 }
buzbee67bf8852011-08-17 17:51:35 -0700817 }
818 }
819
820 int startOffset = cUnit->regLocation[mir->ssaRep->uses[3]].spOffset;
821 int outsOffset = 4 /* Method* */ + (3 * 4);
822 if (numArgs >= 20) {
823 // Generate memcpy to copy the block of outgoing args (source args must be in their home frame locations)
824 opRegRegImm(cUnit, kOpAdd, r0, rSP, startOffset);
825 opRegRegImm(cUnit, kOpAdd, r1, rSP, outsOffset);
826 loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pMemcpy), rLR);
827 loadConstant(cUnit, r2, (numArgs - 3) * 4);
828 newLIR1(cUnit, kThumbBlxR, rLR);
829 } else {
830 // Use vldm/vstm pair using r3 as a temp
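        // vldm pulls up to 16 words from the source frame area into s0..s15
        // (fr0 upward) and vstm writes them out to the outs area; call-sequence
        // instructions are interleaved between the two in lieu of scheduling.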
buzbeec143c552011-08-20 17:38:58 -0700831 int regsLeft = std::min(numArgs - 3, 16);
buzbee561227c2011-09-02 15:28:19 -0700832 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700833 opRegRegImm(cUnit, kOpAdd, r3, rSP, startOffset);
buzbee1b4c8592011-08-31 10:43:51 -0700834 newLIR3(cUnit, kThumb2Vldms, r3, fr0, regsLeft);
buzbee561227c2011-09-02 15:28:19 -0700835 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700836 opRegRegImm(cUnit, kOpAdd, r3, rSP, 4 /* Method* */ + (3 * 4));
buzbee561227c2011-09-02 15:28:19 -0700837 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee1b4c8592011-08-31 10:43:51 -0700838 newLIR3(cUnit, kThumb2Vstms, r3, fr0, regsLeft);
buzbee561227c2011-09-02 15:28:19 -0700839 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700840 }
841
842 // Handle the 1st 3 in r1, r2 & r3
buzbee1da522d2011-09-04 11:22:20 -0700843 for (unsigned int i = skipThis ? 1 : 0; i < dInsn->vA && i < 3; i++) {
buzbee67bf8852011-08-17 17:51:35 -0700844 RegLocation loc = oatGetSrc(cUnit, mir, firstArg + i);
845 loadValueDirectFixed(cUnit, loc, r1 + i);
buzbee561227c2011-09-02 15:28:19 -0700846 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700847 }
848
849 // Finally, deal with the register arguments
850 // We'll be using fixed registers here
buzbee2e748f32011-08-29 21:02:19 -0700851 oatLockCallTemps(cUnit);
buzbee561227c2011-09-02 15:28:19 -0700852 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700853 return callState;
854}
855
buzbee561227c2011-09-02 15:28:19 -0700856static void genInvokeStaticDirect(CompilationUnit* cUnit, MIR* mir,
857 bool direct, bool range)
buzbee67bf8852011-08-17 17:51:35 -0700858{
859 DecodedInstruction* dInsn = &mir->dalvikInsn;
860 int callState = 0;
861 ArmLIR* nullCk;
buzbee561227c2011-09-02 15:28:19 -0700862 ArmLIR** pNullCk = direct ? &nullCk : NULL;
buzbee7b1b86d2011-08-26 18:59:10 -0700863
buzbee561227c2011-09-02 15:28:19 -0700864 NextCallInsn nextCallInsn = nextSDCallInsn;
865
866 if (range) {
867 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, pNullCk,
buzbee1da522d2011-09-04 11:22:20 -0700868 nextCallInsn, NULL, false);
buzbee561227c2011-09-02 15:28:19 -0700869 } else {
870 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pNullCk,
buzbee1da522d2011-09-04 11:22:20 -0700871 false, nextCallInsn, NULL, false);
buzbee561227c2011-09-02 15:28:19 -0700872 }
buzbee67bf8852011-08-17 17:51:35 -0700873 // Finish up any of the call sequence not interleaved in arg loading
874 while (callState >= 0) {
buzbee561227c2011-09-02 15:28:19 -0700875 callState = nextCallInsn(cUnit, mir, dInsn, callState, NULL);
buzbee67bf8852011-08-17 17:51:35 -0700876 }
877 newLIR1(cUnit, kThumbBlxR, rLR);
878}
879
buzbee4a3164f2011-09-03 11:25:10 -0700880/*
881 * All invoke-interface calls bounce off of art_invoke_interface_trampoline,
882 * which will locate the target and continue on via a tail call.
883 */
buzbee67bf8852011-08-17 17:51:35 -0700884static void genInvokeInterface(CompilationUnit* cUnit, MIR* mir)
885{
886 DecodedInstruction* dInsn = &mir->dalvikInsn;
887 int callState = 0;
888 ArmLIR* nullCk;
889 /* Note: must call nextInterfaceCallInsn() prior to 1st argument load */
buzbee561227c2011-09-02 15:28:19 -0700890 callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState, NULL);
buzbee67bf8852011-08-17 17:51:35 -0700891 if (mir->dalvikInsn.opcode == OP_INVOKE_INTERFACE)
892 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee1da522d2011-09-04 11:22:20 -0700893 false, nextInterfaceCallInsn, NULL,
894 true);
buzbee67bf8852011-08-17 17:51:35 -0700895 else
896 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee1da522d2011-09-04 11:22:20 -0700897 nextInterfaceCallInsn, NULL, true);
buzbee67bf8852011-08-17 17:51:35 -0700898 // Finish up any of the call sequence not interleaved in arg loading
899 while (callState >= 0) {
buzbee561227c2011-09-02 15:28:19 -0700900 callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState, NULL);
buzbee67bf8852011-08-17 17:51:35 -0700901 }
902 newLIR1(cUnit, kThumbBlxR, rLR);
903}
904
905static void genInvokeSuper(CompilationUnit* cUnit, MIR* mir)
906{
907 DecodedInstruction* dInsn = &mir->dalvikInsn;
908 int callState = 0;
909 ArmLIR* nullCk;
buzbee4a3164f2011-09-03 11:25:10 -0700910 ArmLIR* rollback;
911 Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
912 Get(dInsn->vB);
913 NextCallInsn nextCallInsn;
914 bool fastPath = true;
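    // The fast path is usable only if the call resolves fully at compile time:
    // the base method, the declaring class's superclass, and a non-null vtable
    // entry at the target method index must all be available.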
915 if (baseMethod == NULL) {
916 fastPath = false;
917 } else {
918 Class* superClass = cUnit->method->GetDeclaringClass()->GetSuperClass();
919 if (superClass == NULL) {
920 fastPath = false;
921 } else {
922 int32_t target_idx = baseMethod->GetMethodIndex();
923 if (superClass->GetVTable()->GetLength() <= target_idx) {
924 fastPath = false;
925 } else {
926 fastPath = (superClass->GetVTable()->Get(target_idx) != NULL);
927 }
928 }
929 }
930 if (fastPath) {
931 nextCallInsn = nextSuperCallInsn;
932 rollback = NULL;
933 } else {
934 nextCallInsn = nextSuperCallInsnSP;
935 rollback = newLIR0(cUnit, kArmPseudoTargetLabel);
936 rollback->defMask = -1;
937 }
buzbee67bf8852011-08-17 17:51:35 -0700938 if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER)
939 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee1da522d2011-09-04 11:22:20 -0700940 false, nextCallInsn, rollback, true);
buzbee67bf8852011-08-17 17:51:35 -0700941 else
942 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee1da522d2011-09-04 11:22:20 -0700943 nextCallInsn, rollback, true);
buzbee67bf8852011-08-17 17:51:35 -0700944 // Finish up any of the call sequence not interleaved in arg loading
945 while (callState >= 0) {
buzbee561227c2011-09-02 15:28:19 -0700946 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700947 }
948 newLIR1(cUnit, kThumbBlxR, rLR);
949}
950
951static void genInvokeVirtual(CompilationUnit* cUnit, MIR* mir)
952{
953 DecodedInstruction* dInsn = &mir->dalvikInsn;
954 int callState = 0;
955 ArmLIR* nullCk;
buzbee561227c2011-09-02 15:28:19 -0700956 ArmLIR* rollback;
957 Method* method = cUnit->method->GetDexCacheResolvedMethods()->
958 Get(dInsn->vB);
959 NextCallInsn nextCallInsn;
buzbee7b1b86d2011-08-26 18:59:10 -0700960
Elliott Hughesa59d1792011-09-04 18:42:35 -0700961 method = NULL; // TODO
962 UNIMPLEMENTED(WARNING) << "the genInvokeVirtual fast path generates bad code (r0/r9 mixup?)";
buzbee561227c2011-09-02 15:28:19 -0700963 if (method == NULL) {
964 // Slow path
965 nextCallInsn = nextVCallInsnSP;
966 // If we need a slow-path callout, we'll restart here
967 rollback = newLIR0(cUnit, kArmPseudoTargetLabel);
968 rollback->defMask = -1;
969 } else {
970 // Fast path
971 nextCallInsn = nextVCallInsn;
972 rollback = NULL;
973 }
buzbee67bf8852011-08-17 17:51:35 -0700974 if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL)
975 callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee1da522d2011-09-04 11:22:20 -0700976 false, nextCallInsn, rollback, true);
buzbee67bf8852011-08-17 17:51:35 -0700977 else
978 callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
buzbee1da522d2011-09-04 11:22:20 -0700979 nextCallInsn, rollback, true);
buzbee67bf8852011-08-17 17:51:35 -0700980 // Finish up any of the call sequence not interleaved in arg loading
981 while (callState >= 0) {
buzbee561227c2011-09-02 15:28:19 -0700982 callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
buzbee67bf8852011-08-17 17:51:35 -0700983 }
984 newLIR1(cUnit, kThumbBlxR, rLR);
985}
986
buzbee67bf8852011-08-17 17:51:35 -0700987static bool compileDalvikInstruction(CompilationUnit* cUnit, MIR* mir,
988 BasicBlock* bb, ArmLIR* labelList)
989{
990 bool res = false; // Assume success
991 RegLocation rlSrc[3];
992 RegLocation rlDest = badLoc;
993 RegLocation rlResult = badLoc;
994 Opcode opcode = mir->dalvikInsn.opcode;
995
996 /* Prep Src and Dest locations */
997 int nextSreg = 0;
998 int nextLoc = 0;
999 int attrs = oatDataFlowAttributes[opcode];
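    // The dataflow attributes encode which Dalvik operands (A/B/C) this opcode
    // uses or defines and whether each is wide; build matching RegLocations.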
1000 rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
1001 if (attrs & DF_UA) {
1002 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
1003 nextSreg++;
1004 } else if (attrs & DF_UA_WIDE) {
1005 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
1006 nextSreg + 1);
1007 nextSreg+= 2;
1008 }
1009 if (attrs & DF_UB) {
1010 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
1011 nextSreg++;
1012 } else if (attrs & DF_UB_WIDE) {
1013 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
1014 nextSreg + 1);
1015 nextSreg+= 2;
1016 }
1017 if (attrs & DF_UC) {
1018 rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
1019 } else if (attrs & DF_UC_WIDE) {
1020 rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
1021 nextSreg + 1);
1022 }
1023 if (attrs & DF_DA) {
1024 rlDest = oatGetDest(cUnit, mir, 0);
1025 } else if (attrs & DF_DA_WIDE) {
1026 rlDest = oatGetDestWide(cUnit, mir, 0, 1);
1027 }
1028
1029 switch(opcode) {
1030 case OP_NOP:
1031 break;
1032
1033 case OP_MOVE_EXCEPTION:
1034 int exOffset;
1035 int resetReg;
buzbeec143c552011-08-20 17:38:58 -07001036 exOffset = Thread::ExceptionOffset().Int32Value();
buzbee67bf8852011-08-17 17:51:35 -07001037 resetReg = oatAllocTemp(cUnit);
1038 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1039 loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
1040 loadConstant(cUnit, resetReg, 0);
1041 storeWordDisp(cUnit, rSELF, exOffset, resetReg);
1042 storeValue(cUnit, rlDest, rlResult);
1043 break;
1044
1045 case OP_RETURN_VOID:
1046 break;
1047
1048 case OP_RETURN:
1049 case OP_RETURN_OBJECT:
1050 storeValue(cUnit, retLoc, rlSrc[0]);
1051 break;
1052
1053 case OP_RETURN_WIDE:
1054 rlDest = retLocWide;
1055 rlDest.fp = rlSrc[0].fp;
1056 storeValueWide(cUnit, rlDest, rlSrc[0]);
1057 break;
1058
1059 case OP_MOVE_RESULT_WIDE:
1060 if (mir->OptimizationFlags & MIR_INLINED)
1061 break; // Nop - combined w/ previous invoke
1062 /*
1063 * Somewhat hacky here. Because we're now passing
1064 * return values in registers, we have to let the
1065 * register allocation utilities know that the return
1066 * registers are live and may not be used for address
1067 * formation in storeValueWide.
1068 */
1069 assert(retLocWide.lowReg == r0);
buzbee1da522d2011-09-04 11:22:20 -07001070 assert(retLocWide.highReg == r1);
buzbee67bf8852011-08-17 17:51:35 -07001071 oatLockTemp(cUnit, retLocWide.lowReg);
1072 oatLockTemp(cUnit, retLocWide.highReg);
1073 storeValueWide(cUnit, rlDest, retLocWide);
1074 oatFreeTemp(cUnit, retLocWide.lowReg);
1075 oatFreeTemp(cUnit, retLocWide.highReg);
1076 break;
1077
1078 case OP_MOVE_RESULT:
1079 case OP_MOVE_RESULT_OBJECT:
1080 if (mir->OptimizationFlags & MIR_INLINED)
1081 break; // Nop - combined w/ previous invoke
1082 /* See comment for OP_MOVE_RESULT_WIDE */
1083 assert(retLoc.lowReg == r0);
1084 oatLockTemp(cUnit, retLoc.lowReg);
1085 storeValue(cUnit, rlDest, retLoc);
1086 oatFreeTemp(cUnit, retLoc.lowReg);
1087 break;
1088
1089 case OP_MOVE:
1090 case OP_MOVE_OBJECT:
1091 case OP_MOVE_16:
1092 case OP_MOVE_OBJECT_16:
1093 case OP_MOVE_FROM16:
1094 case OP_MOVE_OBJECT_FROM16:
1095 storeValue(cUnit, rlDest, rlSrc[0]);
1096 break;
1097
1098 case OP_MOVE_WIDE:
1099 case OP_MOVE_WIDE_16:
1100 case OP_MOVE_WIDE_FROM16:
1101 storeValueWide(cUnit, rlDest, rlSrc[0]);
1102 break;
1103
1104 case OP_CONST:
1105 case OP_CONST_4:
1106 case OP_CONST_16:
1107 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1108 loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
1109 storeValue(cUnit, rlDest, rlResult);
1110 break;
1111
1112 case OP_CONST_HIGH16:
1113 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1114 loadConstantNoClobber(cUnit, rlResult.lowReg,
1115 mir->dalvikInsn.vB << 16);
1116 storeValue(cUnit, rlDest, rlResult);
1117 break;
1118
1119 case OP_CONST_WIDE_16:
1120 case OP_CONST_WIDE_32:
1121 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1122 loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
1123 //TUNING: do high separately to avoid load dependency
1124 opRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
1125 storeValueWide(cUnit, rlDest, rlResult);
1126 break;
1127
1128 case OP_CONST_WIDE:
1129 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1130 loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
buzbee54330722011-08-23 16:46:55 -07001131 mir->dalvikInsn.vB_wide & 0xffffffff,
1132 (mir->dalvikInsn.vB_wide >> 32) & 0xffffffff);
buzbee3ea4ec52011-08-22 17:37:19 -07001133 storeValueWide(cUnit, rlDest, rlResult);
buzbee67bf8852011-08-17 17:51:35 -07001134 break;
1135
1136 case OP_CONST_WIDE_HIGH16:
1137 rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
1138 loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
1139 0, mir->dalvikInsn.vB << 16);
buzbee7b1b86d2011-08-26 18:59:10 -07001140 storeValueWide(cUnit, rlDest, rlResult);
buzbee67bf8852011-08-17 17:51:35 -07001141 break;
1142
1143 case OP_MONITOR_ENTER:
1144 genMonitorEnter(cUnit, mir, rlSrc[0]);
1145 break;
1146
1147 case OP_MONITOR_EXIT:
1148 genMonitorExit(cUnit, mir, rlSrc[0]);
1149 break;
1150
1151 case OP_CHECK_CAST:
1152 genCheckCast(cUnit, mir, rlSrc[0]);
1153 break;
1154
1155 case OP_INSTANCE_OF:
1156 genInstanceof(cUnit, mir, rlDest, rlSrc[0]);
1157 break;
1158
1159 case OP_NEW_INSTANCE:
1160 genNewInstance(cUnit, mir, rlDest);
1161 break;
1162
1163 case OP_THROW:
1164 genThrow(cUnit, mir, rlSrc[0]);
1165 break;
1166
1167 case OP_ARRAY_LENGTH:
1168 int lenOffset;
buzbeec143c552011-08-20 17:38:58 -07001169 lenOffset = Array::LengthOffset().Int32Value();
buzbee7b1b86d2011-08-26 18:59:10 -07001170 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
buzbee67bf8852011-08-17 17:51:35 -07001171 genNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg,
1172 mir->offset, NULL);
1173 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1174 loadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset,
1175 rlResult.lowReg);
1176 storeValue(cUnit, rlDest, rlResult);
1177 break;
1178
1179 case OP_CONST_STRING:
1180 case OP_CONST_STRING_JUMBO:
1181 genConstString(cUnit, mir, rlDest, rlSrc[0]);
1182 break;
1183
1184 case OP_CONST_CLASS:
1185 genConstClass(cUnit, mir, rlDest, rlSrc[0]);
1186 break;
1187
1188 case OP_FILL_ARRAY_DATA:
1189 genFillArrayData(cUnit, mir, rlSrc[0]);
1190 break;
1191
1192 case OP_FILLED_NEW_ARRAY:
1193 genFilledNewArray(cUnit, mir, false /* not range */);
1194 break;
1195
1196 case OP_FILLED_NEW_ARRAY_RANGE:
1197 genFilledNewArray(cUnit, mir, true /* range */);
1198 break;
1199
1200 case OP_NEW_ARRAY:
1201 genNewArray(cUnit, mir, rlDest, rlSrc[0]);
1202 break;
1203
1204 case OP_GOTO:
1205 case OP_GOTO_16:
1206 case OP_GOTO_32:
1207 // TUNING: add MIR flag to disable when unnecessary
1208 bool backwardBranch;
1209 backwardBranch = (bb->taken->startOffset <= mir->offset);
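            // Backward branches get a suspend poll so that loops remain
            // responsive to suspension requests (e.g. for GC or the debugger).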
1210 if (backwardBranch) {
1211 genSuspendPoll(cUnit, mir);
1212 }
1213 genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
1214 break;
1215
1216 case OP_PACKED_SWITCH:
1217 genPackedSwitch(cUnit, mir, rlSrc[0]);
1218 break;
1219
1220 case OP_SPARSE_SWITCH:
1221 genSparseSwitch(cUnit, mir, rlSrc[0]);
1222 break;
1223
1224 case OP_CMPL_FLOAT:
1225 case OP_CMPG_FLOAT:
1226 case OP_CMPL_DOUBLE:
1227 case OP_CMPG_DOUBLE:
1228 res = genCmpFP(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1229 break;
1230
1231 case OP_CMP_LONG:
1232 genCmpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1233 break;
1234
1235 case OP_IF_EQ:
1236 case OP_IF_NE:
1237 case OP_IF_LT:
1238 case OP_IF_GE:
1239 case OP_IF_GT:
1240 case OP_IF_LE: {
1241 bool backwardBranch;
1242 ArmConditionCode cond;
1243 backwardBranch = (bb->taken->startOffset <= mir->offset);
1244 if (backwardBranch) {
1245 genSuspendPoll(cUnit, mir);
1246 }
1247 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1248 rlSrc[1] = loadValue(cUnit, rlSrc[1], kCoreReg);
1249 opRegReg(cUnit, kOpCmp, rlSrc[0].lowReg, rlSrc[1].lowReg);
1250 switch(opcode) {
1251 case OP_IF_EQ:
1252 cond = kArmCondEq;
1253 break;
1254 case OP_IF_NE:
1255 cond = kArmCondNe;
1256 break;
1257 case OP_IF_LT:
1258 cond = kArmCondLt;
1259 break;
1260 case OP_IF_GE:
1261 cond = kArmCondGe;
1262 break;
1263 case OP_IF_GT:
1264 cond = kArmCondGt;
1265 break;
1266 case OP_IF_LE:
1267 cond = kArmCondLe;
1268 break;
1269 default:
1270 cond = (ArmConditionCode)0;
1271 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1272 }
1273 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1274 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1275 break;
1276 }
1277
1278 case OP_IF_EQZ:
1279 case OP_IF_NEZ:
1280 case OP_IF_LTZ:
1281 case OP_IF_GEZ:
1282 case OP_IF_GTZ:
1283 case OP_IF_LEZ: {
1284 bool backwardBranch;
1285 ArmConditionCode cond;
1286 backwardBranch = (bb->taken->startOffset <= mir->offset);
1287 if (backwardBranch) {
1288 genSuspendPoll(cUnit, mir);
1289 }
1290 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1291 opRegImm(cUnit, kOpCmp, rlSrc[0].lowReg, 0);
1292 switch(opcode) {
1293 case OP_IF_EQZ:
1294 cond = kArmCondEq;
1295 break;
1296 case OP_IF_NEZ:
1297 cond = kArmCondNe;
1298 break;
1299 case OP_IF_LTZ:
1300 cond = kArmCondLt;
1301 break;
1302 case OP_IF_GEZ:
1303 cond = kArmCondGe;
1304 break;
1305 case OP_IF_GTZ:
1306 cond = kArmCondGt;
1307 break;
1308 case OP_IF_LEZ:
1309 cond = kArmCondLe;
1310 break;
1311 default:
1312 cond = (ArmConditionCode)0;
1313 LOG(FATAL) << "Unexpected opcode " << (int)opcode;
1314 }
1315 genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
1316 genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
1317 break;
1318 }
1319
1320 case OP_AGET_WIDE:
1321 genArrayGet(cUnit, mir, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
1322 break;
1323 case OP_AGET:
1324 case OP_AGET_OBJECT:
1325 genArrayGet(cUnit, mir, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
1326 break;
1327 case OP_AGET_BOOLEAN:
1328 genArrayGet(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1],
1329 rlDest, 0);
1330 break;
1331 case OP_AGET_BYTE:
1332 genArrayGet(cUnit, mir, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
1333 break;
1334 case OP_AGET_CHAR:
1335 genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1],
1336 rlDest, 1);
1337 break;
1338 case OP_AGET_SHORT:
1339 genArrayGet(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
1340 break;
1341 case OP_APUT_WIDE:
1342 genArrayPut(cUnit, mir, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
1343 break;
1344 case OP_APUT:
1345 genArrayPut(cUnit, mir, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
1346 break;
1347 case OP_APUT_OBJECT:
buzbee1b4c8592011-08-31 10:43:51 -07001348 genArrayObjPut(cUnit, mir, rlSrc[1], rlSrc[2], rlSrc[0], 2);
buzbee67bf8852011-08-17 17:51:35 -07001349 break;
1350 case OP_APUT_SHORT:
1351 case OP_APUT_CHAR:
1352 genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc[1], rlSrc[2],
1353 rlSrc[0], 1);
1354 break;
1355 case OP_APUT_BYTE:
1356 case OP_APUT_BOOLEAN:
1357 genArrayPut(cUnit, mir, kUnsignedByte, rlSrc[1], rlSrc[2],
1358 rlSrc[0], 0);
1359 break;
1360
1361 case OP_IGET_WIDE:
1362 case OP_IGET_WIDE_VOLATILE:
1363 genIGetWideX(cUnit, mir, rlDest, rlSrc[0]);
1364 break;
1365
1366 case OP_IGET:
1367 case OP_IGET_VOLATILE:
1368 case OP_IGET_OBJECT:
1369 case OP_IGET_OBJECT_VOLATILE:
1370 genIGetX(cUnit, mir, kWord, rlDest, rlSrc[0]);
1371 break;
1372
1373 case OP_IGET_BOOLEAN:
1374 case OP_IGET_BYTE:
1375 genIGetX(cUnit, mir, kUnsignedByte, rlDest, rlSrc[0]);
1376 break;
1377
1378 case OP_IGET_CHAR:
1379 genIGetX(cUnit, mir, kUnsignedHalf, rlDest, rlSrc[0]);
1380 break;
1381
1382 case OP_IGET_SHORT:
1383 genIGetX(cUnit, mir, kSignedHalf, rlDest, rlSrc[0]);
1384 break;
1385
1386 case OP_IPUT_WIDE:
1387 case OP_IPUT_WIDE_VOLATILE:
1388 genIPutWideX(cUnit, mir, rlSrc[0], rlSrc[1]);
1389 break;
1390
1391 case OP_IPUT_OBJECT:
1392 case OP_IPUT_OBJECT_VOLATILE:
1393 genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], true);
1394 break;
1395
1396 case OP_IPUT:
1397 case OP_IPUT_VOLATILE:
1398 genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false);
1399 break;
1400
1401 case OP_IPUT_BOOLEAN:
1402 case OP_IPUT_BYTE:
1403 genIPutX(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], false);
1404 break;
1405
1406 case OP_IPUT_CHAR:
1407 genIPutX(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], false);
1408 break;
1409
1410 case OP_IPUT_SHORT:
1411 genIPutX(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], false);
1412 break;
1413
1414 case OP_SGET:
1415 case OP_SGET_OBJECT:
1416 case OP_SGET_BOOLEAN:
1417 case OP_SGET_BYTE:
1418 case OP_SGET_CHAR:
1419 case OP_SGET_SHORT:
1420 genSget(cUnit, mir, rlResult, rlDest);
1421 break;
1422
1423 case OP_SGET_WIDE:
1424 genSgetWide(cUnit, mir, rlResult, rlDest);
1425 break;
1426
1427 case OP_SPUT:
1428 case OP_SPUT_OBJECT:
1429 case OP_SPUT_BOOLEAN:
1430 case OP_SPUT_BYTE:
1431 case OP_SPUT_CHAR:
1432 case OP_SPUT_SHORT:
1433 genSput(cUnit, mir, rlSrc[0]);
1434 break;
1435
1436 case OP_SPUT_WIDE:
1437 genSputWide(cUnit, mir, rlSrc[0]);
1438 break;
1439
1440 case OP_INVOKE_STATIC_RANGE:
1441 genInvokeStaticDirect(cUnit, mir, false /*direct*/,
1442 true /*range*/);
1443 break;
1444 case OP_INVOKE_STATIC:
1445 genInvokeStaticDirect(cUnit, mir, false /*direct*/,
1446 false /*range*/);
1447 break;
1448
1449 case OP_INVOKE_DIRECT:
1450 genInvokeStaticDirect(cUnit, mir, true /*direct*/,
1451 false /*range*/);
1452 break;
1453 case OP_INVOKE_DIRECT_RANGE:
1454 genInvokeStaticDirect(cUnit, mir, true /*direct*/,
1455 true /*range*/);
1456 break;
1457
1458 case OP_INVOKE_VIRTUAL:
1459 case OP_INVOKE_VIRTUAL_RANGE:
1460 genInvokeVirtual(cUnit, mir);
1461 break;
1462
1463 case OP_INVOKE_SUPER:
1464 case OP_INVOKE_SUPER_RANGE:
1465 genInvokeSuper(cUnit, mir);
1466 break;
1467
1468 case OP_INVOKE_INTERFACE:
1469 case OP_INVOKE_INTERFACE_RANGE:
1470 genInvokeInterface(cUnit, mir);
1471 break;
1472
1473 case OP_NEG_INT:
1474 case OP_NOT_INT:
1475 res = genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1476 break;
1477
1478 case OP_NEG_LONG:
1479 case OP_NOT_LONG:
1480 res = genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1481 break;
1482
1483 case OP_NEG_FLOAT:
1484 res = genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1485 break;
1486
1487 case OP_NEG_DOUBLE:
1488 res = genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
1489 break;
1490
1491 case OP_INT_TO_LONG:
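 /*
  * Widen with sign-extension: the low word receives the 32-bit source and
  * the high word is filled with its sign bit (arithmetic shift right by 31).
  */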
1492 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1493 if (rlSrc[0].location == kLocPhysReg) {
1494 genRegCopy(cUnit, rlResult.lowReg, rlSrc[0].lowReg);
1495 } else {
1496 loadValueDirect(cUnit, rlSrc[0], rlResult.lowReg);
1497 }
1498 opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
1499 rlResult.lowReg, 31);
1500 storeValueWide(cUnit, rlDest, rlResult);
1501 break;
1502
1503 case OP_LONG_TO_INT:
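 /* Narrowing conversion: keep only the low word of the wide pair */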
1504 rlSrc[0] = oatUpdateLocWide(cUnit, rlSrc[0]);
1505 rlSrc[0] = oatWideToNarrow(cUnit, rlSrc[0]);
1506 storeValue(cUnit, rlDest, rlSrc[0]);
1507 break;
1508
1509 case OP_INT_TO_BYTE:
1510 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1511 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1512 opRegReg(cUnit, kOp2Byte, rlResult.lowReg, rlSrc[0].lowReg);
1513 storeValue(cUnit, rlDest, rlResult);
1514 break;
1515
1516 case OP_INT_TO_SHORT:
1517 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1518 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1519 opRegReg(cUnit, kOp2Short, rlResult.lowReg, rlSrc[0].lowReg);
1520 storeValue(cUnit, rlDest, rlResult);
1521 break;
1522
1523 case OP_INT_TO_CHAR:
1524 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1525 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1526 opRegReg(cUnit, kOp2Char, rlResult.lowReg, rlSrc[0].lowReg);
1527 storeValue(cUnit, rlDest, rlResult);
1528 break;
1529
1530 case OP_INT_TO_FLOAT:
1531 case OP_INT_TO_DOUBLE:
1532 case OP_LONG_TO_FLOAT:
1533 case OP_LONG_TO_DOUBLE:
1534 case OP_FLOAT_TO_INT:
1535 case OP_FLOAT_TO_LONG:
1536 case OP_FLOAT_TO_DOUBLE:
1537 case OP_DOUBLE_TO_INT:
1538 case OP_DOUBLE_TO_LONG:
1539 case OP_DOUBLE_TO_FLOAT:
1540 genConversion(cUnit, mir);
1541 break;
1542
1543 case OP_ADD_INT:
1544 case OP_SUB_INT:
1545 case OP_MUL_INT:
1546 case OP_DIV_INT:
1547 case OP_REM_INT:
1548 case OP_AND_INT:
1549 case OP_OR_INT:
1550 case OP_XOR_INT:
1551 case OP_SHL_INT:
1552 case OP_SHR_INT:
1553 case OP_USHR_INT:
1554 case OP_ADD_INT_2ADDR:
1555 case OP_SUB_INT_2ADDR:
1556 case OP_MUL_INT_2ADDR:
1557 case OP_DIV_INT_2ADDR:
1558 case OP_REM_INT_2ADDR:
1559 case OP_AND_INT_2ADDR:
1560 case OP_OR_INT_2ADDR:
1561 case OP_XOR_INT_2ADDR:
1562 case OP_SHL_INT_2ADDR:
1563 case OP_SHR_INT_2ADDR:
1564 case OP_USHR_INT_2ADDR:
1565 genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1566 break;
1567
1568 case OP_ADD_LONG:
1569 case OP_SUB_LONG:
1570 case OP_MUL_LONG:
1571 case OP_DIV_LONG:
1572 case OP_REM_LONG:
1573 case OP_AND_LONG:
1574 case OP_OR_LONG:
1575 case OP_XOR_LONG:
1576 case OP_ADD_LONG_2ADDR:
1577 case OP_SUB_LONG_2ADDR:
1578 case OP_MUL_LONG_2ADDR:
1579 case OP_DIV_LONG_2ADDR:
1580 case OP_REM_LONG_2ADDR:
1581 case OP_AND_LONG_2ADDR:
1582 case OP_OR_LONG_2ADDR:
1583 case OP_XOR_LONG_2ADDR:
1584 genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1585 break;
1586
1587 case OP_SHL_LONG:
1588 case OP_SHR_LONG:
1589 case OP_USHR_LONG:
1590 case OP_SHL_LONG_2ADDR:
1591 case OP_SHR_LONG_2ADDR:
1592 case OP_USHR_LONG_2ADDR:
1593 genShiftOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1594 break;
1595
1596 case OP_ADD_FLOAT:
1597 case OP_SUB_FLOAT:
1598 case OP_MUL_FLOAT:
1599 case OP_DIV_FLOAT:
1600 case OP_REM_FLOAT:
1601 case OP_ADD_FLOAT_2ADDR:
1602 case OP_SUB_FLOAT_2ADDR:
1603 case OP_MUL_FLOAT_2ADDR:
1604 case OP_DIV_FLOAT_2ADDR:
1605 case OP_REM_FLOAT_2ADDR:
1606 genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1607 break;
1608
1609 case OP_ADD_DOUBLE:
1610 case OP_SUB_DOUBLE:
1611 case OP_MUL_DOUBLE:
1612 case OP_DIV_DOUBLE:
1613 case OP_REM_DOUBLE:
1614 case OP_ADD_DOUBLE_2ADDR:
1615 case OP_SUB_DOUBLE_2ADDR:
1616 case OP_MUL_DOUBLE_2ADDR:
1617 case OP_DIV_DOUBLE_2ADDR:
1618 case OP_REM_DOUBLE_2ADDR:
1619 genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1620 break;
1621
1622 case OP_RSUB_INT:
1623 case OP_ADD_INT_LIT16:
1624 case OP_MUL_INT_LIT16:
1625 case OP_DIV_INT_LIT16:
1626 case OP_REM_INT_LIT16:
1627 case OP_AND_INT_LIT16:
1628 case OP_OR_INT_LIT16:
1629 case OP_XOR_INT_LIT16:
1630 case OP_ADD_INT_LIT8:
1631 case OP_RSUB_INT_LIT8:
1632 case OP_MUL_INT_LIT8:
1633 case OP_DIV_INT_LIT8:
1634 case OP_REM_INT_LIT8:
1635 case OP_AND_INT_LIT8:
1636 case OP_OR_INT_LIT8:
1637 case OP_XOR_INT_LIT8:
1638 case OP_SHL_INT_LIT8:
1639 case OP_SHR_INT_LIT8:
1640 case OP_USHR_INT_LIT8:
1641 genArithOpIntLit(cUnit, mir, rlDest, rlSrc[0], mir->dalvikInsn.vC);
1642 break;
1643
1644 default:
1645 res = true;
1646 }
1647 return res;
1648}
1649
1650static const char *extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
1651 "kMirOpPhi",
1652 "kMirOpNullNRangeUpCheck",
1653 "kMirOpNullNRangeDownCheck",
1654 "kMirOpLowerBound",
1655 "kMirOpPunt",
1656 "kMirOpCheckInlinePrediction",
1657};
1658
1659/* Extended MIR instructions like PHI */
1660static void handleExtendedMethodMIR(CompilationUnit* cUnit, MIR* mir)
1661{
1662 int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
1663 char* msg = (char*)oatNew(strlen(extendedMIROpNames[opOffset]) + 1, false);
1664 strcpy(msg, extendedMIROpNames[opOffset]);
1665 ArmLIR* op = newLIR1(cUnit, kArmPseudoExtended, (int) msg);
1666
1667 switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
1668 case kMirOpPhi: {
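 /*
  * Phi nodes produce no target code; mark the pseudo op as a nop and
  * record the SSA string so it still appears in verbose listings.
  */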
1669 char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
1670 op->flags.isNop = true;
1671 newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
1672 break;
1673 }
1674 default:
1675 break;
1676 }
1677}
1678
1679/* If there are any ins passed in registers that have not been promoted
1680 * to a callee-save register, flush them to the frame.
1681 * Note: at this point, copy any ins that are passed in register to their
1682 * home location */
1683static void flushIns(CompilationUnit* cUnit)
1684{
1685 if (cUnit->method->NumIns() == 0)
1686 return;
1687 int inRegs = (cUnit->method->NumIns() > 2) ? 3
1688 : cUnit->method->NumIns();
1689 int startReg = r1;
1690 int startLoc = cUnit->method->NumRegisters() -
1691 cUnit->method->NumIns();
1692 for (int i = 0; i < inRegs; i++) {
1693 RegLocation loc = cUnit->regLocation[startLoc + i];
1694 //TUNING: be smarter about flushing ins to frame
1695 storeBaseDisp(cUnit, rSP, loc.spOffset, startReg + i, kWord);
1696 if (loc.location == kLocPhysReg) {
1697 genRegCopy(cUnit, loc.lowReg, startReg + i);
1698 }
1699 }
1700
1701 // Handle special case of wide argument half in regs, half in frame
1702 if (inRegs == 3) {
1703 RegLocation loc = cUnit->regLocation[startLoc + 2];
1704 if (loc.wide && loc.location == kLocPhysReg) {
1705 // Load the other half of the arg into the promoted pair
1706 loadWordDisp(cUnit, rSP, loc.spOffset + 4, loc.highReg);
1707 inRegs++;
1708 }
1709 }
1710
1711 // Now, do initial assignment of all promoted arguments passed in frame
1712 for (int i = inRegs; i < cUnit->method->NumIns();) {
1713 RegLocation loc = cUnit->regLocation[startLoc + i];
1714 if (loc.fpLocation == kLocPhysReg) {
1715 loc.location = kLocPhysReg;
1716 loc.fp = true;
1717 loc.lowReg = loc.fpLowReg;
1718 loc.highReg = loc.fpHighReg;
1719 }
1720 if (loc.location == kLocPhysReg) {
1721 if (loc.wide) {
1722 loadBaseDispWide(cUnit, NULL, rSP, loc.spOffset,
1723 loc.lowReg, loc.highReg, INVALID_SREG);
1724 i++;
1725 } else {
1726 loadWordDisp(cUnit, rSP, loc.spOffset, loc.lowReg);
1727 }
1728 }
1729 i++;
1730 }
1731}
1732
1733/* Handle the content in each basic block */
1734static bool methodBlockCodeGen(CompilationUnit* cUnit, BasicBlock* bb)
1735{
1736 MIR* mir;
1737 ArmLIR* labelList = (ArmLIR*) cUnit->blockLabelList;
1738 int blockId = bb->id;
1739
1740 cUnit->curBlock = bb;
1741 labelList[blockId].operands[0] = bb->startOffset;
1742
1743 /* Insert the block label */
1744 labelList[blockId].opcode = kArmPseudoNormalBlockLabel;
1745 oatAppendLIR(cUnit, (LIR*) &labelList[blockId]);
1746
1747 oatClobberAllRegs(cUnit);
1748 oatResetNullCheck(cUnit);
1749
1750 ArmLIR* headLIR = NULL;
1751
1752 if (bb->blockType == kEntryBlock) {
1753 /*
1754 * On entry, r0, r1, r2 & r3 are live. Let the register allocation
1755 * mechanism know so it doesn't try to use any of them when
1756 * expanding the frame or flushing. This leaves the utility
1757 * code with a single temp: r12. This should be enough.
1758 */
1759 oatLockTemp(cUnit, r0);
1760 oatLockTemp(cUnit, r1);
1761 oatLockTemp(cUnit, r2);
1762 oatLockTemp(cUnit, r3);
1763 newLIR0(cUnit, kArmPseudoMethodEntry);
1764 /* Spill core callee saves */
1765 newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
1766 /* Need to spill any FP regs? */
1767 if (cUnit->numFPSpills) {
1768 newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
1769 }
1770 opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
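 /*
  * Spill the incoming Method* to the base of the new frame before the
  * argument registers are flushed (assumes the Method* arrives in r0
  * under this calling convention).
  */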
1771 storeBaseDisp(cUnit, rSP, 0, r0, kWord);
1772 flushIns(cUnit);
1773 oatFreeTemp(cUnit, r0);
1774 oatFreeTemp(cUnit, r1);
1775 oatFreeTemp(cUnit, r2);
1776 oatFreeTemp(cUnit, r3);
1777 } else if (bb->blockType == kExitBlock) {
1778 newLIR0(cUnit, kArmPseudoMethodExit);
1779 opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
1780 /* Need to restore any FP callee saves? */
1781 if (cUnit->numFPSpills) {
1782 newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
1783 }
1784 if (cUnit->coreSpillMask & (1 << rLR)) {
1785 /* Unspill rLR to rPC */
1786 cUnit->coreSpillMask &= ~(1 << rLR);
1787 cUnit->coreSpillMask |= (1 << rPC);
1788 }
1789 newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
1790 if (!(cUnit->coreSpillMask & (1 << rPC))) {
1791 /* We didn't pop to rPC, so must do a bx rLR */
1792 newLIR1(cUnit, kThumbBx, rLR);
1793 }
1794 }
1795
1796 for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
1797
1798 oatResetRegPool(cUnit);
1799 if (cUnit->disableOpt & (1 << kTrackLiveTemps)) {
1800 oatClobberAllRegs(cUnit);
1801 }
1802
1803 if (cUnit->disableOpt & (1 << kSuppressLoads)) {
1804 oatResetDefTracking(cUnit);
1805 }
1806
1807 if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
1808 handleExtendedMethodMIR(cUnit, mir);
1809 continue;
1810 }
1811
1812 cUnit->currentDalvikOffset = mir->offset;
1813
1814 Opcode dalvikOpcode = mir->dalvikInsn.opcode;
1815 InstructionFormat dalvikFormat =
1816 dexGetFormatFromOpcode(dalvikOpcode);
1817
1818 ArmLIR* boundaryLIR;
1819
1820 /* Mark the beginning of a Dalvik instruction for line tracking */
1821 boundaryLIR = newLIR1(cUnit, kArmPseudoDalvikByteCodeBoundary,
1822 (int) oatGetDalvikDisassembly(
1823 &mir->dalvikInsn, ""));
1824 /* Remember the first LIR for this block */
1825 if (headLIR == NULL) {
1826 headLIR = boundaryLIR;
1827 /* Set the first boundaryLIR as a scheduling barrier */
1828 headLIR->defMask = ENCODE_ALL;
1829 }
1830
1831 /* Don't generate the SSA annotation unless verbose mode is on */
1832 if (cUnit->printMe && mir->ssaRep) {
1833 char *ssaString = oatGetSSAString(cUnit, mir->ssaRep);
1834 newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
1835 }
1836
1837 bool notHandled = compileDalvikInstruction(cUnit, mir, bb, labelList);
1838
1839 if (notHandled) {
1840 char buf[100];
1841 snprintf(buf, 100, "%#06x: Opcode %#x (%s) / Fmt %d not handled",
1842 mir->offset,
1843 dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
1844 dalvikFormat);
1845 LOG(FATAL) << buf;
1846 }
1847 }
1848
1849 if (headLIR) {
1850 /*
1851 * Eliminate redundant loads/stores and delay stores into later
1852 * slots
1853 */
1854 oatApplyLocalOptimizations(cUnit, (LIR*) headLIR,
1855 cUnit->lastLIRInsn);
1856
1857 /*
1858 * Generate an unconditional branch to the fallthrough block.
1859 */
1860 if (bb->fallThrough) {
1861 genUnconditionalBranch(cUnit,
1862 &labelList[bb->fallThrough->id]);
1863 }
1864 }
1865 return false;
1866}
1867
1868/*
1869 * Nop any unconditional branches that go to the next instruction.
1870 * Note: new redundant branches may be inserted later, and we'll
1871 * use a check in final instruction assembly to nop those out.
1872 */
1873void removeRedundantBranches(CompilationUnit* cUnit)
1874{
1875 ArmLIR* thisLIR;
1876
1877 for (thisLIR = (ArmLIR*) cUnit->firstLIRInsn;
1878 thisLIR != (ArmLIR*) cUnit->lastLIRInsn;
1879 thisLIR = NEXT_LIR(thisLIR)) {
1880
1881 /* Branch to the next instruction */
1882 if ((thisLIR->opcode == kThumbBUncond) ||
1883 (thisLIR->opcode == kThumb2BUncond)) {
1884 ArmLIR* nextLIR = thisLIR;
1885
1886 while (true) {
1887 nextLIR = NEXT_LIR(nextLIR);
1888
1889 /*
1890 * Is the branch target the next instruction?
1891 */
1892 if (nextLIR == (ArmLIR*) thisLIR->generic.target) {
1893 thisLIR->flags.isNop = true;
1894 break;
1895 }
1896
1897 /*
1898 * Found real useful stuff between the branch and the target.
1899 * Need to explicitly check the lastLIRInsn here because it
1900 * might be the last real instruction.
1901 */
1902 if (!isPseudoOpcode(nextLIR->opcode) ||
1903 (nextLIR == (ArmLIR*) cUnit->lastLIRInsn))
1904 break;
1905 }
1906 }
1907 }
1908}
1909
1910void oatMethodMIR2LIR(CompilationUnit* cUnit)
1911{
1912 /* Used to hold the labels of each block */
1913 cUnit->blockLabelList =
1914 (void *) oatNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
1915
1916 oatDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen,
1917 kPreOrderDFSTraversal, false /* Iterative */);
1918 removeRedundantBranches(cUnit);
1919}
1920
1921/* Common initialization routine for an architecture family */
1922bool oatArchInit()
1923{
1924 int i;
1925
1926 for (i = 0; i < kArmLast; i++) {
1927 if (EncodingMap[i].opcode != i) {
1928 LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
1929 " is wrong: expecting " << i << ", seeing " <<
1930 (int)EncodingMap[i].opcode;
1931 }
1932 }
1933
1934 return oatArchVariantInit();
1935}
1936
1937/* Needed by the Assembler */
1938void oatSetupResourceMasks(ArmLIR* lir)
1939{
1940 setupResourceMasks(lir);
1941}
1942
1943/* Needed by the ld/st optimizations */
1944ArmLIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
1945{
1946 return genRegCopyNoInsert(cUnit, rDest, rSrc);
1947}
1948
1949/* Needed by the register allocator */
1950ArmLIR* oatRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
1951{
1952 return genRegCopy(cUnit, rDest, rSrc);
1953}
1954
1955/* Needed by the register allocator */
1956void oatRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
1957 int srcLo, int srcHi)
1958{
1959 genRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
1960}
1961
1962void oatFlushRegImpl(CompilationUnit* cUnit, int rBase,
1963 int displacement, int rSrc, OpSize size)
1964{
1965 storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
1966}
1967
1968void oatFlushRegWideImpl(CompilationUnit* cUnit, int rBase,
1969 int displacement, int rSrcLo, int rSrcHi)
1970{
1971 storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
1972}