/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

static const RegLocation badLoc = {kLocDalvikFrame, 0, 0, INVALID_REG,
                                   INVALID_REG, INVALID_SREG, 0,
                                   kLocDalvikFrame, INVALID_REG, INVALID_REG,
                                   INVALID_OFFSET};
static const RegLocation retLoc = LOC_DALVIK_RETURN_VAL;
static const RegLocation retLocWide = LOC_DALVIK_RETURN_VAL_WIDE;

/*
 * Let the helper function take care of everything. Will call
 * Array::AllocFromCode(type_idx, method, count);
 * Note: AllocFromCode will handle checks for errNegativeArraySize.
 */
static void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
                        RegLocation rlSrc)
{
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pAllocFromCode), rLR);
    loadCurrMethodDirect(cUnit, r1);              // arg1 <- Method*
    loadConstant(cUnit, r0, mir->dalvikInsn.vC);  // arg0 <- type_id
    loadValueDirectFixed(cUnit, rlSrc, r2);       // arg2 <- count
    opReg(cUnit, kOpBlx, rLR);
    oatClobberCallRegs(cUnit);
    RegLocation rlResult = oatGetReturn(cUnit);
    storeValue(cUnit, rlDest, rlResult);
}
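
/*
 * Rough shape of what genNewArray emits (illustrative only; the exact
 * loads for the Method* and the count depend on where those values
 * currently live):
 *
 *     ldr   rLR, [rSELF, #pAllocFromCode]  @ helper entry point
 *     <load current Method* into r1>
 *     mov   r0, #type_id
 *     <load requested length into r2>
 *     blx   rLR          @ Array::AllocFromCode(type_idx, method, count)
 *                        @ new array is returned in r0
 */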

/*
 * Similar to genNewArray, but with post-allocation initialization.
 * Verifier guarantees we're dealing with an array class. Current
 * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
 * Current code also throws internal unimp if not 'L', '[' or 'I'.
 */
static void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int elems;
    int typeId;
    if (isRange) {
        elems = dInsn->vA;
        typeId = dInsn->vB;
    } else {
        elems = dInsn->vB;
        typeId = dInsn->vC;
    }
    oatFlushAllRegs(cUnit);    /* Everything to home location */
    loadWordDisp(cUnit, rSELF,
                 OFFSETOF_MEMBER(Thread, pCheckAndAllocFromCode), rLR);
    loadCurrMethodDirect(cUnit, r1);    // arg1 <- Method*
    loadConstant(cUnit, r0, typeId);    // arg0 <- type_id
    loadConstant(cUnit, r2, elems);     // arg2 <- count
    opReg(cUnit, kOpBlx, rLR);
    /*
     * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
     * return region. Because AllocFromCode placed the new array
     * in r0, we'll just lock it into place. When debugger support is
     * added, it may be necessary to additionally copy all return
     * values to a home location in thread-local storage.
     */
    oatLockTemp(cUnit, r0);

    // Having a range of 0 is legal
    if (isRange && (dInsn->vA > 0)) {
        /*
         * Bit of ugliness here. We're going to generate a mem copy loop
         * on the register range, but it is possible that some regs
         * in the range have been promoted. This is unlikely, but
         * before generating the copy, we'll just force a flush
         * of any regs in the source range that have been promoted to
         * their home location.
         */
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation loc = oatUpdateLoc(cUnit,
                                           oatGetSrc(cUnit, mir, i));
            if (loc.location == kLocPhysReg) {
                storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
            }
        }
        /*
         * TUNING note: generated code here could be much improved, but
         * this is an uncommon operation and isn't especially performance
         * critical.
         */
        int rSrc = oatAllocTemp(cUnit);
        int rDst = oatAllocTemp(cUnit);
        int rIdx = oatAllocTemp(cUnit);
        int rVal = rLR;  // Using a lot of temps, rLR is known free here
        // Set up source pointer
        RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
        opRegRegImm(cUnit, kOpAdd, rSrc, rSP, rlFirst.spOffset);
        // Set up the target pointer
        opRegRegImm(cUnit, kOpAdd, rDst, r0,
                    Array::DataOffset().Int32Value());
        // Set up the loop counter (known to be > 0)
        loadConstant(cUnit, rIdx, dInsn->vA);
        // Generate the copy loop. Going backwards for convenience
        ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
        target->defMask = ENCODE_ALL;
        // Decrement index first so elements vA-1 .. 0 get copied;
        // use setflags encoding here
        newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
        // Copy next element
        loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
        storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
        ArmLIR* branch = opCondBranch(cUnit, kArmCondNe);
        branch->generic.target = (LIR*)target;
    } else if (!isRange) {
        // TUNING: interleave
        for (unsigned int i = 0; i < dInsn->vA; i++) {
            RegLocation rlArg = loadValue(cUnit,
                                          oatGetSrc(cUnit, mir, i), kCoreReg);
            storeBaseDisp(cUnit, r0,
                          Array::DataOffset().Int32Value() +
                          i * 4, rlArg.lowReg, kWord);
            // If the loadValue caused a temp to be allocated, free it
            if (oatIsTemp(cUnit, rlArg.lowReg)) {
                oatFreeTemp(cUnit, rlArg.lowReg);
            }
        }
    }
}
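
/*
 * Rough shape of the copy loop emitted above for the range case
 * (illustrative only; real encodings come from the LIR emitters):
 *
 *     add   rSrc, rSP, #first_arg_sp_offset
 *     add   rDst, r0, #Array::DataOffset()
 *     mov   rIdx, #elems
 * loop:
 *     subs  rIdx, rIdx, #1
 *     ldr   rVal, [rSrc, rIdx, lsl #2]
 *     str   rVal, [rDst, rIdx, lsl #2]
 *     bne   loop
 */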

static void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    bool isObject = ((mir->dalvikInsn.opcode == OP_SPUT_OBJECT) ||
                     (mir->dalvikInsn.opcode == OP_SPUT_OBJECT_VOLATILE));
    int fieldIdx = mir->dalvikInsn.vB;
    Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
    if (field == NULL) {
        // Slow path
        int funcOffset = isObject ? OFFSETOF_MEMBER(Thread, pSetObjStatic)
                                  : OFFSETOF_MEMBER(Thread, pSet32Static);
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        loadValueDirect(cUnit, rlSrc, r2);
        opReg(cUnit, kOpBlx, rLR);
        oatClobberCallRegs(cUnit);
    } else {
        // Fast path
        int fieldOffset = field->GetOffset().Int32Value();
        art::ClassLinker* class_linker = art::Runtime::Current()->
            GetClassLinker();
        const art::DexFile& dex_file = class_linker->
            FindDexFile(field->GetDeclaringClass()->GetDexCache());
        const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
        int typeIdx = field_id.class_idx_;
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, rMethod);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        opReg(cUnit, kOpBlx, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlSrc = oatGetSrc(cUnit, mir, 0);
        rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
        storeWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        if (isObject) {
            markGCCard(cUnit, rlSrc.lowReg, rBase);
        }
        oatFreeTemp(cUnit, rBase);
    }
}
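
/*
 * Sketch of the resolved-field fast path above (the conditional init
 * call is realized with branchOver/skipTarget; the helper is assumed to
 * hand the storage base back in r0):
 *
 *     r1 <- current Method*
 *     r0 <- method->dex_cache_initialized_static_storage_
 *     r0 <- storage[typeIdx]            @ declaring class' static storage
 *     if (r0 == 0) {                    @ class not yet initialized
 *         r0 <- typeIdx; blx Thread::pInitializeStaticStorage
 *     }
 *     str  <value>, [r0, #fieldOffset]  (+ barrier / GC card as needed)
 */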

static void genSputWide(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc)
{
    int fieldIdx = mir->dalvikInsn.vB;
    Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
    if (field == NULL) {
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pSet64Static), rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        loadValueDirectWideFixed(cUnit, rlSrc, r2, r3);
        opReg(cUnit, kOpBlx, rLR);
        oatClobberCallRegs(cUnit);
    } else {
        // Fast path
        int fieldOffset = field->GetOffset().Int32Value();
        art::ClassLinker* class_linker = art::Runtime::Current()->
            GetClassLinker();
        const art::DexFile& dex_file = class_linker->
            FindDexFile(field->GetDeclaringClass()->GetDexCache());
        const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
        int typeIdx = field_id.class_idx_;
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, r1);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        opReg(cUnit, kOpBlx, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
        rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
        storeBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
                          rlSrc.highReg);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        oatFreeTemp(cUnit, rBase);
    }
}


static void genSgetWide(CompilationUnit* cUnit, MIR* mir,
                        RegLocation rlResult, RegLocation rlDest)
{
    int fieldIdx = mir->dalvikInsn.vB;
    Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
    if (field == NULL) {
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pGet64Static), rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        opReg(cUnit, kOpBlx, rLR);
        RegLocation rlResult = oatGetReturnWide(cUnit);
        storeValueWide(cUnit, rlDest, rlResult);
    } else {
        // Fast path
        int fieldOffset = field->GetOffset().Int32Value();
        art::ClassLinker* class_linker = art::Runtime::Current()->
            GetClassLinker();
        const art::DexFile& dex_file = class_linker->
            FindDexFile(field->GetDeclaringClass()->GetDexCache());
        const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
        int typeIdx = field_id.class_idx_;
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, rMethod);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        opReg(cUnit, kOpBlx, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlDest = oatGetDestWide(cUnit, mir, 0, 1);
        RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        loadBaseDispWide(cUnit, NULL, rBase, fieldOffset, rlResult.lowReg,
                         rlResult.highReg, INVALID_SREG);
        oatFreeTemp(cUnit, rBase);
        storeValueWide(cUnit, rlDest, rlResult);
    }
}

static void genSget(CompilationUnit* cUnit, MIR* mir,
                    RegLocation rlResult, RegLocation rlDest)
{
    int fieldIdx = mir->dalvikInsn.vB;
    Field* field = cUnit->method->GetDexCacheResolvedFields()->Get(fieldIdx);
    bool isObject = ((mir->dalvikInsn.opcode == OP_SGET_OBJECT) ||
                     (mir->dalvikInsn.opcode == OP_SGET_OBJECT_VOLATILE));
    if (field == NULL) {
        // Slow path
        int funcOffset = isObject ? OFFSETOF_MEMBER(Thread, pGetObjStatic)
                                  : OFFSETOF_MEMBER(Thread, pGet32Static);
        oatFlushAllRegs(cUnit);
        loadWordDisp(cUnit, rSELF, funcOffset, rLR);
        loadConstant(cUnit, r0, mir->dalvikInsn.vB);
        loadCurrMethodDirect(cUnit, r1);
        opReg(cUnit, kOpBlx, rLR);
        RegLocation rlResult = oatGetReturn(cUnit);
        storeValue(cUnit, rlDest, rlResult);
    } else {
        // Fast path
        int fieldOffset = field->GetOffset().Int32Value();
        art::ClassLinker* class_linker = art::Runtime::Current()->
            GetClassLinker();
        const art::DexFile& dex_file = class_linker->
            FindDexFile(field->GetDeclaringClass()->GetDexCache());
        const art::DexFile::FieldId& field_id = dex_file.GetFieldId(fieldIdx);
        int typeIdx = field_id.class_idx_;
        // Using fixed register to sync with slow path
        int rMethod = r1;
        oatLockTemp(cUnit, rMethod);
        loadCurrMethodDirect(cUnit, rMethod);
        int rBase = r0;
        oatLockTemp(cUnit, rBase);
        loadWordDisp(cUnit, rMethod,
                     Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
                     rBase);
        loadWordDisp(cUnit, rBase, art::Array::DataOffset().Int32Value() +
                     sizeof(int32_t*) * typeIdx, rBase);
        // TUNING: fast path should fall through
        ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
        loadWordDisp(cUnit, rSELF,
                     OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
        loadConstant(cUnit, r0, typeIdx);
        opReg(cUnit, kOpBlx, rLR);
        ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
        skipTarget->defMask = ENCODE_ALL;
        branchOver->generic.target = (LIR*)skipTarget;
        rlDest = oatGetDest(cUnit, mir, 0);
        rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
#if ANDROID_SMP != 0
        if (field->IsVolatile()) {
            oatGenMemBarrier(cUnit, kSY);
        }
#endif
        loadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
        oatFreeTemp(cUnit, rBase);
        storeValue(cUnit, rlDest, rlResult);
    }
}

typedef int (*NextCallInsn)(CompilationUnit*, MIR*, DecodedInstruction*, int,
                            ArmLIR*);
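
/*
 * Each invoke flavor supplies a NextCallInsn callback. The argument
 * loaders below invoke it between individual argument moves so the call
 * setup (Method* fetch, vtable walk, code pointer load) is interleaved
 * with argument shuffling, and the invoke generators then drain whatever
 * states remain:
 *
 *     while (callState >= 0) {
 *         callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
 *     }
 *     newLIR1(cUnit, kThumbBlxR, rLR);    // target code address is in rLR
 */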

/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 */
static int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
                          DecodedInstruction* dInsn, int state,
                          ArmLIR* rollback)
{
    DCHECK(rollback == NULL);
    uint32_t idx = dInsn->vB;
    switch(state) {
        case 0:  // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1:  // Get method->code_and_direct_methods_
            loadWordDisp(cUnit, r0,
                         Method::GetDexCacheCodeAndDirectMethodsOffset().Int32Value(),
                         r0);
            break;
        case 2:  // Grab target method* and target code_
            loadWordDisp(cUnit, r0,
                         art::CodeAndDirectMethods::CodeOffsetInBytes(idx), rLR);
            loadWordDisp(cUnit, r0,
                         art::CodeAndDirectMethods::MethodOffsetInBytes(idx), r0);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/*
 * Bit of a hack here - in lieu of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use rLR as a temp prior to target address loading.
 * Note also that we'll load the first argument ("this") into
 * r1 here rather than the standard loadArgRegs.
 */
static int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
                         DecodedInstruction* dInsn, int state,
                         ArmLIR* rollback)
{
    DCHECK(rollback == NULL);
    RegLocation rlArg;
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time.
     */
    Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
        Get(dInsn->vB);
    CHECK(baseMethod != NULL);
    uint32_t target_idx = baseMethod->GetMethodIndex();
    switch(state) {
        case 0:  // Get "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            break;
        case 1:  // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir->offset, NULL);
            // get this->klass_ [use r1, set rLR]
            loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
            break;
        case 2:  // Get this->klass_->vtable [use rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            break;
        case 3:  // Get target method [use rLR, set r0]
            loadWordDisp(cUnit, rLR, (target_idx * 4) +
                         art::Array::DataOffset().Int32Value(), r0);
            break;
        case 4:  // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}
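
/*
 * Net effect of the resolved virtual dispatch above (one line per state;
 * the final blx is emitted by the invoke generator):
 *
 *     r1  <- "this"
 *     null-check r1
 *     rLR <- this->klass_
 *     rLR <- klass_->vtable_
 *     r0  <- vtable_[target_idx]          @ target Method*
 *     rLR <- r0->code_                    @ compiled entry point
 */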

static int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                           DecodedInstruction* dInsn, int state,
                           ArmLIR* rollback)
{
    DCHECK(rollback != NULL);
    RegLocation rlArg;
    ArmLIR* skipBranch;
    ArmLIR* skipTarget;
    /*
     * This handles the case in which the base method is not fully
     * resolved at compile time. We must generate code to test for
     * resolution at run time and, if unresolved, bail to the slow path
     * to fill in all the tables. In that case, we'll restart at the
     * beginning of the sequence.
     */
    switch(state) {
        case 0:  // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1:  // Get method->dex_cache_resolved_methods_
            loadWordDisp(cUnit, r0,
                         Method::GetDexCacheResolvedMethodsOffset().Int32Value(), rLR);
            break;
        case 2:  // method->dex_cache_resolved_methods_->Get(method_idx)
            loadWordDisp(cUnit, rLR, (dInsn->vB * 4) +
                         art::Array::DataOffset().Int32Value(), rLR);
            break;
        case 3:  // Resolved?
            skipBranch = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
            // Slowest path, bail to helper, rollback and retry
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pResolveMethodFromCode), rLR);
            loadConstant(cUnit, r1, dInsn->vB);
            newLIR1(cUnit, kThumbBlxR, rLR);
            genUnconditionalBranch(cUnit, rollback);
            // Resume normal slow path
            skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            skipBranch->generic.target = (LIR*)skipTarget;
            // Get base_method->method_index [use rLR, set r0]
            loadBaseDisp(cUnit, mir, rLR,
                         Method::GetMethodIndexOffset().Int32Value(), r0,
                         kUnsignedHalf, INVALID_SREG);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            break;
        case 4:
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir->offset, NULL);
            // get this->clazz [use r1, set rLR]
            loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
            break;
        case 5:
            // get this->klass_->vtable_ [use rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            DCHECK((art::Array::DataOffset().Int32Value() & 0x3) == 0);
            // In the load shadow, fold the vtable_ object header size into method_index_
            opRegImm(cUnit, kOpAdd, r0,
                     art::Array::DataOffset().Int32Value() / 4);
            // Get target Method*
            loadBaseIndexed(cUnit, rLR, r0, r0, 2, kWord);
            break;
        case 6:  // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/* Load up to 3 arguments in r1..r3 */
static int loadArgRegs(CompilationUnit* cUnit, MIR* mir,
                       DecodedInstruction* dInsn, int callState,
                       int *args, NextCallInsn nextCallInsn, ArmLIR* rollback)
{
    for (int i = 0; i < 3; i++) {
        if (args[i] != INVALID_REG) {
            RegLocation rlArg = oatGetSrc(cUnit, mir, i);
            // Arguments are treated as a series of untyped 32-bit values.
            rlArg.wide = false;
            loadValueDirectFixed(cUnit, rlArg, r1 + i);
            callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        }
    }
    return callState;
}

// Interleave launch code for INVOKE_INTERFACE.
static int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir,
                                 DecodedInstruction* dInsn, int state,
                                 ArmLIR* rollback)
{
    switch(state) {
        case 0:  // Load trampoline target
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampoline),
                         rLR);
            // Load r0 with method index
            loadConstant(cUnit, r0, dInsn->vB);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/*
 * Interleave launch code for INVOKE_SUPER. See comments
 * for nextVCallInsn.
 */
static int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
                             DecodedInstruction* dInsn, int state,
                             ArmLIR* rollback)
{
    DCHECK(rollback == NULL);
    RegLocation rlArg;
    /*
     * This is the fast path in which the target virtual method is
     * fully resolved at compile time. Note also that this path assumes
     * that the check to verify that the target method index falls
     * within the size of the super's vtable has been done at compile-time.
     */
    Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
        Get(dInsn->vB);
    CHECK(baseMethod != NULL);
    Class* superClass = cUnit->method->GetDeclaringClass()->GetSuperClass();
    CHECK(superClass != NULL);
    int32_t target_idx = baseMethod->GetMethodIndex();
    CHECK(superClass->GetVTable()->GetLength() > target_idx);
    Method* targetMethod = superClass->GetVTable()->Get(target_idx);
    CHECK(targetMethod != NULL);
    switch(state) {
        case 0:  // Get current Method* [set r0]
            loadCurrMethodDirect(cUnit, r0);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            // Get method->declaring_class_ [use r0, set rLR]
            loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
                         rLR);
            // Is "this" null? [use r1]
            genNullCheck(cUnit, oatSSASrc(mir,0), r1,
                         mir->offset, NULL);
            break;
        case 1:  // Get method->declaring_class_->super_class [use rLR, set rLR]
            loadWordDisp(cUnit, rLR, Class::SuperClassOffset().Int32Value(),
                         rLR);
            break;
        case 2:  // Get ...->super_class_->vtable [u/s rLR]
            loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
            break;
        case 3:  // Get target method [use rLR, set r0]
            loadWordDisp(cUnit, rLR, (target_idx * 4) +
                         art::Array::DataOffset().Int32Value(), r0);
            break;
        case 4:  // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}
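
/*
 * For the compile-time-resolved case above, the emitted chain is roughly:
 *
 *     r0  <- current Method*
 *     rLR <- method->declaring_class_
 *     rLR <- declaring_class_->super_class_
 *     rLR <- super_class_->vtable_
 *     r0  <- vtable_[target_idx]          @ target Method*
 *     rLR <- target->code_
 *
 * i.e. one extra dereference (super_class_) compared with a virtual call.
 */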

/* Slow-path version of nextSuperCallInsn */
static int nextSuperCallInsnSP(CompilationUnit* cUnit, MIR* mir,
                               DecodedInstruction* dInsn, int state,
                               ArmLIR* rollback)
{
    DCHECK(rollback != NULL);
    RegLocation rlArg;
    ArmLIR* skipBranch;
    ArmLIR* skipTarget;
    int tReg;
    /*
     * This handles the case in which the base method is not fully
     * resolved at compile time. We must generate code to test for
     * resolution at run time and, if unresolved, bail to the slow path
     * to fill in all the tables. In that case, we'll restart at the
     * beginning of the sequence.
     */
    switch(state) {
        case 0:  // Get the current Method* [sets r0]
            loadCurrMethodDirect(cUnit, r0);
            break;
        case 1:  // Get method->dex_cache_resolved_methods_ [use r0, set rLR]
            loadWordDisp(cUnit, r0,
                         Method::GetDexCacheResolvedMethodsOffset().Int32Value(), rLR);
            break;
        case 2:  // method->dex_cache_resolved_methods_->Get(meth_idx) [u/s rLR]
            loadWordDisp(cUnit, rLR, (dInsn->vB * 4) +
                         art::Array::DataOffset().Int32Value(), rLR);
            break;
        case 3:  // Resolved?
            skipBranch = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
            // Slowest path, bail to helper, rollback and retry
            loadWordDisp(cUnit, rSELF,
                         OFFSETOF_MEMBER(Thread, pResolveMethodFromCode), rLR);
            loadConstant(cUnit, r1, dInsn->vB);
            newLIR1(cUnit, kThumbBlxR, rLR);
            genUnconditionalBranch(cUnit, rollback);
            // Resume normal slow path
            skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
            skipTarget->defMask = ENCODE_ALL;
            skipBranch->generic.target = (LIR*)skipTarget;
            // Get base_method->method_index [use rLR, set rLR]
            loadBaseDisp(cUnit, mir, rLR,
                         Method::GetMethodIndexOffset().Int32Value(), rLR,
                         kUnsignedHalf, INVALID_SREG);
            // Load "this" [set r1]
            rlArg = oatGetSrc(cUnit, mir, 0);
            loadValueDirectFixed(cUnit, rlArg, r1);
            // Load curMethod->declaring_class_ [uses r0, sets r0]
            loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
                         r0);
            break;
        case 4:  // Get method->declaring_class_->super_class [use r0, set r0]
            loadWordDisp(cUnit, r0, Class::SuperClassOffset().Int32Value(), r0);
            break;
        case 5:  // Get ...->super_class_->vtable [u/s r0]
            loadWordDisp(cUnit, r0, Class::VTableOffset().Int32Value(), r0);
            // In the load shadow, fold the vtable_ object header size into method_index_
            opRegImm(cUnit, kOpAdd, rLR,
                     art::Array::DataOffset().Int32Value() / 4);
            if (!(mir->OptimizationFlags & MIR_IGNORE_RANGE_CHECK)) {
                // Range check, throw NSM on failure
                tReg = oatAllocTemp(cUnit);
                loadWordDisp(cUnit, r0, art::Array::LengthOffset().Int32Value(),
                             tReg);
                genBoundsCheck(cUnit, tReg, rLR, mir->offset, NULL);
                oatFreeTemp(cUnit, tReg);
            }
            // Get target Method*
            loadBaseIndexed(cUnit, r0, r0, rLR, 2, kWord);
            break;
        case 6:  // Get the target compiled code address [uses r0, sets rLR]
            loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
            break;
        default:
            return -1;
    }
    return state + 1;
}

/*
 * Load up to 5 arguments, the first three of which will be in
 * r1 .. r3. On entry r0 contains the current method pointer,
 * and as part of the load sequence, it must be replaced with
 * the target method pointer. Note, this may also be called
 * for "range" variants if the number of arguments is 5 or fewer.
 */
static int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
                                DecodedInstruction* dInsn, int callState,
                                ArmLIR** pcrLabel, bool isRange,
                                NextCallInsn nextCallInsn, ArmLIR* rollback,
                                bool skipThis)
{
    RegLocation rlArg;
    int registerArgs[3];

    /* If no arguments, just return */
    if (dInsn->vA == 0)
        return callState;

    oatLockCallTemps(cUnit);
    callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);

    /*
     * Load frame arguments arg4 & arg5 first. Coded a little odd to
     * pre-schedule the method pointer target.
     */
    for (unsigned int i = 3; i < dInsn->vA; i++) {
        int reg;
        int arg = (isRange) ? dInsn->vC + i : i;
        rlArg = oatUpdateLoc(cUnit, oatGetSrc(cUnit, mir, arg));
        if (rlArg.location == kLocPhysReg) {
            reg = rlArg.lowReg;
        } else {
            reg = r1;
            loadValueDirectFixed(cUnit, rlArg, r1);
            callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        }
        storeBaseDisp(cUnit, rSP, (i + 1) * 4, reg, kWord);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }

    /* Load register arguments r1..r3 */
    for (unsigned int i = skipThis ? 1 : 0; i < 3; i++) {
        if (i < dInsn->vA)
            registerArgs[i] = (isRange) ? dInsn->vC + i : i;
        else
            registerArgs[i] = INVALID_REG;
    }
    callState = loadArgRegs(cUnit, mir, dInsn, callState, registerArgs,
                            nextCallInsn, rollback);

    // Load direct & need a "this" null check?
    if (pcrLabel) {
        *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1,
                                 mir->offset, NULL);
    }
    return callState;
}
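
/*
 * Outgoing-argument layout assumed by the loaders above: the callee's
 * Method* occupies [sp, #0], spilled args land at [sp, #(i + 1) * 4],
 * and args 0-2 travel in r1-r3 while r0 is progressively rewritten from
 * the current Method* to the target Method* by nextCallInsn.
 */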

/*
 * May have 0+ arguments (also used for jumbo). Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying. This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in r1-r3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in r1-r3
 *
 */
static int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
                              DecodedInstruction* dInsn, int callState,
                              ArmLIR** pcrLabel, NextCallInsn nextCallInsn,
                              ArmLIR* rollback, bool skipThis)
{
    int firstArg = dInsn->vC;
    int numArgs = dInsn->vA;

    // If we can treat it as non-range (Jumbo ops will use range form)
    if (numArgs <= 5)
        return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
                                    true, nextCallInsn, rollback, skipThis);
    /*
     * Make sure the range list doesn't span the break between the normal
     * Dalvik vRegs and the ins.
     */
    int highestArg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
    int boundaryReg = cUnit->method->NumRegisters() - cUnit->method->NumIns();
    if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
        LOG(FATAL) << "Argument list spanned locals & args";
    }

    /*
     * First load the non-register arguments. Both forms expect all
     * of the source arguments to be in their home frame location, so
     * scan the sReg names and flush any that have been promoted to
     * frame backing storage.
     */
    // Scan the rest of the args - if in physReg flush to memory
    for (int i = 4; i < numArgs; i++) {
        RegLocation loc = oatGetSrc(cUnit, mir, i);
        if (loc.wide) {
            loc = oatUpdateLocWide(cUnit, loc);
            if (loc.location == kLocPhysReg) {  // TUNING: if dirty?
                storeBaseDispWide(cUnit, rSP, loc.spOffset, loc.lowReg,
                                  loc.highReg);
                callState = nextCallInsn(cUnit, mir, dInsn, callState,
                                         rollback);
            }
        } else {
            loc = oatUpdateLoc(cUnit, loc);
            if (loc.location == kLocPhysReg) {  // TUNING: if dirty?
                storeBaseDisp(cUnit, rSP, loc.spOffset, loc.lowReg, kWord);
                callState = nextCallInsn(cUnit, mir, dInsn, callState,
                                         rollback);
            }
        }
    }

    int startOffset = cUnit->regLocation[mir->ssaRep->uses[3]].spOffset;
    int outsOffset = 4 /* Method* */ + (3 * 4);
    if (numArgs >= 20) {
        // Generate memcpy; all of the source args must already be in their
        // home frame locations (flushed above).
        opRegRegImm(cUnit, kOpAdd, r0, rSP, startOffset);
        opRegRegImm(cUnit, kOpAdd, r1, rSP, outsOffset);
        loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pMemcpy), rLR);
        loadConstant(cUnit, r2, (numArgs - 3) * 4);
        newLIR1(cUnit, kThumbBlxR, rLR);
    } else {
        // Use vldm/vstm pair using r3 as a temp
        int regsLeft = std::min(numArgs - 3, 16);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, startOffset);
        newLIR3(cUnit, kThumb2Vldms, r3, fr0, regsLeft);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        opRegRegImm(cUnit, kOpAdd, r3, rSP, 4 /* Method* */ + (3 * 4));
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
        newLIR3(cUnit, kThumb2Vstms, r3, fr0, regsLeft);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }

    // Handle the 1st 3 in r1, r2 & r3
    for (unsigned int i = skipThis ? 1 : 0; i < dInsn->vA && i < 3; i++) {
        RegLocation loc = oatGetSrc(cUnit, mir, firstArg + i);
        loadValueDirectFixed(cUnit, loc, r1 + i);
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }

    // Finally, deal with the register arguments
    // We'll be using fixed registers here
    oatLockCallTemps(cUnit);
    callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    return callState;
}
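
/*
 * A minimal sketch of the two block-copy strategies above, assuming the
 * source args already sit contiguously at their home frame locations:
 *
 *   numArgs < 20:  r3 = sp + startOffset;  vldm r3, {s0..}
 *                  r3 = sp + outsOffset;   vstm r3, {s0..}
 *   numArgs >= 20: r0 = sp + startOffset;  r1 = sp + outsOffset;
 *                  r2 = (numArgs - 3) * 4; blx Thread::pMemcpy
 */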

static void genInvokeStaticDirect(CompilationUnit* cUnit, MIR* mir,
                                  bool direct, bool range)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    ArmLIR** pNullCk = direct ? &nullCk : NULL;

    NextCallInsn nextCallInsn = nextSDCallInsn;

    if (range) {
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, pNullCk,
                                       nextCallInsn, NULL, false);
    } else {
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pNullCk,
                                         false, nextCallInsn, NULL, false);
    }
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState, NULL);
    }
    newLIR1(cUnit, kThumbBlxR, rLR);
}

/*
 * All invoke-interface calls bounce off of art_invoke_interface_trampoline,
 * which will locate the target and continue on via a tail call.
 */
static void genInvokeInterface(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    /* Note: must call nextInterfaceCallInsn() prior to 1st argument load */
    callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState, NULL);
    if (mir->dalvikInsn.opcode == OP_INVOKE_INTERFACE)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextInterfaceCallInsn, NULL,
                                         true);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextInterfaceCallInsn, NULL, true);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextInterfaceCallInsn(cUnit, mir, dInsn, callState, NULL);
    }
    newLIR1(cUnit, kThumbBlxR, rLR);
}

static void genInvokeSuper(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    ArmLIR* rollback;
    Method* baseMethod = cUnit->method->GetDexCacheResolvedMethods()->
        Get(dInsn->vB);
    NextCallInsn nextCallInsn;
    bool fastPath = true;
    if (baseMethod == NULL) {
        fastPath = false;
    } else {
        Class* superClass = cUnit->method->GetDeclaringClass()->GetSuperClass();
        if (superClass == NULL) {
            fastPath = false;
        } else {
            int32_t target_idx = baseMethod->GetMethodIndex();
            if (superClass->GetVTable()->GetLength() <= target_idx) {
                fastPath = false;
            } else {
                fastPath = (superClass->GetVTable()->Get(target_idx) != NULL);
            }
        }
    }
    if (fastPath) {
        nextCallInsn = nextSuperCallInsn;
        rollback = NULL;
    } else {
        nextCallInsn = nextSuperCallInsnSP;
        rollback = newLIR0(cUnit, kArmPseudoTargetLabel);
        rollback->defMask = -1;
    }
    if (mir->dalvikInsn.opcode == OP_INVOKE_SUPER)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextCallInsn, rollback, true);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextCallInsn, rollback, true);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }
    newLIR1(cUnit, kThumbBlxR, rLR);
}

static void genInvokeVirtual(CompilationUnit* cUnit, MIR* mir)
{
    DecodedInstruction* dInsn = &mir->dalvikInsn;
    int callState = 0;
    ArmLIR* nullCk;
    ArmLIR* rollback;
    Method* method = cUnit->method->GetDexCacheResolvedMethods()->
        Get(dInsn->vB);
    NextCallInsn nextCallInsn;

    if (method == NULL) {
        // Slow path
        nextCallInsn = nextVCallInsnSP;
        // If we need a slow-path callout, we'll restart here
        rollback = newLIR0(cUnit, kArmPseudoTargetLabel);
        rollback->defMask = -1;
    } else {
        // Fast path
        nextCallInsn = nextVCallInsn;
        rollback = NULL;
    }
    if (mir->dalvikInsn.opcode == OP_INVOKE_VIRTUAL)
        callState = genDalvikArgsNoRange(cUnit, mir, dInsn, callState, &nullCk,
                                         false, nextCallInsn, rollback, true);
    else
        callState = genDalvikArgsRange(cUnit, mir, dInsn, callState, &nullCk,
                                       nextCallInsn, rollback, true);
    // Finish up any of the call sequence not interleaved in arg loading
    while (callState >= 0) {
        callState = nextCallInsn(cUnit, mir, dInsn, callState, rollback);
    }
    newLIR1(cUnit, kThumbBlxR, rLR);
}

static bool compileDalvikInstruction(CompilationUnit* cUnit, MIR* mir,
                                     BasicBlock* bb, ArmLIR* labelList)
{
    bool res = false;   // Assume success
    RegLocation rlSrc[3];
    RegLocation rlDest = badLoc;
    RegLocation rlResult = badLoc;
    Opcode opcode = mir->dalvikInsn.opcode;

    /* Prep Src and Dest locations */
    int nextSreg = 0;
    int nextLoc = 0;
    int attrs = oatDataFlowAttributes[opcode];
    rlSrc[0] = rlSrc[1] = rlSrc[2] = badLoc;
    if (attrs & DF_UA) {
        rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
        nextSreg++;
    } else if (attrs & DF_UA_WIDE) {
        rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
                                         nextSreg + 1);
        nextSreg += 2;
    }
    if (attrs & DF_UB) {
        rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
        nextSreg++;
    } else if (attrs & DF_UB_WIDE) {
        rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
                                         nextSreg + 1);
        nextSreg += 2;
    }
    if (attrs & DF_UC) {
        rlSrc[nextLoc++] = oatGetSrc(cUnit, mir, nextSreg);
    } else if (attrs & DF_UC_WIDE) {
        rlSrc[nextLoc++] = oatGetSrcWide(cUnit, mir, nextSreg,
                                         nextSreg + 1);
    }
    if (attrs & DF_DA) {
        rlDest = oatGetDest(cUnit, mir, 0);
    } else if (attrs & DF_DA_WIDE) {
        rlDest = oatGetDestWide(cUnit, mir, 0, 1);
    }

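    /*
     * At this point the DF_U* attributes have filled rlSrc[0..2] with the
     * instruction's source operands in order (wide operands consuming two
     * consecutive sregs) and DF_DA/DF_DA_WIDE has filled rlDest, so the
     * cases below can operate purely on RegLocations.
     */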
    switch(opcode) {
        case OP_NOP:
            break;

        case OP_MOVE_EXCEPTION:
            int exOffset;
            int resetReg;
            exOffset = Thread::ExceptionOffset().Int32Value();
            resetReg = oatAllocTemp(cUnit);
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            loadWordDisp(cUnit, rSELF, exOffset, rlResult.lowReg);
            loadConstant(cUnit, resetReg, 0);
            storeWordDisp(cUnit, rSELF, exOffset, resetReg);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_RETURN_VOID:
            break;

        case OP_RETURN:
        case OP_RETURN_OBJECT:
            storeValue(cUnit, retLoc, rlSrc[0]);
            break;

        case OP_RETURN_WIDE:
            rlDest = retLocWide;
            rlDest.fp = rlSrc[0].fp;
            storeValueWide(cUnit, rlDest, rlSrc[0]);
            break;

        case OP_MOVE_RESULT_WIDE:
            if (mir->OptimizationFlags & MIR_INLINED)
                break;  // Nop - combined w/ previous invoke
            /*
             * Somewhat hacky here. Because we're now passing
             * return values in registers, we have to let the
             * register allocation utilities know that the return
             * registers are live and may not be used for address
             * formation in storeValueWide.
             */
            assert(retLocWide.lowReg == r0);
            assert(retLocWide.highReg == r1);
            oatLockTemp(cUnit, retLocWide.lowReg);
            oatLockTemp(cUnit, retLocWide.highReg);
            storeValueWide(cUnit, rlDest, retLocWide);
            oatFreeTemp(cUnit, retLocWide.lowReg);
            oatFreeTemp(cUnit, retLocWide.highReg);
            break;

        case OP_MOVE_RESULT:
        case OP_MOVE_RESULT_OBJECT:
            if (mir->OptimizationFlags & MIR_INLINED)
                break;  // Nop - combined w/ previous invoke
            /* See comment for OP_MOVE_RESULT_WIDE */
            assert(retLoc.lowReg == r0);
            oatLockTemp(cUnit, retLoc.lowReg);
            storeValue(cUnit, rlDest, retLoc);
            oatFreeTemp(cUnit, retLoc.lowReg);
            break;

        case OP_MOVE:
        case OP_MOVE_OBJECT:
        case OP_MOVE_16:
        case OP_MOVE_OBJECT_16:
        case OP_MOVE_FROM16:
        case OP_MOVE_OBJECT_FROM16:
            storeValue(cUnit, rlDest, rlSrc[0]);
            break;

        case OP_MOVE_WIDE:
        case OP_MOVE_WIDE_16:
        case OP_MOVE_WIDE_FROM16:
            storeValueWide(cUnit, rlDest, rlSrc[0]);
            break;

        case OP_CONST:
        case OP_CONST_4:
        case OP_CONST_16:
            rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
            loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_HIGH16:
            rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
            loadConstantNoClobber(cUnit, rlResult.lowReg,
                                  mir->dalvikInsn.vB << 16);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_WIDE_16:
        case OP_CONST_WIDE_32:
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            loadConstantNoClobber(cUnit, rlResult.lowReg, mir->dalvikInsn.vB);
            // TUNING: do high separately to avoid load dependency
            opRegRegImm(cUnit, kOpAsr, rlResult.highReg, rlResult.lowReg, 31);
            storeValueWide(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_WIDE:
            rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
            loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
                                  mir->dalvikInsn.vB_wide & 0xffffffff,
                                  (mir->dalvikInsn.vB_wide >> 32) & 0xffffffff);
            storeValueWide(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_WIDE_HIGH16:
            rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
            loadConstantValueWide(cUnit, rlResult.lowReg, rlResult.highReg,
                                  0, mir->dalvikInsn.vB << 16);
            storeValueWide(cUnit, rlDest, rlResult);
            break;

        case OP_MONITOR_ENTER:
            genMonitorEnter(cUnit, mir, rlSrc[0]);
            break;

        case OP_MONITOR_EXIT:
            genMonitorExit(cUnit, mir, rlSrc[0]);
            break;

        case OP_CHECK_CAST:
            genCheckCast(cUnit, mir, rlSrc[0]);
            break;

        case OP_INSTANCE_OF:
            genInstanceof(cUnit, mir, rlDest, rlSrc[0]);
            break;

        case OP_NEW_INSTANCE:
            genNewInstance(cUnit, mir, rlDest);
            break;

        case OP_THROW:
            genThrow(cUnit, mir, rlSrc[0]);
            break;

        case OP_ARRAY_LENGTH:
            int lenOffset;
            lenOffset = Array::LengthOffset().Int32Value();
            rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
            genNullCheck(cUnit, rlSrc[0].sRegLow, rlSrc[0].lowReg,
                         mir->offset, NULL);
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            loadWordDisp(cUnit, rlSrc[0].lowReg, lenOffset,
                         rlResult.lowReg);
            storeValue(cUnit, rlDest, rlResult);
            break;

        case OP_CONST_STRING:
        case OP_CONST_STRING_JUMBO:
            genConstString(cUnit, mir, rlDest, rlSrc[0]);
            break;

        case OP_CONST_CLASS:
            genConstClass(cUnit, mir, rlDest, rlSrc[0]);
            break;

        case OP_FILL_ARRAY_DATA:
            genFillArrayData(cUnit, mir, rlSrc[0]);
            break;

        case OP_FILLED_NEW_ARRAY:
            genFilledNewArray(cUnit, mir, false /* not range */);
            break;

        case OP_FILLED_NEW_ARRAY_RANGE:
            genFilledNewArray(cUnit, mir, true /* range */);
            break;

        case OP_NEW_ARRAY:
            genNewArray(cUnit, mir, rlDest, rlSrc[0]);
            break;

        case OP_GOTO:
        case OP_GOTO_16:
        case OP_GOTO_32:
            // TUNING: add MIR flag to disable when unnecessary
            bool backwardBranch;
            backwardBranch = (bb->taken->startOffset <= mir->offset);
            if (backwardBranch) {
                genSuspendPoll(cUnit, mir);
            }
            genUnconditionalBranch(cUnit, &labelList[bb->taken->id]);
            break;

        case OP_PACKED_SWITCH:
            genPackedSwitch(cUnit, mir, rlSrc[0]);
            break;

        case OP_SPARSE_SWITCH:
            genSparseSwitch(cUnit, mir, rlSrc[0]);
            break;

        case OP_CMPL_FLOAT:
        case OP_CMPG_FLOAT:
        case OP_CMPL_DOUBLE:
        case OP_CMPG_DOUBLE:
            res = genCmpFP(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
            break;

        case OP_CMP_LONG:
            genCmpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
            break;

        case OP_IF_EQ:
        case OP_IF_NE:
        case OP_IF_LT:
        case OP_IF_GE:
        case OP_IF_GT:
        case OP_IF_LE: {
            bool backwardBranch;
            ArmConditionCode cond;
            backwardBranch = (bb->taken->startOffset <= mir->offset);
            if (backwardBranch) {
                genSuspendPoll(cUnit, mir);
            }
            rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
            rlSrc[1] = loadValue(cUnit, rlSrc[1], kCoreReg);
            opRegReg(cUnit, kOpCmp, rlSrc[0].lowReg, rlSrc[1].lowReg);
            switch(opcode) {
                case OP_IF_EQ:
                    cond = kArmCondEq;
                    break;
                case OP_IF_NE:
                    cond = kArmCondNe;
                    break;
                case OP_IF_LT:
                    cond = kArmCondLt;
                    break;
                case OP_IF_GE:
                    cond = kArmCondGe;
                    break;
                case OP_IF_GT:
                    cond = kArmCondGt;
                    break;
                case OP_IF_LE:
                    cond = kArmCondLe;
                    break;
                default:
                    cond = (ArmConditionCode)0;
                    LOG(FATAL) << "Unexpected opcode " << (int)opcode;
            }
            genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
            genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
            break;
        }

        case OP_IF_EQZ:
        case OP_IF_NEZ:
        case OP_IF_LTZ:
        case OP_IF_GEZ:
        case OP_IF_GTZ:
        case OP_IF_LEZ: {
            bool backwardBranch;
            ArmConditionCode cond;
            backwardBranch = (bb->taken->startOffset <= mir->offset);
            if (backwardBranch) {
                genSuspendPoll(cUnit, mir);
            }
            rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
            opRegImm(cUnit, kOpCmp, rlSrc[0].lowReg, 0);
            switch(opcode) {
                case OP_IF_EQZ:
                    cond = kArmCondEq;
                    break;
                case OP_IF_NEZ:
                    cond = kArmCondNe;
                    break;
                case OP_IF_LTZ:
                    cond = kArmCondLt;
                    break;
                case OP_IF_GEZ:
                    cond = kArmCondGe;
                    break;
                case OP_IF_GTZ:
                    cond = kArmCondGt;
                    break;
                case OP_IF_LEZ:
                    cond = kArmCondLe;
                    break;
                default:
                    cond = (ArmConditionCode)0;
                    LOG(FATAL) << "Unexpected opcode " << (int)opcode;
            }
            genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
            genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
            break;
        }

        case OP_AGET_WIDE:
            genArrayGet(cUnit, mir, kLong, rlSrc[0], rlSrc[1], rlDest, 3);
            break;
        case OP_AGET:
        case OP_AGET_OBJECT:
            genArrayGet(cUnit, mir, kWord, rlSrc[0], rlSrc[1], rlDest, 2);
            break;
        case OP_AGET_BOOLEAN:
            genArrayGet(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1],
                        rlDest, 0);
            break;
        case OP_AGET_BYTE:
            genArrayGet(cUnit, mir, kSignedByte, rlSrc[0], rlSrc[1], rlDest, 0);
            break;
        case OP_AGET_CHAR:
            genArrayGet(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1],
                        rlDest, 1);
            break;
        case OP_AGET_SHORT:
            genArrayGet(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], rlDest, 1);
            break;
        case OP_APUT_WIDE:
            genArrayPut(cUnit, mir, kLong, rlSrc[1], rlSrc[2], rlSrc[0], 3);
            break;
        case OP_APUT:
            genArrayPut(cUnit, mir, kWord, rlSrc[1], rlSrc[2], rlSrc[0], 2);
            break;
        case OP_APUT_OBJECT:
            genArrayObjPut(cUnit, mir, rlSrc[1], rlSrc[2], rlSrc[0], 2);
            break;
        case OP_APUT_SHORT:
        case OP_APUT_CHAR:
            genArrayPut(cUnit, mir, kUnsignedHalf, rlSrc[1], rlSrc[2],
                        rlSrc[0], 1);
            break;
        case OP_APUT_BYTE:
        case OP_APUT_BOOLEAN:
            genArrayPut(cUnit, mir, kUnsignedByte, rlSrc[1], rlSrc[2],
                        rlSrc[0], 0);
            break;

        case OP_IGET_WIDE:
        case OP_IGET_WIDE_VOLATILE:
            genIGetWideX(cUnit, mir, rlDest, rlSrc[0]);
            break;

        case OP_IGET:
        case OP_IGET_VOLATILE:
        case OP_IGET_OBJECT:
        case OP_IGET_OBJECT_VOLATILE:
            genIGetX(cUnit, mir, kWord, rlDest, rlSrc[0]);
            break;

        case OP_IGET_BOOLEAN:
        case OP_IGET_BYTE:
            genIGetX(cUnit, mir, kUnsignedByte, rlDest, rlSrc[0]);
            break;

        case OP_IGET_CHAR:
            genIGetX(cUnit, mir, kUnsignedHalf, rlDest, rlSrc[0]);
            break;

        case OP_IGET_SHORT:
            genIGetX(cUnit, mir, kSignedHalf, rlDest, rlSrc[0]);
            break;

        case OP_IPUT_WIDE:
        case OP_IPUT_WIDE_VOLATILE:
            genIPutWideX(cUnit, mir, rlSrc[0], rlSrc[1]);
            break;

        case OP_IPUT_OBJECT:
        case OP_IPUT_OBJECT_VOLATILE:
            genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], true);
            break;

        case OP_IPUT:
        case OP_IPUT_VOLATILE:
            genIPutX(cUnit, mir, kWord, rlSrc[0], rlSrc[1], false);
            break;

        case OP_IPUT_BOOLEAN:
        case OP_IPUT_BYTE:
            genIPutX(cUnit, mir, kUnsignedByte, rlSrc[0], rlSrc[1], false);
            break;

        case OP_IPUT_CHAR:
            genIPutX(cUnit, mir, kUnsignedHalf, rlSrc[0], rlSrc[1], false);
            break;

        case OP_IPUT_SHORT:
            genIPutX(cUnit, mir, kSignedHalf, rlSrc[0], rlSrc[1], false);
            break;

        case OP_SGET:
        case OP_SGET_OBJECT:
        case OP_SGET_BOOLEAN:
        case OP_SGET_BYTE:
        case OP_SGET_CHAR:
        case OP_SGET_SHORT:
            genSget(cUnit, mir, rlResult, rlDest);
            break;

        case OP_SGET_WIDE:
            genSgetWide(cUnit, mir, rlResult, rlDest);
            break;

        case OP_SPUT:
        case OP_SPUT_OBJECT:
        case OP_SPUT_BOOLEAN:
        case OP_SPUT_BYTE:
        case OP_SPUT_CHAR:
        case OP_SPUT_SHORT:
            genSput(cUnit, mir, rlSrc[0]);
            break;

        case OP_SPUT_WIDE:
            genSputWide(cUnit, mir, rlSrc[0]);
            break;

        case OP_INVOKE_STATIC_RANGE:
            genInvokeStaticDirect(cUnit, mir, false /*direct*/,
                                  true /*range*/);
            break;
        case OP_INVOKE_STATIC:
            genInvokeStaticDirect(cUnit, mir, false /*direct*/,
                                  false /*range*/);
            break;

        case OP_INVOKE_DIRECT:
            genInvokeStaticDirect(cUnit, mir, true /*direct*/,
                                  false /*range*/);
            break;
        case OP_INVOKE_DIRECT_RANGE:
            genInvokeStaticDirect(cUnit, mir, true /*direct*/,
                                  true /*range*/);
            break;

        case OP_INVOKE_VIRTUAL:
        case OP_INVOKE_VIRTUAL_RANGE:
            genInvokeVirtual(cUnit, mir);
            break;

        case OP_INVOKE_SUPER:
        case OP_INVOKE_SUPER_RANGE:
            genInvokeSuper(cUnit, mir);
            break;

        case OP_INVOKE_INTERFACE:
        case OP_INVOKE_INTERFACE_RANGE:
            genInvokeInterface(cUnit, mir);
            break;

        case OP_NEG_INT:
        case OP_NOT_INT:
            res = genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
            break;

        case OP_NEG_LONG:
        case OP_NOT_LONG:
            res = genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
            break;

        case OP_NEG_FLOAT:
            res = genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
            break;

        case OP_NEG_DOUBLE:
            res = genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[0]);
            break;

        case OP_INT_TO_LONG:
            rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
            if (rlSrc[0].location == kLocPhysReg) {
                genRegCopy(cUnit, rlResult.lowReg, rlSrc[0].lowReg);
1493 } else {
1494 loadValueDirect(cUnit, rlSrc[0], rlResult.lowReg);
1495 }
1496 opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
1497 rlResult.lowReg, 31);
1498 storeValueWide(cUnit, rlDest, rlResult);
1499 break;
1500
1501 case OP_LONG_TO_INT:
1502 rlSrc[0] = oatUpdateLocWide(cUnit, rlSrc[0]);
1503 rlSrc[0] = oatWideToNarrow(cUnit, rlSrc[0]);
1504 storeValue(cUnit, rlDest, rlSrc[0]);
1505 break;
1506
1507 case OP_INT_TO_BYTE:
1508 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1509 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1510 opRegReg(cUnit, kOp2Byte, rlResult.lowReg, rlSrc[0].lowReg);
1511 storeValue(cUnit, rlDest, rlResult);
1512 break;
1513
1514 case OP_INT_TO_SHORT:
1515 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1516 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1517 opRegReg(cUnit, kOp2Short, rlResult.lowReg, rlSrc[0].lowReg);
1518 storeValue(cUnit, rlDest, rlResult);
1519 break;
1520
1521 case OP_INT_TO_CHAR:
1522 rlSrc[0] = loadValue(cUnit, rlSrc[0], kCoreReg);
1523 rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
1524 opRegReg(cUnit, kOp2Char, rlResult.lowReg, rlSrc[0].lowReg);
1525 storeValue(cUnit, rlDest, rlResult);
1526 break;
1527
1528 case OP_INT_TO_FLOAT:
1529 case OP_INT_TO_DOUBLE:
1530 case OP_LONG_TO_FLOAT:
1531 case OP_LONG_TO_DOUBLE:
1532 case OP_FLOAT_TO_INT:
1533 case OP_FLOAT_TO_LONG:
1534 case OP_FLOAT_TO_DOUBLE:
1535 case OP_DOUBLE_TO_INT:
1536 case OP_DOUBLE_TO_LONG:
1537 case OP_DOUBLE_TO_FLOAT:
1538 genConversion(cUnit, mir);
1539 break;
1540
1541 case OP_ADD_INT:
1542 case OP_SUB_INT:
1543 case OP_MUL_INT:
1544 case OP_DIV_INT:
1545 case OP_REM_INT:
1546 case OP_AND_INT:
1547 case OP_OR_INT:
1548 case OP_XOR_INT:
1549 case OP_SHL_INT:
1550 case OP_SHR_INT:
1551 case OP_USHR_INT:
1552 case OP_ADD_INT_2ADDR:
1553 case OP_SUB_INT_2ADDR:
1554 case OP_MUL_INT_2ADDR:
1555 case OP_DIV_INT_2ADDR:
1556 case OP_REM_INT_2ADDR:
1557 case OP_AND_INT_2ADDR:
1558 case OP_OR_INT_2ADDR:
1559 case OP_XOR_INT_2ADDR:
1560 case OP_SHL_INT_2ADDR:
1561 case OP_SHR_INT_2ADDR:
1562 case OP_USHR_INT_2ADDR:
1563 genArithOpInt(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1564 break;
1565
1566 case OP_ADD_LONG:
1567 case OP_SUB_LONG:
1568 case OP_MUL_LONG:
1569 case OP_DIV_LONG:
1570 case OP_REM_LONG:
1571 case OP_AND_LONG:
1572 case OP_OR_LONG:
1573 case OP_XOR_LONG:
1574 case OP_ADD_LONG_2ADDR:
1575 case OP_SUB_LONG_2ADDR:
1576 case OP_MUL_LONG_2ADDR:
1577 case OP_DIV_LONG_2ADDR:
1578 case OP_REM_LONG_2ADDR:
1579 case OP_AND_LONG_2ADDR:
1580 case OP_OR_LONG_2ADDR:
1581 case OP_XOR_LONG_2ADDR:
1582 genArithOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1583 break;
1584
buzbee67bf8852011-08-17 17:51:35 -07001585 case OP_SHL_LONG:
1586 case OP_SHR_LONG:
1587 case OP_USHR_LONG:
buzbeee6d61962011-08-27 11:58:19 -07001588 case OP_SHL_LONG_2ADDR:
1589 case OP_SHR_LONG_2ADDR:
1590 case OP_USHR_LONG_2ADDR:
buzbee67bf8852011-08-17 17:51:35 -07001591            genShiftOpLong(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1592 break;
1593
1594 case OP_ADD_FLOAT:
1595 case OP_SUB_FLOAT:
1596 case OP_MUL_FLOAT:
1597 case OP_DIV_FLOAT:
1598 case OP_REM_FLOAT:
1599 case OP_ADD_FLOAT_2ADDR:
1600 case OP_SUB_FLOAT_2ADDR:
1601 case OP_MUL_FLOAT_2ADDR:
1602 case OP_DIV_FLOAT_2ADDR:
1603 case OP_REM_FLOAT_2ADDR:
1604 genArithOpFloat(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1605 break;
1606
1607 case OP_ADD_DOUBLE:
1608 case OP_SUB_DOUBLE:
1609 case OP_MUL_DOUBLE:
1610 case OP_DIV_DOUBLE:
1611 case OP_REM_DOUBLE:
1612 case OP_ADD_DOUBLE_2ADDR:
1613 case OP_SUB_DOUBLE_2ADDR:
1614 case OP_MUL_DOUBLE_2ADDR:
1615 case OP_DIV_DOUBLE_2ADDR:
1616 case OP_REM_DOUBLE_2ADDR:
1617 genArithOpDouble(cUnit, mir, rlDest, rlSrc[0], rlSrc[1]);
1618 break;
1619
1620 case OP_RSUB_INT:
1621 case OP_ADD_INT_LIT16:
1622 case OP_MUL_INT_LIT16:
1623 case OP_DIV_INT_LIT16:
1624 case OP_REM_INT_LIT16:
1625 case OP_AND_INT_LIT16:
1626 case OP_OR_INT_LIT16:
1627 case OP_XOR_INT_LIT16:
1628 case OP_ADD_INT_LIT8:
1629 case OP_RSUB_INT_LIT8:
1630 case OP_MUL_INT_LIT8:
1631 case OP_DIV_INT_LIT8:
1632 case OP_REM_INT_LIT8:
1633 case OP_AND_INT_LIT8:
1634 case OP_OR_INT_LIT8:
1635 case OP_XOR_INT_LIT8:
1636 case OP_SHL_INT_LIT8:
1637 case OP_SHR_INT_LIT8:
1638 case OP_USHR_INT_LIT8:
1639 genArithOpIntLit(cUnit, mir, rlDest, rlSrc[0], mir->dalvikInsn.vC);
1640 break;
1641
1642 default:
1643 res = true;
1644 }
1645 return res;
1646}
1647
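/*
 * Illustrative sketch (not part of the original file): the OP_IF_* and
 * OP_IF_*Z cases in compileDalvikInstruction above map six consecutive
 * Dalvik opcodes onto the same six ARM condition codes with a switch.
 * Assuming the compare opcodes really are consecutive in the Opcode enum
 * (they are in the Dex opcode table), the mapping could also be expressed
 * as a small lookup keyed by the distance from the first opcode.  The
 * helper name below is hypothetical and nothing here calls it.
 */
static inline ArmConditionCode sketchIfToArmCond(int opcode, int firstIfOpcode)
{
    static const ArmConditionCode condMap[6] = {
        kArmCondEq, kArmCondNe, kArmCondLt, kArmCondGe, kArmCondGt, kArmCondLe,
    };
    int index = opcode - firstIfOpcode;   // 0..5 for EQ, NE, LT, GE, GT, LE
    if (index < 0 || index >= 6) {
        LOG(FATAL) << "Unexpected opcode " << opcode;
        index = 0;   // unreachable; keeps the table access in bounds
    }
    return condMap[index];
}
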
1648static const char *extendedMIROpNames[kMirOpLast - kMirOpFirst] = {
1649 "kMirOpPhi",
1650 "kMirOpNullNRangeUpCheck",
1651 "kMirOpNullNRangeDownCheck",
1652 "kMirOpLowerBound",
1653 "kMirOpPunt",
1654 "kMirOpCheckInlinePrediction",
1655};
1656
1657/* Extended MIR instructions like PHI */
1658static void handleExtendedMethodMIR(CompilationUnit* cUnit, MIR* mir)
1659{
1660 int opOffset = mir->dalvikInsn.opcode - kMirOpFirst;
1661 char* msg = (char*)oatNew(strlen(extendedMIROpNames[opOffset]) + 1, false);
1662 strcpy(msg, extendedMIROpNames[opOffset]);
1663 ArmLIR* op = newLIR1(cUnit, kArmPseudoExtended, (int) msg);
1664
1665 switch ((ExtendedMIROpcode)mir->dalvikInsn.opcode) {
1666 case kMirOpPhi: {
1667 char* ssaString = oatGetSSAString(cUnit, mir->ssaRep);
1668 op->flags.isNop = true;
1669 newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
1670 break;
1671 }
1672 default:
1673 break;
1674 }
1675}
1676
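/*
 * Illustrative sketch (not part of the original file): handleExtendedMethodMIR
 * above indexes extendedMIROpNames with (opcode - kMirOpFirst).  The
 * hypothetical helper below restates that lookup with an explicit bounds
 * check, making the assumption about the extended-opcode range visible.
 */
static inline const char* sketchExtendedOpName(int opcode)
{
    int opOffset = opcode - kMirOpFirst;
    if (opOffset < 0 || opOffset >= (kMirOpLast - kMirOpFirst)) {
        LOG(FATAL) << "Not an extended MIR opcode: " << opcode;
        opOffset = 0;   // unreachable; keeps the table access in bounds
    }
    return extendedMIROpNames[opOffset];
}
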
1677/* If there are any ins passed in registers that have not been promoted
1678 * to a callee-save register, flush them to the frame.
buzbeedfd3d702011-08-28 12:56:51 -07001679 * Note: at this point, copy any ins that are passed in a register to their
1680 * home location */
buzbee67bf8852011-08-17 17:51:35 -07001681static void flushIns(CompilationUnit* cUnit)
1682{
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001683 if (cUnit->method->NumIns() == 0)
buzbee67bf8852011-08-17 17:51:35 -07001684 return;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001685 int inRegs = (cUnit->method->NumIns() > 2) ? 3
1686 : cUnit->method->NumIns();
buzbee67bf8852011-08-17 17:51:35 -07001687 int startReg = r1;
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001688 int startLoc = cUnit->method->NumRegisters() -
1689 cUnit->method->NumIns();
buzbee67bf8852011-08-17 17:51:35 -07001690 for (int i = 0; i < inRegs; i++) {
1691 RegLocation loc = cUnit->regLocation[startLoc + i];
buzbeedfd3d702011-08-28 12:56:51 -07001692 //TUNING: be smarter about flushing ins to frame
1693 storeBaseDisp(cUnit, rSP, loc.spOffset, startReg + i, kWord);
buzbee67bf8852011-08-17 17:51:35 -07001694 if (loc.location == kLocPhysReg) {
1695 genRegCopy(cUnit, loc.lowReg, startReg + i);
buzbee67bf8852011-08-17 17:51:35 -07001696 }
1697 }
1698
1699 // Handle special case of wide argument half in regs, half in frame
1700 if (inRegs == 3) {
1701 RegLocation loc = cUnit->regLocation[startLoc + 2];
1702 if (loc.wide && loc.location == kLocPhysReg) {
1703 // Load the other half of the arg into the promoted pair
buzbee561227c2011-09-02 15:28:19 -07001704 loadWordDisp(cUnit, rSP, loc.spOffset + 4, loc.highReg);
buzbee67bf8852011-08-17 17:51:35 -07001705 inRegs++;
1706 }
1707 }
1708
1709 // Now, do initial assignment of all promoted arguments passed in frame
Ian Rogers0cfe1fb2011-08-26 03:29:44 -07001710 for (int i = inRegs; i < cUnit->method->NumIns();) {
buzbee67bf8852011-08-17 17:51:35 -07001711 RegLocation loc = cUnit->regLocation[startLoc + i];
1712 if (loc.fpLocation == kLocPhysReg) {
1713 loc.location = kLocPhysReg;
1714 loc.fp = true;
1715 loc.lowReg = loc.fpLowReg;
1716 loc.highReg = loc.fpHighReg;
1717 }
1718 if (loc.location == kLocPhysReg) {
1719 if (loc.wide) {
1720 loadBaseDispWide(cUnit, NULL, rSP, loc.spOffset,
1721 loc.lowReg, loc.highReg, INVALID_SREG);
1722 i++;
1723 } else {
buzbee561227c2011-09-02 15:28:19 -07001724 loadWordDisp(cUnit, rSP, loc.spOffset, loc.lowReg);
buzbee67bf8852011-08-17 17:51:35 -07001725 }
1726 }
1727 i++;
1728 }
1729}
1730
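/*
 * Illustrative sketch (not part of the original file): flushIns above relies
 * on the calling convention used by this compiler - the Method* arrives in
 * r0, the first three ins arrive in r1..r3, and the i-th in's home Dalvik
 * register is NumRegisters() - NumIns() + i.  The hypothetical helper below
 * just restates which ins arrive in a register; nothing here calls it.
 */
static inline bool sketchInArrivesInReg(int inIndex, int* argReg)
{
    static const int argRegs[] = { r1, r2, r3 };   // the Method* itself uses r0
    if (inIndex >= 0 && inIndex < 3) {
        *argReg = argRegs[inIndex];
        return true;
    }
    return false;   // the remaining ins arrive in the caller-built frame
}
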
1731/* Handle the content in each basic block */
1732static bool methodBlockCodeGen(CompilationUnit* cUnit, BasicBlock* bb)
1733{
1734 MIR* mir;
1735 ArmLIR* labelList = (ArmLIR*) cUnit->blockLabelList;
1736 int blockId = bb->id;
1737
1738 cUnit->curBlock = bb;
1739 labelList[blockId].operands[0] = bb->startOffset;
1740
1741 /* Insert the block label */
1742 labelList[blockId].opcode = kArmPseudoNormalBlockLabel;
1743 oatAppendLIR(cUnit, (LIR*) &labelList[blockId]);
1744
1745 oatClobberAllRegs(cUnit);
1746 oatResetNullCheck(cUnit);
1747
1748 ArmLIR* headLIR = NULL;
1749
1750 if (bb->blockType == kEntryBlock) {
1751 /*
1752 * On entry, r0, r1, r2 & r3 are live. Let the register allocation
1753 * mechanism know so it doesn't try to use any of them when
1754 * expanding the frame or flushing. This leaves the utility
1755 * code with a single temp: r12. This should be enough.
1756 */
1757 oatLockTemp(cUnit, r0);
1758 oatLockTemp(cUnit, r1);
1759 oatLockTemp(cUnit, r2);
1760 oatLockTemp(cUnit, r3);
1761 newLIR0(cUnit, kArmPseudoMethodEntry);
1762 /* Spill core callee saves */
1763 newLIR1(cUnit, kThumb2Push, cUnit->coreSpillMask);
1764 /* Need to spill any FP regs? */
1765 if (cUnit->numFPSpills) {
1766 newLIR1(cUnit, kThumb2VPushCS, cUnit->numFPSpills);
1767 }
1768 opRegImm(cUnit, kOpSub, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
1769 storeBaseDisp(cUnit, rSP, 0, r0, kWord);
1770 flushIns(cUnit);
1771 oatFreeTemp(cUnit, r0);
1772 oatFreeTemp(cUnit, r1);
1773 oatFreeTemp(cUnit, r2);
1774 oatFreeTemp(cUnit, r3);
1775 } else if (bb->blockType == kExitBlock) {
1776 newLIR0(cUnit, kArmPseudoMethodExit);
1777 opRegImm(cUnit, kOpAdd, rSP, cUnit->frameSize - (cUnit->numSpills * 4));
1778 /* Need to restore any FP callee saves? */
1779 if (cUnit->numFPSpills) {
1780 newLIR1(cUnit, kThumb2VPopCS, cUnit->numFPSpills);
1781 }
1782 if (cUnit->coreSpillMask & (1 << rLR)) {
1783 /* Unspill rLR to rPC */
1784 cUnit->coreSpillMask &= ~(1 << rLR);
1785 cUnit->coreSpillMask |= (1 << rPC);
1786 }
1787 newLIR1(cUnit, kThumb2Pop, cUnit->coreSpillMask);
1788 if (!(cUnit->coreSpillMask & (1 << rPC))) {
1789            /* We didn't pop to rPC, so must do a bx rLR */
1790 newLIR1(cUnit, kThumbBx, rLR);
1791 }
1792 }
1793
1794 for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
1795
1796 oatResetRegPool(cUnit);
1797 if (cUnit->disableOpt & (1 << kTrackLiveTemps)) {
1798 oatClobberAllRegs(cUnit);
1799 }
1800
1801 if (cUnit->disableOpt & (1 << kSuppressLoads)) {
1802 oatResetDefTracking(cUnit);
1803 }
1804
1805 if ((int)mir->dalvikInsn.opcode >= (int)kMirOpFirst) {
1806 handleExtendedMethodMIR(cUnit, mir);
1807 continue;
1808 }
1809
1810 cUnit->currentDalvikOffset = mir->offset;
1811
1812 Opcode dalvikOpcode = mir->dalvikInsn.opcode;
1813 InstructionFormat dalvikFormat =
1814 dexGetFormatFromOpcode(dalvikOpcode);
1815
1816 ArmLIR* boundaryLIR;
1817
1818 /* Mark the beginning of a Dalvik instruction for line tracking */
1819 boundaryLIR = newLIR1(cUnit, kArmPseudoDalvikByteCodeBoundary,
1820 (int) oatGetDalvikDisassembly(
1821 &mir->dalvikInsn, ""));
1822 /* Remember the first LIR for this block */
1823 if (headLIR == NULL) {
1824 headLIR = boundaryLIR;
1825 /* Set the first boundaryLIR as a scheduling barrier */
1826 headLIR->defMask = ENCODE_ALL;
1827 }
1828
1829 /* Don't generate the SSA annotation unless verbose mode is on */
1830 if (cUnit->printMe && mir->ssaRep) {
1831 char *ssaString = oatGetSSAString(cUnit, mir->ssaRep);
1832 newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
1833 }
1834
1835 bool notHandled = compileDalvikInstruction(cUnit, mir, bb, labelList);
1836
1837 if (notHandled) {
1838 char buf[100];
1839 snprintf(buf, 100, "%#06x: Opcode %#x (%s) / Fmt %d not handled",
1840 mir->offset,
1841 dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
1842 dalvikFormat);
1843 LOG(FATAL) << buf;
1844 }
1845 }
1846
1847 if (headLIR) {
1848 /*
1849 * Eliminate redundant loads/stores and delay stores into later
1850 * slots
1851 */
1852 oatApplyLocalOptimizations(cUnit, (LIR*) headLIR,
1853 cUnit->lastLIRInsn);
1854
1855 /*
1856 * Generate an unconditional branch to the fallthrough block.
1857 */
1858 if (bb->fallThrough) {
1859 genUnconditionalBranch(cUnit,
1860 &labelList[bb->fallThrough->id]);
1861 }
1862 }
1863 return false;
1864}
1865
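/*
 * Illustrative sketch (not part of the original file): the prologue and
 * epilogue generated in methodBlockCodeGen above split the frame into two
 * parts - numSpills registers saved with kThumb2Push/kThumb2VPushCS (4 bytes
 * each) and an explicit SP adjustment for everything else.  The hypothetical
 * helper below restates that arithmetic; it matches the kOpSub/kOpAdd on rSP.
 */
static inline int sketchExplicitFrameAdjustment(int frameSize, int numSpills)
{
    return frameSize - (numSpills * 4);   // bytes not covered by the pushes
}
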
1866/*
1867 * Nop any unconditional branches that go to the next instruction.
1868 * Note: new redundant branches may be inserted later, and we'll
1869 * use a check in final instruction assembly to nop those out.
1870 */
1871void removeRedundantBranches(CompilationUnit* cUnit)
1872{
1873 ArmLIR* thisLIR;
1874
1875 for (thisLIR = (ArmLIR*) cUnit->firstLIRInsn;
1876 thisLIR != (ArmLIR*) cUnit->lastLIRInsn;
1877 thisLIR = NEXT_LIR(thisLIR)) {
1878
1879 /* Branch to the next instruction */
1880 if ((thisLIR->opcode == kThumbBUncond) ||
1881 (thisLIR->opcode == kThumb2BUncond)) {
1882 ArmLIR* nextLIR = thisLIR;
1883
1884 while (true) {
1885 nextLIR = NEXT_LIR(nextLIR);
1886
1887 /*
1888 * Is the branch target the next instruction?
1889 */
1890 if (nextLIR == (ArmLIR*) thisLIR->generic.target) {
1891 thisLIR->flags.isNop = true;
1892 break;
1893 }
1894
1895 /*
1896                 * Found a real instruction between the branch and its target.
1897 * Need to explicitly check the lastLIRInsn here because it
1898 * might be the last real instruction.
1899 */
1900 if (!isPseudoOpcode(nextLIR->opcode) ||
1901                 (nextLIR == (ArmLIR*) cUnit->lastLIRInsn))
1902 break;
1903 }
1904 }
1905 }
1906}
1907
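/*
 * Illustrative sketch (not part of the original file): removeRedundantBranches
 * above nops an unconditional branch whose target is the next real (non-
 * pseudo) instruction.  The stand-alone version below uses hypothetical
 * SketchLIR/isBranch/isPseudo stand-ins to show the same control flow without
 * the ARM-specific opcode tests.
 */
struct SketchLIR {
    SketchLIR* next;
    SketchLIR* target;   // branch target; NULL for non-branches
    bool isBranch;       // unconditional branch?
    bool isPseudo;       // label, boundary marker, or other non-instruction
    bool isNop;
};

static void sketchRemoveRedundantBranches(SketchLIR* first, SketchLIR* last)
{
    for (SketchLIR* lir = first; lir != last; lir = lir->next) {
        if (!lir->isBranch)
            continue;
        /* Scan past pseudo ops; nop the branch if its target comes first. */
        for (SketchLIR* next = lir->next; next != NULL; next = next->next) {
            if (next == lir->target) {
                lir->isNop = true;
                break;
            }
            if (!next->isPseudo || next == last)
                break;   // reached a real instruction (or the end) first
        }
    }
}
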
1908void oatMethodMIR2LIR(CompilationUnit* cUnit)
1909{
1910 /* Used to hold the labels of each block */
1911 cUnit->blockLabelList =
1912 (void *) oatNew(sizeof(ArmLIR) * cUnit->numBlocks, true);
1913
1914 oatDataFlowAnalysisDispatcher(cUnit, methodBlockCodeGen,
1915 kPreOrderDFSTraversal, false /* Iterative */);
1916 removeRedundantBranches(cUnit);
1917}
1918
1919/* Common initialization routine for an architecture family */
1920bool oatArchInit()
1921{
1922 int i;
1923
1924 for (i = 0; i < kArmLast; i++) {
1925 if (EncodingMap[i].opcode != i) {
1926 LOG(FATAL) << "Encoding order for " << EncodingMap[i].name <<
1927 " is wrong: expecting " << i << ", seeing " <<
1928 (int)EncodingMap[i].opcode;
1929 }
1930 }
1931
1932 return oatArchVariantInit();
1933}
1934
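/*
 * Illustrative sketch (not part of the original file): oatArchInit above
 * verifies that EncodingMap is indexed by its own opcode values, i.e. entry
 * i must describe opcode i.  The generic version below, with hypothetical
 * SketchEncodingEntry/sketchCheckTableOrder names, shows the same
 * table-vs-enum consistency check.
 */
struct SketchEncodingEntry {
    int opcode;          // must equal this entry's index in the table
    const char* name;
};

static bool sketchCheckTableOrder(const SketchEncodingEntry* table, int count)
{
    for (int i = 0; i < count; i++) {
        if (table[i].opcode != i) {
            LOG(FATAL) << "Encoding order for " << table[i].name
                       << " is wrong: expecting " << i
                       << ", seeing " << table[i].opcode;
        }
    }
    return true;
}
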
1935/* Needed by the Assembler */
1936void oatSetupResourceMasks(ArmLIR* lir)
1937{
1938 setupResourceMasks(lir);
1939}
1940
1941/* Needed by the ld/st optimizations */
1942ArmLIR* oatRegCopyNoInsert(CompilationUnit* cUnit, int rDest, int rSrc)
1943{
1944 return genRegCopyNoInsert(cUnit, rDest, rSrc);
1945}
1946
1947/* Needed by the register allocator */
1948ArmLIR* oatRegCopy(CompilationUnit* cUnit, int rDest, int rSrc)
1949{
1950 return genRegCopy(cUnit, rDest, rSrc);
1951}
1952
1953/* Needed by the register allocator */
1954void oatRegCopyWide(CompilationUnit* cUnit, int destLo, int destHi,
1955 int srcLo, int srcHi)
1956{
1957 genRegCopyWide(cUnit, destLo, destHi, srcLo, srcHi);
1958}
1959
1960void oatFlushRegImpl(CompilationUnit* cUnit, int rBase,
1961 int displacement, int rSrc, OpSize size)
1962{
1963 storeBaseDisp(cUnit, rBase, displacement, rSrc, size);
1964}
1965
1966void oatFlushRegWideImpl(CompilationUnit* cUnit, int rBase,
1967 int displacement, int rSrcLo, int rSrcHi)
1968{
1969 storeBaseDispWide(cUnit, rBase, displacement, rSrcLo, rSrcHi);
1970}