blob: 14e395cdacd4b20be3556ba3b773f74f6996c028 [file] [log] [blame]
Brian Carlstrom7940e442013-07-12 13:46:57 -07001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_ir.h"
18#include "dex_file-inl.h"
19#include "invoke_type.h"
20#include "mirror/array.h"
21#include "mirror/string.h"
22#include "mir_to_lir-inl.h"
23#include "oat/runtime/oat_support_entrypoints.h"
24#include "x86/codegen_x86.h"
25
26namespace art {
27
28/*
29 * This source files contains "gen" codegen routines that should
30 * be applicable to most targets. Only mid-level support utilities
31 * and "op" calls may be used here.
32 */
33
34/*
35 * To save scheduling time, helper calls are broken into two parts: generation of
36 * the helper target address, and the actuall call to the helper. Because x86
37 * has a memory call operation, part 1 is a NOP for x86. For other targets,
38 * load arguments between the two parts.
39 */
Brian Carlstrom2ce745c2013-07-17 17:44:30 -070040int Mir2Lir::CallHelperSetup(int helper_offset) {
Brian Carlstrom7940e442013-07-12 13:46:57 -070041 return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
42}
43
44/* NOTE: if r_tgt is a temp, it will be freed following use */
Brian Carlstrom2ce745c2013-07-17 17:44:30 -070045LIR* Mir2Lir::CallHelper(int r_tgt, int helper_offset, bool safepoint_pc) {
Brian Carlstrom7940e442013-07-12 13:46:57 -070046 LIR* call_inst;
47 if (cu_->instruction_set == kX86) {
48 call_inst = OpThreadMem(kOpBlx, helper_offset);
49 } else {
50 call_inst = OpReg(kOpBlx, r_tgt);
51 FreeTemp(r_tgt);
52 }
53 if (safepoint_pc) {
54 MarkSafepointPC(call_inst);
55 }
56 return call_inst;
57}
58
59void Mir2Lir::CallRuntimeHelperImm(int helper_offset, int arg0, bool safepoint_pc) {
60 int r_tgt = CallHelperSetup(helper_offset);
61 LoadConstant(TargetReg(kArg0), arg0);
62 ClobberCalleeSave();
63 CallHelper(r_tgt, helper_offset, safepoint_pc);
64}
65
66void Mir2Lir::CallRuntimeHelperReg(int helper_offset, int arg0, bool safepoint_pc) {
67 int r_tgt = CallHelperSetup(helper_offset);
68 OpRegCopy(TargetReg(kArg0), arg0);
69 ClobberCalleeSave();
70 CallHelper(r_tgt, helper_offset, safepoint_pc);
71}
72
73void Mir2Lir::CallRuntimeHelperRegLocation(int helper_offset, RegLocation arg0, bool safepoint_pc) {
74 int r_tgt = CallHelperSetup(helper_offset);
75 if (arg0.wide == 0) {
76 LoadValueDirectFixed(arg0, TargetReg(kArg0));
77 } else {
78 LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
79 }
80 ClobberCalleeSave();
81 CallHelper(r_tgt, helper_offset, safepoint_pc);
82}
83
84void Mir2Lir::CallRuntimeHelperImmImm(int helper_offset, int arg0, int arg1,
85 bool safepoint_pc) {
86 int r_tgt = CallHelperSetup(helper_offset);
87 LoadConstant(TargetReg(kArg0), arg0);
88 LoadConstant(TargetReg(kArg1), arg1);
89 ClobberCalleeSave();
90 CallHelper(r_tgt, helper_offset, safepoint_pc);
91}
92
93void Mir2Lir::CallRuntimeHelperImmRegLocation(int helper_offset, int arg0,
94 RegLocation arg1, bool safepoint_pc) {
95 int r_tgt = CallHelperSetup(helper_offset);
96 if (arg1.wide == 0) {
97 LoadValueDirectFixed(arg1, TargetReg(kArg1));
98 } else {
99 LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
100 }
101 LoadConstant(TargetReg(kArg0), arg0);
102 ClobberCalleeSave();
103 CallHelper(r_tgt, helper_offset, safepoint_pc);
104}
105
106void Mir2Lir::CallRuntimeHelperRegLocationImm(int helper_offset, RegLocation arg0, int arg1,
107 bool safepoint_pc) {
108 int r_tgt = CallHelperSetup(helper_offset);
109 LoadValueDirectFixed(arg0, TargetReg(kArg0));
110 LoadConstant(TargetReg(kArg1), arg1);
111 ClobberCalleeSave();
112 CallHelper(r_tgt, helper_offset, safepoint_pc);
113}
114
115void Mir2Lir::CallRuntimeHelperImmReg(int helper_offset, int arg0, int arg1,
116 bool safepoint_pc) {
117 int r_tgt = CallHelperSetup(helper_offset);
118 OpRegCopy(TargetReg(kArg1), arg1);
119 LoadConstant(TargetReg(kArg0), arg0);
120 ClobberCalleeSave();
121 CallHelper(r_tgt, helper_offset, safepoint_pc);
122}
123
124void Mir2Lir::CallRuntimeHelperRegImm(int helper_offset, int arg0, int arg1,
125 bool safepoint_pc) {
126 int r_tgt = CallHelperSetup(helper_offset);
127 OpRegCopy(TargetReg(kArg0), arg0);
128 LoadConstant(TargetReg(kArg1), arg1);
129 ClobberCalleeSave();
130 CallHelper(r_tgt, helper_offset, safepoint_pc);
131}
132
133void Mir2Lir::CallRuntimeHelperImmMethod(int helper_offset, int arg0, bool safepoint_pc) {
134 int r_tgt = CallHelperSetup(helper_offset);
135 LoadCurrMethodDirect(TargetReg(kArg1));
136 LoadConstant(TargetReg(kArg0), arg0);
137 ClobberCalleeSave();
138 CallHelper(r_tgt, helper_offset, safepoint_pc);
139}
140
/*
 * Invoke a runtime helper with two Dalvik-value arguments.
 * Register assignment depends on the width/fp-ness of each value and on
 * the target:
 *  - narrow arg0 -> kArg0 (kFArg0 if fp); wide arg0 -> kArg0/kArg1 pair
 *    (kFArg0/kFArg1 if fp).
 *  - arg1 follows in the next free slot(s); on Mips, fp values are
 *    steered to the kFArg2/kFArg3 registers even when arg0 was narrow.
 */
void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(int helper_offset, RegLocation arg0,
                                                      RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    // Narrow arg0: one slot used, arg1 starts at kArg1/kFArg2.
    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
    if (arg1.wide == 0) {
      if (cu_->instruction_set == kMips) {
        // Mips keeps fp args in dedicated fp argument registers.
        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
      } else {
        LoadValueDirectFixed(arg1, TargetReg(kArg1));
      }
    } else {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
      } else {
        LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
      }
    }
  } else {
    // Wide arg0 consumes two slots; arg1 starts at kArg2/kFArg2.
    LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
    if (arg1.wide == 0) {
      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
    } else {
      LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
    }
  }
  ClobberCalleeSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
170
171void Mir2Lir::CallRuntimeHelperRegReg(int helper_offset, int arg0, int arg1, bool safepoint_pc) {
172 int r_tgt = CallHelperSetup(helper_offset);
173 DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1
174 OpRegCopy(TargetReg(kArg0), arg0);
175 OpRegCopy(TargetReg(kArg1), arg1);
176 ClobberCalleeSave();
177 CallHelper(r_tgt, helper_offset, safepoint_pc);
178}
179
180void Mir2Lir::CallRuntimeHelperRegRegImm(int helper_offset, int arg0, int arg1,
181 int arg2, bool safepoint_pc) {
182 int r_tgt = CallHelperSetup(helper_offset);
183 DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1
184 OpRegCopy(TargetReg(kArg0), arg0);
185 OpRegCopy(TargetReg(kArg1), arg1);
186 LoadConstant(TargetReg(kArg2), arg2);
187 ClobberCalleeSave();
188 CallHelper(r_tgt, helper_offset, safepoint_pc);
189}
190
191void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(int helper_offset,
192 int arg0, RegLocation arg2, bool safepoint_pc) {
193 int r_tgt = CallHelperSetup(helper_offset);
194 LoadValueDirectFixed(arg2, TargetReg(kArg2));
195 LoadCurrMethodDirect(TargetReg(kArg1));
196 LoadConstant(TargetReg(kArg0), arg0);
197 ClobberCalleeSave();
198 CallHelper(r_tgt, helper_offset, safepoint_pc);
199}
200
201void Mir2Lir::CallRuntimeHelperImmMethodImm(int helper_offset, int arg0,
202 int arg2, bool safepoint_pc) {
203 int r_tgt = CallHelperSetup(helper_offset);
204 LoadCurrMethodDirect(TargetReg(kArg1));
205 LoadConstant(TargetReg(kArg2), arg2);
206 LoadConstant(TargetReg(kArg0), arg0);
207 ClobberCalleeSave();
208 CallHelper(r_tgt, helper_offset, safepoint_pc);
209}
210
211void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(int helper_offset,
212 int arg0, RegLocation arg1,
213 RegLocation arg2, bool safepoint_pc) {
214 int r_tgt = CallHelperSetup(helper_offset);
215 LoadValueDirectFixed(arg1, TargetReg(kArg1));
216 if (arg2.wide == 0) {
217 LoadValueDirectFixed(arg2, TargetReg(kArg2));
218 } else {
219 LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
220 }
221 LoadConstant(TargetReg(kArg0), arg0);
222 ClobberCalleeSave();
223 CallHelper(r_tgt, helper_offset, safepoint_pc);
224}
225
226/*
227 * If there are any ins passed in registers that have not been promoted
228 * to a callee-save register, flush them to the frame. Perform intial
229 * assignment of promoted arguments.
230 *
231 * ArgLocs is an array of location records describing the incoming arguments
232 * with one location record per word of argument.
233 */
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * Dummy up a RegLocation for the incoming Method*
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.low_reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src.low_reg, rl_src.s_reg_low);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush it to the frame slot at
  // [kSp + 0] as well, since the runtime expects it there.
  // NOTE(review): the "slot 0 holds Method*" layout is implied by the offset
  // used here — confirm against the frame layout documentation.
  if (rl_method.location == kLocPhysReg) {
    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
  }

  // No incoming arguments beyond Method* — nothing more to do.
  if (cu_->num_ins == 0)
    return;
  // Only the first three ins arrive in registers (kArg1..kArg3); the rest
  // arrive on the frame.
  const int num_arg_regs = 3;
  static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3};
  // Ins occupy the highest-numbered Dalvik vregs.
  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    if (i < num_arg_regs) {
      // If arriving in register
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      // Copy to the promoted register only when its kind (core vs fp)
      // matches the SSA type of the incoming argument (see note above).
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        OpRegCopy(v_map->core_reg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        OpRegCopy(v_map->FpReg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else {
        need_flush = true;
      }

      // For wide args, force flush if only half is promoted
      if (t_loc->wide) {
        // Look at the other half's promotion record: previous entry if this
        // is the high word, next entry if this is the low word.
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
      }
      if (need_flush) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                      TargetReg(arg_regs[i]), kWord);
      }
    } else {
      // If arriving in frame & promoted
      if (v_map->core_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->core_reg);
      }
      if (v_map->fp_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->FpReg);
      }
    }
  }
}
307
308/*
309 * Bit of a hack here - in the absence of a real scheduling pass,
310 * emit the next instruction in static & direct invoke sequences.
311 */
/*
 * State machine emitting one instruction (group) per call for static and
 * direct invokes.  Returns the next state, or -1 when the sequence is done.
 * direct_code / direct_method: 0 = unknown, -1 = known but must be loaded
 * from the literal pool, other = known constant value (see the checks against
 * static_cast<unsigned int>(-1) below).  Sharpening (use of direct values) is
 * only enabled for Thumb2.
 */
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                          int state, const MethodReference& target_method,
                          uint32_t unused,
                          uintptr_t direct_code, uintptr_t direct_method,
                          InvokeType type) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (cu->instruction_set != kThumb2) {
    // Disable sharpening
    direct_code = 0;
    direct_method = 0;
  }
  if (direct_code != 0 && direct_method != 0) {
    // Fully sharpened: both code address and method are known — one state.
    switch (state) {
    case 0:  // Get the current Method* [sets kArg0]
      if (direct_code != static_cast<unsigned int>(-1)) {
        // Code address is a known constant.
        cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
      } else {
        // Code address resolved at link time: load it pc-relative from the
        // code literal pool, adding an entry if one doesn't exist yet.
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                               target_method.dex_method_index, 0);
        if (data_target == NULL) {
          data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
          data_target->operands[1] = type;
        }
        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
        cg->AppendLIR(load_pc_rel);
        DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
      }
      if (direct_method != static_cast<unsigned int>(-1)) {
        // Method* is a known constant.
        cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
      } else {
        // Method* from the method literal pool, pc-relative.
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
                                               target_method.dex_method_index, 0);
        if (data_target == NULL) {
          data_target = cg->AddWordData(&cg->method_literal_list_, target_method.dex_method_index);
          data_target->operands[1] = type;
        }
        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
        cg->AppendLIR(load_pc_rel);
        DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
      }
      break;
    default:
      return -1;
    }
  } else {
    // Unsharpened path: walk the dex cache to the resolved method.
    switch (state) {
    case 0:  // Get the current Method* [sets kArg0]
      // TUNING: we can save a reg copy if Method* has been promoted.
      cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
      break;
    case 1:  // Get method->dex_cache_resolved_methods_
      cg->LoadWordDisp(cg->TargetReg(kArg0),
        mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
      // Set up direct code if known.
      if (direct_code != 0) {
        if (direct_code != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
        } else {
          // Load code address pc-relative from the code literal pool.
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                                 target_method.dex_method_index, 0);
          if (data_target == NULL) {
            data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
            data_target->operands[1] = type;
          }
          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
          cg->AppendLIR(load_pc_rel);
          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
        }
      }
      break;
    case 2:  // Grab target method*
      // Index into the resolved-methods array: data offset + index * 4.
      CHECK_EQ(cu->dex_file, target_method.dex_file);
      cg->LoadWordDisp(cg->TargetReg(kArg0),
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                       (target_method.dex_method_index * 4),
                       cg-> TargetReg(kArg0));
      break;
    case 3:  // Grab the code from the method*
      // x86 calls through memory, so it skips this state (falls to -1).
      if (cu->instruction_set != kX86) {
        if (direct_code == 0) {
          cg->LoadWordDisp(cg->TargetReg(kArg0),
                           mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                           cg->TargetReg(kInvokeTgt));
        }
        break;
      }
      // Intentional fallthrough for x86
    default:
      return -1;
    }
  }
  return state + 1;
}
408
409/*
410 * Bit of a hack here - in the absence of a real scheduling pass,
411 * emit the next instruction in a virtual invoke sequence.
412 * We can use kLr as a temp prior to target address loading
413 * Note also that we'll load the first argument ("this") into
414 * kArg1 here rather than the standard LoadArgRegs.
415 */
/*
 * State machine emitting one instruction (group) per call for virtual
 * invokes resolved at compile time.  Loads "this" into kArg1, null-checks
 * it, then chases klass_ -> vtable -> method -> code.  Returns the next
 * state, or -1 when done.
 */
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                         int state, const MethodReference& target_method,
                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
                         InvokeType unused3) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This is the fast path in which the target virtual method is
   * fully resolved at compile time.
   */
  switch (state) {
    case 0: {  // Get "this" [set kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 1:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get target method [use kInvokeTgt, set kArg0]
      // vtable is an object array: data offset + slot * 4.
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
      // x86 calls through memory and skips this state.
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86
    default:
      return -1;
  }
  return state + 1;
}
459
460/*
461 * All invoke-interface calls bounce off of art_quick_invoke_interface_trampoline,
462 * which will locate the target and continue on via a tail call.
463 */
/*
 * State machine for invoke-interface: set up kArg0 with the interface
 * Method* (directly when sharpened, via the dex cache otherwise) and load
 * the interface trampoline address into kInvokeTgt (non-x86 only).
 * Returns the next state, or -1 when done.
 * direct_method: 0 = unknown, -1 = load from literal pool, other = constant.
 */
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
                                 uint32_t unused, uintptr_t unused2,
                                 uintptr_t direct_method, InvokeType unused4) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (cu->instruction_set != kThumb2) {
    // Disable sharpening
    direct_method = 0;
  }
  // x86 doesn't go through kInvokeTgt, so no trampoline offset is needed.
  int trampoline = (cu->instruction_set == kX86) ? 0
      : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);

  if (direct_method != 0) {
    // Sharpened: the interface Method* is known — single state.
    switch (state) {
    case 0:  // Load the trampoline target [sets kInvokeTgt].
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
      }
      // Get the interface Method* [sets kArg0]
      if (direct_method != static_cast<unsigned int>(-1)) {
        cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
      } else {
        // Load Method* pc-relative from the method literal pool, adding an
        // entry if one doesn't exist yet.
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
                                               target_method.dex_method_index, 0);
        if (data_target == NULL) {
          data_target = cg->AddWordData(&cg->method_literal_list_,
                                        target_method.dex_method_index);
          data_target->operands[1] = kInterface;
        }
        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
        cg->AppendLIR(load_pc_rel);
        DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
      }
      break;
    default:
      return -1;
    }
  } else {
    // Unsharpened: resolve the interface method through the dex cache.
    switch (state) {
    case 0:
      // Get the current Method* [sets kArg0] - TUNING: remove copy of method if it is promoted.
      cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
      // Load the trampoline target [sets kInvokeTgt].
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
      }
      break;
    case 1:  // Get method->dex_cache_resolved_methods_ [set/use kArg0]
      cg->LoadWordDisp(cg->TargetReg(kArg0),
                       mirror::AbstractMethod::DexCacheResolvedMethodsOffset().Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 2:  // Grab target method* [set/use kArg0]
      // Index into the resolved-methods array: data offset + index * 4.
      CHECK_EQ(cu->dex_file, target_method.dex_file);
      cg->LoadWordDisp(cg->TargetReg(kArg0),
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                       (target_method.dex_method_index * 4),
                       cg->TargetReg(kArg0));
      break;
    default:
      return -1;
    }
  }
  return state + 1;
}
530
531static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, int trampoline,
532 int state, const MethodReference& target_method,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700533 uint32_t method_idx) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700534 Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
535 /*
536 * This handles the case in which the base method is not fully
537 * resolved at compile time, we bail to a runtime helper.
538 */
539 if (state == 0) {
540 if (cu->instruction_set != kX86) {
541 // Load trampoline target
542 cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline, cg->TargetReg(kInvokeTgt));
543 }
544 // Load kArg0 with method index
545 CHECK_EQ(cu->dex_file, target_method.dex_file);
546 cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
547 return 1;
548 }
549 return -1;
550}
551
552static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
553 int state,
554 const MethodReference& target_method,
555 uint32_t method_idx,
556 uintptr_t unused, uintptr_t unused2,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700557 InvokeType unused3) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700558 int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
559 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
560}
561
562static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
563 const MethodReference& target_method,
564 uint32_t method_idx, uintptr_t unused,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700565 uintptr_t unused2, InvokeType unused3) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700566 int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
567 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
568}
569
570static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
571 const MethodReference& target_method,
572 uint32_t method_idx, uintptr_t unused,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700573 uintptr_t unused2, InvokeType unused3) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700574 int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
575 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
576}
577
578static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
579 const MethodReference& target_method,
580 uint32_t method_idx, uintptr_t unused,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700581 uintptr_t unused2, InvokeType unused3) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700582 int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
583 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
584}
585
586static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
587 CallInfo* info, int state,
588 const MethodReference& target_method,
589 uint32_t unused,
590 uintptr_t unused2, uintptr_t unused3,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700591 InvokeType unused4) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700592 int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
593 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
594}
595
/*
 * Load the register-carried outgoing arguments into kArg1..kArg3,
 * invoking the per-instruction state machine between loads.
 * When skip_this is set, "this" has already been placed and the first
 * argument register/word is skipped.
 * NOTE(review): the next_reg++ arithmetic assumes kArg1..kArg3 map to
 * consecutive physical register numbers on every target — confirm in the
 * per-target TargetReg implementations.
 * Returns the updated call_state.
 */
int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
                         NextCallInsn next_call_insn,
                         const MethodReference& target_method,
                         uint32_t vtable_idx, uintptr_t direct_code,
                         uintptr_t direct_method, InvokeType type, bool skip_this) {
  int last_arg_reg = TargetReg(kArg3);
  int next_reg = TargetReg(kArg1);
  int next_arg = 0;
  if (skip_this) {
    next_reg++;
    next_arg++;
  }
  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
    RegLocation rl_arg = info->args[next_arg++];
    rl_arg = UpdateRawLoc(rl_arg);
    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
      // A wide value that fully fits: use a consecutive register pair.
      LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
      next_reg++;
      next_arg++;
    } else {
      if (rl_arg.wide) {
        // Only one register left: load just the low word here; drop the
        // wide/const flags so the single-word load is well-formed.
        rl_arg.wide = false;
        rl_arg.is_const = false;
      }
      LoadValueDirectFixed(rl_arg, next_reg);
    }
    // Advance the invoke state machine between argument loads.
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  }
  return call_state;
}
627
628/*
629 * Load up to 5 arguments, the first three of which will be in
630 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
631 * and as part of the load sequence, it must be replaced with
632 * the target method pointer. Note, this may also be called
633 * for "range" variants if the number of arguments is 5 or fewer.
634 */
635int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
636 int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
637 const MethodReference& target_method,
638 uint32_t vtable_idx, uintptr_t direct_code,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700639 uintptr_t direct_method, InvokeType type, bool skip_this) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700640 RegLocation rl_arg;
641
642 /* If no arguments, just return */
643 if (info->num_arg_words == 0)
644 return call_state;
645
646 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
647 direct_code, direct_method, type);
648
649 DCHECK_LE(info->num_arg_words, 5);
650 if (info->num_arg_words > 3) {
651 int32_t next_use = 3;
652 //Detect special case of wide arg spanning arg3/arg4
653 RegLocation rl_use0 = info->args[0];
654 RegLocation rl_use1 = info->args[1];
655 RegLocation rl_use2 = info->args[2];
656 if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
657 rl_use2.wide) {
658 int reg = -1;
659 // Wide spans, we need the 2nd half of uses[2].
660 rl_arg = UpdateLocWide(rl_use2);
661 if (rl_arg.location == kLocPhysReg) {
662 reg = rl_arg.high_reg;
663 } else {
664 // kArg2 & rArg3 can safely be used here
665 reg = TargetReg(kArg3);
666 LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
667 call_state = next_call_insn(cu_, info, call_state, target_method,
668 vtable_idx, direct_code, direct_method, type);
669 }
670 StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
671 StoreBaseDisp(TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord);
672 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
673 direct_code, direct_method, type);
674 next_use++;
675 }
676 // Loop through the rest
677 while (next_use < info->num_arg_words) {
678 int low_reg;
679 int high_reg = -1;
680 rl_arg = info->args[next_use];
681 rl_arg = UpdateRawLoc(rl_arg);
682 if (rl_arg.location == kLocPhysReg) {
683 low_reg = rl_arg.low_reg;
684 high_reg = rl_arg.high_reg;
685 } else {
686 low_reg = TargetReg(kArg2);
687 if (rl_arg.wide) {
688 high_reg = TargetReg(kArg3);
689 LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
690 } else {
691 LoadValueDirectFixed(rl_arg, low_reg);
692 }
693 call_state = next_call_insn(cu_, info, call_state, target_method,
694 vtable_idx, direct_code, direct_method, type);
695 }
696 int outs_offset = (next_use + 1) * 4;
697 if (rl_arg.wide) {
698 StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
699 next_use += 2;
700 } else {
701 StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
702 next_use++;
703 }
704 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
705 direct_code, direct_method, type);
706 }
707 }
708
709 call_state = LoadArgRegs(info, call_state, next_call_insn,
710 target_method, vtable_idx, direct_code, direct_method,
711 type, skip_this);
712
713 if (pcrLabel) {
714 *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
715 }
716 return call_state;
717}
718
719/*
720 * May have 0+ arguments (also used for jumbo). Note that
721 * source virtual registers may be in physical registers, so may
722 * need to be flushed to home location before copying. This
723 * applies to arg3 and above (see below).
724 *
725 * Two general strategies:
726 * If < 20 arguments
727 * Pass args 3-18 using vldm/vstm block copy
728 * Pass arg0, arg1 & arg2 in kArg1-kArg3
729 * If 20+ arguments
730 * Pass args arg19+ using memcpy block copy
731 * Pass arg0, arg1 & arg2 in kArg1-kArg3
732 *
733 */
int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                LIR** pcrLabel, NextCallInsn next_call_insn,
                                const MethodReference& target_method,
                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                InvokeType type, bool skip_this) {

  // If we can treat it as non-range (Jumbo ops will use range form)
  if (info->num_arg_words <= 5)
    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
                                next_call_insn, target_method, vtable_idx,
                                direct_code, direct_method, type, skip_this);
  /*
   * First load the non-register arguments. Both forms expect all
   * of the source arguments to be in their home frame location, so
   * scan the s_reg names and flush any that have been promoted to
   * frame backing storage.
   */
  // Scan the rest of the args - if in phys_reg flush to memory
  for (int next_arg = 0; next_arg < info->num_arg_words;) {
    RegLocation loc = info->args[next_arg];
    if (loc.wide) {
      loc = UpdateLocWide(loc);
      // Wide values at word index >= 2 may straddle the block-copied
      // region, so flush them even though only args 3+ are copied.
      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                          loc.low_reg, loc.high_reg);
      }
      next_arg += 2;
    } else {
      loc = UpdateLoc(loc);
      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.low_reg, kWord);
      }
      next_arg++;
    }
  }

  // Source: home frame location of argument word 3 (first non-register arg).
  int start_offset = SRegOffset(info->args[3].s_reg_low);
  // Destination: out area, past the Method* slot and the 3 register args.
  int outs_offset = 4 /* Method* */ + (3 * 4);
  if (cu_->instruction_set != kThumb2) {
    // Generate memcpy
    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
    CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
  } else {
    if (info->num_arg_words >= 20) {
      // Too many words for a single vldm/vstm pair — fall back to memcpy.
      // Generate memcpy
      OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
      OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
      CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                                 TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
    } else {
      // Use vldm/vstm pair using kArg3 as a temp
      int regs_left = std::min(info->num_arg_words - 3, 16);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
      LIR* ld = OpVldm(TargetReg(kArg3), regs_left);
      // TUNING: loosen barrier
      // Treat the block load as touching everything so it isn't reordered.
      ld->def_mask = ENCODE_ALL;
      SetMemRefType(ld, true /* is_load */, kDalvikReg);
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
      LIR* st = OpVstm(TargetReg(kArg3), regs_left);
      SetMemRefType(st, false /* is_load */, kDalvikReg);
      st->def_mask = ENCODE_ALL;
      call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                  direct_code, direct_method, type);
    }
  }

  // Now place the first three argument words in kArg1..kArg3.
  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}
820
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700821RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700822 RegLocation res;
823 if (info->result.location == kLocInvalid) {
824 res = GetReturn(false);
825 } else {
826 res = info->result;
827 }
828 return res;
829}
830
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700831RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700832 RegLocation res;
833 if (info->result.location == kLocInvalid) {
834 res = GetReturnWide(false);
835 } else {
836 res = info->result;
837 }
838 return res;
839}
840
// Generates an inlined implementation of String.charAt(int): explicit null
// check, an optional in-line bounds check that bails to an intrinsic-retry
// launch pad on violation, then a direct 16-bit load from the String's
// backing char array. Returns false when the target has no implementation
// (the call then goes through the normal invoke path).
bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Location of reference to data array
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data with array_
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  RegLocation rl_obj = info->args[0];  // The String object.
  RegLocation rl_idx = info->args[1];  // The character index.
  rl_obj = LoadValue(rl_obj, kCoreReg);
  rl_idx = LoadValue(rl_idx, kCoreReg);
  int reg_max;  // Holds String.count; only allocated when range checking.
  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
  LIR* launch_pad = NULL;
  int reg_off = INVALID_REG;
  int reg_ptr = INVALID_REG;
  if (cu_->instruction_set != kX86) {
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
    }
    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
    if (range_check) {
      // Set up a launch pad to allow retry in case of bounds violation.
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
      intrinsic_launchpads_.Insert(launch_pad);
      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
      FreeTemp(reg_max);
      // Branch to the retry pad when idx is out of range (unsigned compare;
      // condition code chosen for this target's carry semantics).
      OpCondBranch(kCondCs, launch_pad);
    }
  } else {
    // x86 path: range check first, before allocating the offset/pointer
    // temps, to economize on registers.
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
      // Set up a launch pad to allow retry in case of bounds violation.
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
      intrinsic_launchpads_.Insert(launch_pad);
      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
      FreeTemp(reg_max);
      // Out-of-range branch; note x86 uses the opposite carry condition to
      // the non-x86 path above for the same unsigned idx >= max test.
      OpCondBranch(kCondCc, launch_pad);
    }
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
  }
  // reg_ptr <- start of char data; reg_off <- element index within it.
  OpRegImm(kOpAdd, reg_ptr, data_offset);
  OpRegReg(kOpAdd, reg_off, rl_idx.low_reg);
  FreeTemp(rl_obj.low_reg);
  FreeTemp(rl_idx.low_reg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  // Scale factor 1 (shift) because chars are 16-bit elements.
  LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
  FreeTemp(reg_off);
  FreeTemp(reg_ptr);
  StoreValue(rl_dest, rl_result);
  if (range_check) {
    launch_pad->operands[2] = 0;  // no resumption
  }
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  return true;
}
915
// Generates an inlined String.isEmpty or String.length: a null check followed
// by a direct load of the count field; for isEmpty the count is additionally
// collapsed to a 0/1 boolean without branching.
bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // dst = src.length();
  RegLocation rl_obj = info->args[0];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
  LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg);
  if (is_empty) {
    // dst = (dst == 0);
    if (cu_->instruction_set == kThumb2) {
      // neg + adc turns zero into 1 and any non-zero value into 0.
      int t_reg = AllocTemp();
      OpRegReg(kOpNeg, t_reg, rl_result.low_reg);
      OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
    } else {
      DCHECK_EQ(cu_->instruction_set, kX86);
      // (dst - 1) >> 31 (logical): 1 only when dst was zero.
      OpRegImm(kOpSub, rl_result.low_reg, 1);
      OpRegImm(kOpLsr, rl_result.low_reg, 31);
    }
  }
  StoreValue(rl_dest, rl_result);
  return true;
}
944
// Generates an inlined, branch-free Math.abs(int)/StrictMath.abs(int).
bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  RegLocation rl_src = info->args[0];
  rl_src = LoadValue(rl_src, kCoreReg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int sign_reg = AllocTemp();
  // Branch-free abs: y = x >> 31 (arithmetic, so all-ones when negative,
  // zero otherwise); abs(x) = (x + y) ^ y.
  OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31);
  OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
  OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
  StoreValue(rl_dest, rl_result);
  return true;
}
962
// Generates an inlined, branch-free Math.abs(long)/StrictMath.abs(long),
// using add/adc and xor against the sign mask of the high word.
bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  if (cu_->instruction_set == kThumb2) {
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    int sign_reg = AllocTemp();
    // Branch-free abs: y = x >> 63 via the high word; abs(x) = (x + y) ^ y,
    // carried across the 32-bit halves with add/adc.
    OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31);
    OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
    OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  } else {
    DCHECK_EQ(cu_->instruction_set, kX86);
    // Reuse source registers to avoid running out of temps
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
    FreeTemp(rl_src.low_reg);
    FreeTemp(rl_src.high_reg);
    int sign_reg = AllocTemp();
    // Same branch-free abs as the Thumb2 path, but performed in place on the
    // result registers (x86 two-operand forms).
    OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31);
    OpRegReg(kOpAdd, rl_result.low_reg, sign_reg);
    OpRegReg(kOpAdc, rl_result.high_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  }
}
1003
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001004bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001005 if (cu_->instruction_set == kMips) {
1006 // TODO - add Mips implementation
1007 return false;
1008 }
1009 RegLocation rl_src = info->args[0];
1010 RegLocation rl_dest = InlineTarget(info);
1011 StoreValue(rl_dest, rl_src);
1012 return true;
1013}
1014
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001015bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001016 if (cu_->instruction_set == kMips) {
1017 // TODO - add Mips implementation
1018 return false;
1019 }
1020 RegLocation rl_src = info->args[0];
1021 RegLocation rl_dest = InlineTargetWide(info);
1022 StoreValueWide(rl_dest, rl_src);
1023 return true;
1024}
1025
/*
 * Fast String.indexOf(I) & (II). Tests for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCalleeSave();
  LockCallTemps();  // Using fixed registers
  int reg_ptr = TargetReg(kArg0);    // String object.
  int reg_char = TargetReg(kArg1);   // Character to search for.
  int reg_start = TargetReg(kArg2);  // Starting index.

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start = info->args[2];
  LoadValueDirectFixed(rl_obj, reg_ptr);
  LoadValueDirectFixed(rl_char, reg_char);
  if (zero_based) {
    // indexOf(I) variant: search from the beginning.
    LoadConstant(reg_start, 0);
  } else {
    LoadValueDirectFixed(rl_start, reg_start);
  }
  // Non-x86 targets load the helper address up front; x86 calls through
  // thread memory below.
  int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0;
  GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
  intrinsic_launchpads_.Insert(launch_pad);
  // Supplementary (> 0xFFFF) code points take the slow path via the pad.
  OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
  }
  // The slow path re-enters here after the library call.
  LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
  launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}
1071
/* Fast String.compareTo(Ljava/lang/String;)I: null-check the argument, then
 * call the pStringCompareTo runtime helper directly (skipping the normal
 * invoke machinery). A null comparison string bails to the retry pad. */
bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  ClobberCalleeSave();
  LockCallTemps();  // Using fixed registers
  int reg_this = TargetReg(kArg0);
  int reg_cmp = TargetReg(kArg1);

  RegLocation rl_this = info->args[0];
  RegLocation rl_cmp = info->args[1];
  LoadValueDirectFixed(rl_this, reg_this);
  LoadValueDirectFixed(rl_cmp, reg_cmp);
  // Non-x86 targets load the helper address up front; x86 calls through
  // thread memory below.
  int r_tgt = (cu_->instruction_set != kX86) ?
      LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
  GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
  // TUNING: check if rl_cmp.s_reg_low is already null checked
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
  intrinsic_launchpads_.Insert(launch_pad);
  // A null comparison string must throw NPE: take the slow path.
  OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
  }
  launch_pad->operands[2] = 0;  // No return possible
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}
1108
1109bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1110 RegLocation rl_dest = InlineTarget(info);
1111 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1112 int offset = Thread::PeerOffset().Int32Value();
1113 if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
1114 LoadWordDisp(TargetReg(kSelf), offset, rl_result.low_reg);
1115 } else {
1116 CHECK(cu_->instruction_set == kX86);
1117 ((X86Mir2Lir*)this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
1118 }
1119 StoreValue(rl_dest, rl_result);
1120 return true;
1121}
1122
// Inlines sun.misc.Unsafe.get{Int,Long,Object}[Volatile](Object, long):
// optional load-load barrier, then a direct (base + offset) load. Only the
// low 32 bits of the long offset are used (the high half in args[3] is
// ignored).
bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
                                  bool is_long, bool is_volatile) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_dest = InlineTarget(info);  // result reg
  if (is_volatile) {
    GenMemBarrier(kLoadLoad);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_long) {
    // Form the effective address by adding the offset into the object
    // register (clobbering it), then do a base+0 wide load.
    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
    LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
    StoreValueWide(rl_dest, rl_result);
  } else {
    LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}
1150
// Inlines sun.misc.Unsafe.put{Int,Long,Object}[Volatile|Ordered](...):
// store-store barrier before the store for volatile/ordered variants, the
// direct (base + offset) store, a store-load barrier after for volatile, and
// a GC card mark for object stores. Only the low 32 bits of the long offset
// are used.
bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                  bool is_object, bool is_volatile, bool is_ordered) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  if (cu_->instruction_set == kX86 && is_object) {
    // TODO: fix X86, it exhausts registers for card marking.
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_value = info->args[4];  // value to store
  if (is_volatile || is_ordered) {
    GenMemBarrier(kStoreStore);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_value;
  if (is_long) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
    // Form the effective address by adding the offset into the object
    // register (clobbering it), then do a base+0 wide store.
    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
    StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
  } else {
    rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
  }
  if (is_volatile) {
    GenMemBarrier(kStoreLoad);
  }
  if (is_object) {
    // Reference store: dirty the GC card for the holding object.
    MarkGCCard(rl_value.low_reg, rl_object.low_reg);
  }
  return true;
}
1188
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001189bool Mir2Lir::GenIntrinsic(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001190 if (info->opt_flags & MIR_INLINED) {
1191 return false;
1192 }
1193 /*
1194 * TODO: move these to a target-specific structured constant array
1195 * and use a generic match function. The list of intrinsics may be
1196 * slightly different depending on target.
1197 * TODO: Fold this into a matching function that runs during
1198 * basic block building. This should be part of the action for
1199 * small method inlining and recognition of the special object init
1200 * method. By doing this during basic block construction, we can also
1201 * take advantage of/generate new useful dataflow info.
1202 */
1203 StringPiece tgt_methods_declaring_class(
1204 cu_->dex_file->GetMethodDeclaringClassDescriptor(cu_->dex_file->GetMethodId(info->index)));
1205 if (tgt_methods_declaring_class.starts_with("Ljava/lang/Double;")) {
1206 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1207 if (tgt_method == "long java.lang.Double.doubleToRawLongBits(double)") {
1208 return GenInlinedDoubleCvt(info);
1209 }
1210 if (tgt_method == "double java.lang.Double.longBitsToDouble(long)") {
1211 return GenInlinedDoubleCvt(info);
1212 }
1213 } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Float;")) {
1214 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1215 if (tgt_method == "int java.lang.Float.float_to_raw_int_bits(float)") {
1216 return GenInlinedFloatCvt(info);
1217 }
1218 if (tgt_method == "float java.lang.Float.intBitsToFloat(int)") {
1219 return GenInlinedFloatCvt(info);
1220 }
1221 } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Math;") ||
1222 tgt_methods_declaring_class.starts_with("Ljava/lang/StrictMath;")) {
1223 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1224 if (tgt_method == "int java.lang.Math.abs(int)" ||
1225 tgt_method == "int java.lang.StrictMath.abs(int)") {
1226 return GenInlinedAbsInt(info);
1227 }
1228 if (tgt_method == "long java.lang.Math.abs(long)" ||
1229 tgt_method == "long java.lang.StrictMath.abs(long)") {
1230 return GenInlinedAbsLong(info);
1231 }
1232 if (tgt_method == "int java.lang.Math.max(int, int)" ||
1233 tgt_method == "int java.lang.StrictMath.max(int, int)") {
1234 return GenInlinedMinMaxInt(info, false /* is_min */);
1235 }
1236 if (tgt_method == "int java.lang.Math.min(int, int)" ||
1237 tgt_method == "int java.lang.StrictMath.min(int, int)") {
1238 return GenInlinedMinMaxInt(info, true /* is_min */);
1239 }
1240 if (tgt_method == "double java.lang.Math.sqrt(double)" ||
1241 tgt_method == "double java.lang.StrictMath.sqrt(double)") {
1242 return GenInlinedSqrt(info);
1243 }
1244 } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/String;")) {
1245 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1246 if (tgt_method == "char java.lang.String.charAt(int)") {
1247 return GenInlinedCharAt(info);
1248 }
1249 if (tgt_method == "int java.lang.String.compareTo(java.lang.String)") {
1250 return GenInlinedStringCompareTo(info);
1251 }
1252 if (tgt_method == "boolean java.lang.String.is_empty()") {
1253 return GenInlinedStringIsEmptyOrLength(info, true /* is_empty */);
1254 }
1255 if (tgt_method == "int java.lang.String.index_of(int, int)") {
1256 return GenInlinedIndexOf(info, false /* base 0 */);
1257 }
1258 if (tgt_method == "int java.lang.String.index_of(int)") {
1259 return GenInlinedIndexOf(info, true /* base 0 */);
1260 }
1261 if (tgt_method == "int java.lang.String.length()") {
1262 return GenInlinedStringIsEmptyOrLength(info, false /* is_empty */);
1263 }
1264 } else if (tgt_methods_declaring_class.starts_with("Ljava/lang/Thread;")) {
1265 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1266 if (tgt_method == "java.lang.Thread java.lang.Thread.currentThread()") {
1267 return GenInlinedCurrentThread(info);
1268 }
1269 } else if (tgt_methods_declaring_class.starts_with("Lsun/misc/Unsafe;")) {
1270 std::string tgt_method(PrettyMethod(info->index, *cu_->dex_file));
1271 if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapInt(java.lang.Object, long, int, int)") {
1272 return GenInlinedCas32(info, false);
1273 }
1274 if (tgt_method == "boolean sun.misc.Unsafe.compareAndSwapObject(java.lang.Object, long, java.lang.Object, java.lang.Object)") {
1275 return GenInlinedCas32(info, true);
1276 }
1277 if (tgt_method == "int sun.misc.Unsafe.getInt(java.lang.Object, long)") {
1278 return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
1279 }
1280 if (tgt_method == "int sun.misc.Unsafe.getIntVolatile(java.lang.Object, long)") {
1281 return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
1282 }
1283 if (tgt_method == "void sun.misc.Unsafe.putInt(java.lang.Object, long, int)") {
1284 return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
1285 false /* is_volatile */, false /* is_ordered */);
1286 }
1287 if (tgt_method == "void sun.misc.Unsafe.putIntVolatile(java.lang.Object, long, int)") {
1288 return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
1289 true /* is_volatile */, false /* is_ordered */);
1290 }
1291 if (tgt_method == "void sun.misc.Unsafe.putOrderedInt(java.lang.Object, long, int)") {
1292 return GenInlinedUnsafePut(info, false /* is_long */, false /* is_object */,
1293 false /* is_volatile */, true /* is_ordered */);
1294 }
1295 if (tgt_method == "long sun.misc.Unsafe.getLong(java.lang.Object, long)") {
1296 return GenInlinedUnsafeGet(info, true /* is_long */, false /* is_volatile */);
1297 }
1298 if (tgt_method == "long sun.misc.Unsafe.getLongVolatile(java.lang.Object, long)") {
1299 return GenInlinedUnsafeGet(info, true /* is_long */, true /* is_volatile */);
1300 }
1301 if (tgt_method == "void sun.misc.Unsafe.putLong(java.lang.Object, long, long)") {
1302 return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
1303 false /* is_volatile */, false /* is_ordered */);
1304 }
1305 if (tgt_method == "void sun.misc.Unsafe.putLongVolatile(java.lang.Object, long, long)") {
1306 return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
1307 true /* is_volatile */, false /* is_ordered */);
1308 }
1309 if (tgt_method == "void sun.misc.Unsafe.putOrderedLong(java.lang.Object, long, long)") {
1310 return GenInlinedUnsafePut(info, true /* is_long */, false /* is_object */,
1311 false /* is_volatile */, true /* is_ordered */);
1312 }
1313 if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObject(java.lang.Object, long)") {
1314 return GenInlinedUnsafeGet(info, false /* is_long */, false /* is_volatile */);
1315 }
1316 if (tgt_method == "java.lang.Object sun.misc.Unsafe.getObjectVolatile(java.lang.Object, long)") {
1317 return GenInlinedUnsafeGet(info, false /* is_long */, true /* is_volatile */);
1318 }
1319 if (tgt_method == "void sun.misc.Unsafe.putObject(java.lang.Object, long, java.lang.Object)") {
1320 return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
1321 false /* is_volatile */, false /* is_ordered */);
1322 }
1323 if (tgt_method == "void sun.misc.Unsafe.putObjectVolatile(java.lang.Object, long, java.lang.Object)") {
1324 return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
1325 true /* is_volatile */, false /* is_ordered */);
1326 }
1327 if (tgt_method == "void sun.misc.Unsafe.putOrderedObject(java.lang.Object, long, java.lang.Object)") {
1328 return GenInlinedUnsafePut(info, false /* is_long */, true /* is_object */,
1329 false /* is_volatile */, true /* is_ordered */);
1330 }
1331 }
1332 return false;
1333}
1334
/*
 * Generate code for a Dalvik invoke. First tries to emit the call as an
 * inlined intrinsic; otherwise flushes registers, marshals arguments while
 * interleaving the target-resolution instruction sequence (next_call_insn),
 * emits the call itself (register-indirect, method-entry-point memory call,
 * or trampoline on x86 slow paths), records a safepoint, and finally stores
 * the result if a MOVE_RESULT follows.
 */
void Mir2Lir::GenInvoke(CallInfo* info) {
  if (GenIntrinsic(info)) {
    return;
  }
  InvokeType original_type = info->type;  // avoiding mutation by ComputeInvokeInfo
  int call_state = 0;
  LIR* null_ck;
  LIR** p_null_ck = NULL;
  NextCallInsn next_call_insn;
  FlushAllRegs(); /* Everything to home location */
  // Explicit register usage
  LockCallTemps();

  DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit();
  MethodReference target_method(cUnit->GetDexFile(), info->index);
  int vtable_idx;
  uintptr_t direct_code;
  uintptr_t direct_method;
  bool skip_this;
  // fast_path => the driver resolved the target; SLOW_INVOKE_PATH can force
  // the slow path for debugging.
  bool fast_path =
      cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
                                              current_dalvik_offset_,
                                              info->type, target_method,
                                              vtable_idx,
                                              direct_code, direct_method,
                                              true) && !SLOW_INVOKE_PATH;
  // Select the per-invoke-type instruction-sequence generator, whether a
  // null check is needed (p_null_ck), and whether the "this" argument load
  // can be skipped (already loaded by the call sequence).
  if (info->type == kInterface) {
    if (fast_path) {
      p_null_ck = &null_ck;
    }
    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
    skip_this = false;
  } else if (info->type == kDirect) {
    if (fast_path) {
      p_null_ck = &null_ck;
    }
    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
    skip_this = false;
  } else if (info->type == kStatic) {
    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
    skip_this = false;
  } else if (info->type == kSuper) {
    DCHECK(!fast_path);  // Fast path is a direct call.
    next_call_insn = NextSuperCallInsnSP;
    skip_this = false;
  } else {
    DCHECK_EQ(info->type, kVirtual);
    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
    skip_this = fast_path;
  }
  if (!info->is_range) {
    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
                                      next_call_insn, target_method,
                                      vtable_idx, direct_code, direct_method,
                                      original_type, skip_this);
  } else {
    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
                                    next_call_insn, target_method, vtable_idx,
                                    direct_code, direct_method, original_type,
                                    skip_this);
  }
  // Finish up any of the call sequence not interleaved in arg loading
  while (call_state >= 0) {
    call_state = next_call_insn(cu_, info, call_state, target_method,
                                vtable_idx, direct_code, direct_method,
                                original_type);
  }
  LIR* call_inst;
  if (cu_->instruction_set != kX86) {
    // Non-x86: branch through the invoke-target register.
    call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
  } else {
    if (fast_path && info->type != kInterface) {
      // x86 fast path: call through the method's entry point in memory.
      call_inst = OpMem(kOpBlx, TargetReg(kArg0),
                        mirror::AbstractMethod::GetEntryPointFromCompiledCodeOffset().Int32Value());
    } else {
      // x86 slow path: call the per-invoke-type runtime trampoline.
      int trampoline = 0;
      switch (info->type) {
      case kInterface:
        trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
            : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
        break;
      case kDirect:
        trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
        break;
      case kStatic:
        trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
        break;
      case kSuper:
        trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
        break;
      case kVirtual:
        trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
        break;
      default:
        LOG(FATAL) << "Unexpected invoke type";
      }
      call_inst = OpThreadMem(kOpBlx, trampoline);
    }
  }
  MarkSafepointPC(call_inst);

  ClobberCalleeSave();
  if (info->result.location != kLocInvalid) {
    // We have a following MOVE_RESULT - do it now.
    if (info->result.wide) {
      RegLocation ret_loc = GetReturnWide(info->result.fp);
      StoreValueWide(info->result, ret_loc);
    } else {
      RegLocation ret_loc = GetReturn(info->result.fp);
      StoreValue(info->result, ret_loc);
    }
  }
}
1448
1449} // namespace art