blob: 6382dd6608f180d1e70cb2e17588e5f930f7d1e9 [file] [log] [blame]
Brian Carlstrom7940e442013-07-12 13:46:57 -07001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_ir.h"
Vladimir Marko5c96e6b2013-11-14 15:34:17 +000018#include "dex/frontend.h"
19#include "dex/quick/dex_file_method_inliner.h"
20#include "dex/quick/dex_file_to_method_inliner_map.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070021#include "dex_file-inl.h"
Ian Rogers166db042013-07-26 12:05:57 -070022#include "entrypoints/quick/quick_entrypoints.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070023#include "invoke_type.h"
24#include "mirror/array.h"
25#include "mirror/string.h"
26#include "mir_to_lir-inl.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070027#include "x86/codegen_x86.h"
28
29namespace art {
30
31/*
 * This source file contains "gen" codegen routines that should
33 * be applicable to most targets. Only mid-level support utilities
34 * and "op" calls may be used here.
35 */
36
37/*
38 * To save scheduling time, helper calls are broken into two parts: generation of
 * the helper target address, and the actual call to the helper. Because x86
40 * has a memory call operation, part 1 is a NOP for x86. For other targets,
41 * load arguments between the two parts.
42 */
Ian Rogers848871b2013-08-05 10:56:33 -070043int Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) {
Brian Carlstrom7940e442013-07-12 13:46:57 -070044 return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
45}
46
47/* NOTE: if r_tgt is a temp, it will be freed following use */
Ian Rogers848871b2013-08-05 10:56:33 -070048LIR* Mir2Lir::CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc) {
Brian Carlstrom7940e442013-07-12 13:46:57 -070049 LIR* call_inst;
50 if (cu_->instruction_set == kX86) {
51 call_inst = OpThreadMem(kOpBlx, helper_offset);
52 } else {
53 call_inst = OpReg(kOpBlx, r_tgt);
54 FreeTemp(r_tgt);
55 }
56 if (safepoint_pc) {
57 MarkSafepointPC(call_inst);
58 }
59 return call_inst;
60}
61
// Call a runtime helper that takes a single immediate argument (in kArg0).
// |safepoint_pc| requests a safepoint record at the call site.
void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);  // NOP on x86 (memory-form call).
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
68
// Call a runtime helper that takes a single register argument, copied into kArg0.
void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
75
// Call a runtime helper with one RegLocation argument: narrow values go in
// kArg0, wide (64-bit) values are split across the kArg0/kArg1 pair.
void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                           bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    LoadValueDirectFixed(arg0, TargetReg(kArg0));
  } else {
    LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
  }
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
87
// Call a runtime helper with two immediate arguments in kArg0/kArg1.
void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadConstant(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
96
// Call a runtime helper with an immediate (kArg0) and a RegLocation (kArg1,
// spilling into kArg2 if wide). The RegLocation is loaded first so its source
// cannot be clobbered by the constant load into kArg0.
void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
                                              RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg1.wide == 0) {
    LoadValueDirectFixed(arg1, TargetReg(kArg1));
  } else {
    LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
109
// Call a runtime helper with a narrow RegLocation (kArg0) and an immediate (kArg1).
void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0, int arg1,
                                              bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg0, TargetReg(kArg0));
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
118
// Call a runtime helper with an immediate (kArg0) and a register (kArg1).
// The register copy is performed before the constant load — presumably so a
// source register currently living in kArg0 is not clobbered first (NOTE(review):
// confirm this ordering assumption against callers).
void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
127
// Call a runtime helper with a register (kArg0) and an immediate (kArg1).
void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  OpRegCopy(TargetReg(kArg0), arg0);
  LoadConstant(TargetReg(kArg1), arg1);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
136
// Call a runtime helper with an immediate (kArg0) and the current Method* (kArg1).
void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
144
// Call a runtime helper with a register (kArg0) and the current Method* (kArg1).
// arg0 must not already be in kArg1 — the Method* load would clobber it.
void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg1), arg0);  // kArg1 is about to be overwritten by Method*.
  if (TargetReg(kArg0) != arg0) {
    OpRegCopy(TargetReg(kArg0), arg0);  // Skip the copy if arg0 is already in place.
  }
  LoadCurrMethodDirect(TargetReg(kArg1));
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
155
// Call a runtime helper with a register (kArg0), the current Method* (kArg1),
// and a narrow RegLocation (kArg2).
void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, int arg0,
                                                    RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg1), arg0);  // kArg1 is about to be overwritten by Method*.
  if (TargetReg(kArg0) != arg0) {
    OpRegCopy(TargetReg(kArg0), arg0);  // Skip the copy if arg0 is already in place.
  }
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadValueDirectFixed(arg2, TargetReg(kArg2));
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
168
// Call a runtime helper with two RegLocation arguments, handling every
// narrow/wide combination. FP values go in kFArg* registers where the target
// calling convention requires it (Mips always; all targets when arg0 is wide).
// Note the shift in arg1's home: when arg0 is wide it occupies two arg words,
// so arg1 starts at kArg2/kFArg2 instead of kArg1.
void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                                      RegLocation arg1, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    // arg0 takes one slot: kArg0 (or kFArg0 for a float value).
    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
    if (arg1.wide == 0) {
      if (cu_->instruction_set == kMips) {
        // Mips passes FP args in dedicated FP argument registers.
        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
      } else {
        LoadValueDirectFixed(arg1, TargetReg(kArg1));
      }
    } else {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
      } else {
        LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
      }
    }
  } else {
    // arg0 is wide: it fills the first two slots (kArg0/kArg1 or kFArg0/kFArg1).
    LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
    if (arg1.wide == 0) {
      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
    } else {
      LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
    }
  }
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
198
// Call a runtime helper with two register arguments, copied into kArg0/kArg1.
void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
                                      bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
208
// Call a runtime helper with two registers (kArg0/kArg1) and an immediate (kArg2).
void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
                                         int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_NE(TargetReg(kArg0), arg1);  // check copy into arg0 won't clobber arg1
  OpRegCopy(TargetReg(kArg0), arg0);
  OpRegCopy(TargetReg(kArg1), arg1);
  LoadConstant(TargetReg(kArg2), arg2);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
219
// Call a runtime helper with an immediate (kArg0), the current Method* (kArg1),
// and a narrow RegLocation (kArg2). The RegLocation is loaded first so its
// source cannot be clobbered by the Method*/constant loads.
void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset,
                                                    int arg0, RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadValueDirectFixed(arg2, TargetReg(kArg2));
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
229
// Call a runtime helper with an immediate (kArg0), the current Method* (kArg1),
// and a second immediate (kArg2).
void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0,
                                            int arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  LoadCurrMethodDirect(TargetReg(kArg1));
  LoadConstant(TargetReg(kArg2), arg2);
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
239
// Call a runtime helper with an immediate (kArg0), a narrow RegLocation (kArg1),
// and a RegLocation (kArg2, spilling into kArg3 if wide). RegLocations are
// loaded before the constant so their sources cannot be clobbered.
void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
                                                         int arg0, RegLocation arg1,
                                                         RegLocation arg2, bool safepoint_pc) {
  int r_tgt = CallHelperSetup(helper_offset);
  DCHECK_EQ(arg1.wide, 0U);  // arg1 must be narrow — only one slot (kArg1) is reserved for it.
  LoadValueDirectFixed(arg1, TargetReg(kArg1));
  if (arg2.wide == 0) {
    LoadValueDirectFixed(arg2, TargetReg(kArg2));
  } else {
    LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
  }
  LoadConstant(TargetReg(kArg0), arg0);
  ClobberCallerSave();  // Caller-save registers are dead across the runtime call.
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
255
Ian Rogersa9a82542013-10-04 11:17:26 -0700256void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset helper_offset,
257 RegLocation arg0, RegLocation arg1,
258 RegLocation arg2,
259 bool safepoint_pc) {
260 int r_tgt = CallHelperSetup(helper_offset);
261 DCHECK_EQ(arg0.wide, 0U);
262 LoadValueDirectFixed(arg0, TargetReg(kArg0));
263 DCHECK_EQ(arg1.wide, 0U);
264 LoadValueDirectFixed(arg1, TargetReg(kArg1));
265 DCHECK_EQ(arg1.wide, 0U);
266 LoadValueDirectFixed(arg2, TargetReg(kArg2));
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000267 ClobberCallerSave();
Ian Rogersa9a82542013-10-04 11:17:26 -0700268 CallHelper(r_tgt, helper_offset, safepoint_pc);
269}
270
Brian Carlstrom7940e442013-07-12 13:46:57 -0700271/*
272 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
274 * assignment of promoted arguments.
275 *
276 * ArgLocs is an array of location records describing the incoming arguments
277 * with one location record per word of argument.
278 */
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * Dummy up a RegLocation for the incoming Method*
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.low_reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src.low_reg, rl_src.s_reg_low);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
  }

  // No incoming arguments beyond Method* — nothing more to flush.
  if (cu_->num_ins == 0)
    return;
  // The first three argument words arrive in kArg1..kArg3; the rest arrive
  // already in the frame.
  const int num_arg_regs = 3;
  static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3};
  // Ins occupy the highest-numbered Dalvik vRegs.
  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    if (i < num_arg_regs) {
      // If arriving in register
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      // Copy to the promoted register only if its kind (core vs fp) matches
      // the SSA type of the incoming argument (see dx note above).
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        OpRegCopy(v_map->core_reg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        OpRegCopy(v_map->FpReg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else {
        need_flush = true;
      }

      // For wide args, force flush if not fully promoted
      if (t_loc->wide) {
        // p_map is the promotion record of the other half of the pair.
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        // Is only half promoted?
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
          /*
           * In Arm, a double is represented as a pair of consecutive single float
           * registers starting at an even number. It's possible that both Dalvik vRegs
           * representing the incoming double were independently promoted as singles - but
           * not in a form usable as a double. If so, we need to flush - even though the
           * incoming arg appears fully in register. At this point in the code, both
           * halves of the double are promoted. Make sure they are in a usable form.
           */
          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
          int low_reg = promotion_map_[lowreg_index].FpReg;
          int high_reg = promotion_map_[lowreg_index + 1].FpReg;
          // Usable double = even-numbered low single + the single right above it.
          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
            need_flush = true;
          }
        }
      }
      if (need_flush) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                      TargetReg(arg_regs[i]), kWord);
      }
    } else {
      // If arriving in frame & promoted
      if (v_map->core_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->core_reg);
      }
      if (v_map->fp_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->FpReg);
      }
    }
  }
}
369
/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in static & direct invoke sequences.
 * Called repeatedly with an incrementing |state|; emits one step per call and
 * returns the next state, or -1 when the sequence is complete.
 */
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                          int state, const MethodReference& target_method,
                          uint32_t unused,
                          uintptr_t direct_code, uintptr_t direct_method,
                          InvokeType type) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (direct_code != 0 && direct_method != 0) {
    // Fast path: both the code address and the Method* are known (or will be
    // patched in) — a single state sets up kInvokeTgt and kArg0.
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        if (direct_code != static_cast<unsigned int>(-1)) {
          // Known code address; x86 calls through memory so no load is needed there.
          if (cu->instruction_set != kX86) {
            cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
          }
        } else {
          // Address not yet known (-1 sentinel): emit a PC-relative load from a
          // literal-pool slot to be patched later.
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                                 target_method.dex_method_index, 0);
          if (data_target == NULL) {
            data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
            data_target->operands[1] = type;
          }
          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
          cg->AppendLIR(load_pc_rel);
          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
        }
        if (direct_method != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
        } else {
          // Method* not yet known: same literal-pool patching scheme as above.
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
                                                 target_method.dex_method_index, 0);
          if (data_target == NULL) {
            data_target = cg->AddWordData(&cg->method_literal_list_, target_method.dex_method_index);
            data_target->operands[1] = type;
          }
          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
          cg->AppendLIR(load_pc_rel);
          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
        }
        break;
      default:
        return -1;
    }
  } else {
    // Slow path: resolve the target Method* through the caller's dex cache.
    switch (state) {
      case 0:  // Get the current Method* [sets kArg0]
        // TUNING: we can save a reg copy if Method* has been promoted.
        cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
        break;
      case 1:  // Get method->dex_cache_resolved_methods_
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
        // Set up direct code if known.
        if (direct_code != 0) {
          if (direct_code != static_cast<unsigned int>(-1)) {
            cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
          } else {
            // Code address to be patched later via the literal pool.
            CHECK_EQ(cu->dex_file, target_method.dex_file);
            CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
            LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                                   target_method.dex_method_index, 0);
            if (data_target == NULL) {
              data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
              data_target->operands[1] = type;
            }
            LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
            cg->AppendLIR(load_pc_rel);
            DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
          }
        }
        break;
      case 2:  // Grab target method*
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        // Index into the resolved-methods array (4 bytes per entry).
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                         (target_method.dex_method_index * 4),
                         cg->TargetReg(kArg0));
        break;
      case 3:  // Grab the code from the method*
        if (cu->instruction_set != kX86) {
          if (direct_code == 0) {
            cg->LoadWordDisp(cg->TargetReg(kArg0),
                             mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                             cg->TargetReg(kInvokeTgt));
          }
          break;
        }
        // Intentional fallthrough for x86
      default:
        return -1;
    }
  }
  return state + 1;
}
468
/*
 * Bit of a hack here - in the absence of a real scheduling pass,
 * emit the next instruction in a virtual invoke sequence.
 * We can use kLr as a temp prior to target address loading
 * Note also that we'll load the first argument ("this") into
 * kArg1 here rather than the standard LoadArgRegs.
 * Returns the next state, or -1 when the sequence is complete.
 */
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                         int state, const MethodReference& target_method,
                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
                         InvokeType unused3) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This is the fast path in which the target virtual method is
   * fully resolved at compile time.
   */
  switch (state) {
    case 0: {  // Get "this" [set kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 1:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get target method [use kInvokeTgt, set kArg0]
      // vtable slots are 4 bytes each.
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86
    default:
      return -1;
  }
  return state + 1;
}
519
/*
 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
 * more than one interface method map to the same index. Note also that we'll load the first
 * argument ("this") into kArg1 here rather than the standard LoadArgRegs.
 * Returns the next state, or -1 when the sequence is complete.
 */
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
                                 uint32_t method_idx, uintptr_t unused,
                                 uintptr_t direct_method, InvokeType unused2) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());

  switch (state) {
    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
      CHECK_EQ(cu->dex_file, target_method.dex_file);
      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
      // The conflict trampoline reads the method index from the hidden argument.
      cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index);
      if (cu->instruction_set == kX86) {
        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg), cg->TargetReg(kHiddenArg));
      }
      break;
    case 1: {  // Get "this" [set kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 2:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // Get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 4:  // Get target method [use kInvokeTgt, set kArg0]
      // IMT slot = method index modulo table size; 4 bytes per entry.
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), ((method_idx % ClassLinker::kImtSize) * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 5:  // Get the compiled code address [use kArg0, set kInvokeTgt]
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86
    default:
      return -1;
  }
  return state + 1;
}
574
// Shared slow-path step generator: set up a call through a runtime trampoline
// (loaded from a Thread-relative offset) with the method index in kArg0.
// Single-state sequence: returns 1 after state 0, then -1 (done).
static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset trampoline,
                            int state, const MethodReference& target_method,
                            uint32_t method_idx) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This handles the case in which the base method is not fully
   * resolved at compile time, we bail to a runtime helper.
   */
  if (state == 0) {
    if (cu->instruction_set != kX86) {
      // Load trampoline target (x86 calls through Thread-relative memory directly).
      cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
    }
    // Load kArg0 with method index
    CHECK_EQ(cu->dex_file, target_method.dex_file);
    cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
    return 1;
  }
  return -1;
}
595
596static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
597 int state,
598 const MethodReference& target_method,
599 uint32_t method_idx,
600 uintptr_t unused, uintptr_t unused2,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700601 InvokeType unused3) {
Ian Rogers848871b2013-08-05 10:56:33 -0700602 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700603 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
604}
605
606static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
607 const MethodReference& target_method,
608 uint32_t method_idx, uintptr_t unused,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700609 uintptr_t unused2, InvokeType unused3) {
Ian Rogers848871b2013-08-05 10:56:33 -0700610 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700611 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
612}
613
614static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
615 const MethodReference& target_method,
616 uint32_t method_idx, uintptr_t unused,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700617 uintptr_t unused2, InvokeType unused3) {
Ian Rogers848871b2013-08-05 10:56:33 -0700618 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700619 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
620}
621
622static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
623 const MethodReference& target_method,
624 uint32_t method_idx, uintptr_t unused,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700625 uintptr_t unused2, InvokeType unused3) {
Ian Rogers848871b2013-08-05 10:56:33 -0700626 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700627 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
628}
629
630static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
631 CallInfo* info, int state,
632 const MethodReference& target_method,
633 uint32_t unused,
634 uintptr_t unused2, uintptr_t unused3,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700635 InvokeType unused4) {
Ian Rogers848871b2013-08-05 10:56:33 -0700636 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700637 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
638}
639
// Load up to three argument words into the kArg1..kArg3 argument registers,
// invoking |next_call_insn| after each load to interleave the invoke sequence.
// If |skip_this| the first word (the receiver) is assumed already loaded.
// Returns the updated invoke-sequence state.
int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
                         NextCallInsn next_call_insn,
                         const MethodReference& target_method,
                         uint32_t vtable_idx, uintptr_t direct_code,
                         uintptr_t direct_method, InvokeType type, bool skip_this) {
  int last_arg_reg = TargetReg(kArg3);
  int next_reg = TargetReg(kArg1);
  int next_arg = 0;
  if (skip_this) {
    // Receiver already handled (e.g. by NextVCallInsn); start at the next slot.
    next_reg++;
    next_arg++;
  }
  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
    RegLocation rl_arg = info->args[next_arg++];
    rl_arg = UpdateRawLoc(rl_arg);
    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
      // Wide value fits in a register pair: consume two slots and two arg words.
      LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
      next_reg++;
      next_arg++;
    } else {
      if (rl_arg.wide) {
        // Only the low word fits in the last register; load it as a narrow
        // value (the high word is handled elsewhere — it stays in the frame).
        rl_arg.wide = false;
        rl_arg.is_const = false;
      }
      LoadValueDirectFixed(rl_arg, next_reg);
    }
    // Emit the next step of the invoke sequence between argument loads.
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  }
  return call_state;
}
671
672/*
673 * Load up to 5 arguments, the first three of which will be in
674 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
675 * and as part of the load sequence, it must be replaced with
676 * the target method pointer. Note, this may also be called
677 * for "range" variants if the number of arguments is 5 or fewer.
678 */
679int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
680 int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
681 const MethodReference& target_method,
682 uint32_t vtable_idx, uintptr_t direct_code,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700683 uintptr_t direct_method, InvokeType type, bool skip_this) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700684 RegLocation rl_arg;
685
686 /* If no arguments, just return */
687 if (info->num_arg_words == 0)
688 return call_state;
689
690 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
691 direct_code, direct_method, type);
692
693 DCHECK_LE(info->num_arg_words, 5);
694 if (info->num_arg_words > 3) {
695 int32_t next_use = 3;
Brian Carlstrom7934ac22013-07-26 10:54:15 -0700696 // Detect special case of wide arg spanning arg3/arg4
Brian Carlstrom7940e442013-07-12 13:46:57 -0700697 RegLocation rl_use0 = info->args[0];
698 RegLocation rl_use1 = info->args[1];
699 RegLocation rl_use2 = info->args[2];
700 if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
701 rl_use2.wide) {
702 int reg = -1;
703 // Wide spans, we need the 2nd half of uses[2].
704 rl_arg = UpdateLocWide(rl_use2);
705 if (rl_arg.location == kLocPhysReg) {
706 reg = rl_arg.high_reg;
707 } else {
708 // kArg2 & rArg3 can safely be used here
709 reg = TargetReg(kArg3);
710 LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
711 call_state = next_call_insn(cu_, info, call_state, target_method,
712 vtable_idx, direct_code, direct_method, type);
713 }
714 StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
715 StoreBaseDisp(TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord);
716 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
717 direct_code, direct_method, type);
718 next_use++;
719 }
720 // Loop through the rest
721 while (next_use < info->num_arg_words) {
722 int low_reg;
723 int high_reg = -1;
724 rl_arg = info->args[next_use];
725 rl_arg = UpdateRawLoc(rl_arg);
726 if (rl_arg.location == kLocPhysReg) {
727 low_reg = rl_arg.low_reg;
728 high_reg = rl_arg.high_reg;
729 } else {
730 low_reg = TargetReg(kArg2);
731 if (rl_arg.wide) {
732 high_reg = TargetReg(kArg3);
733 LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
734 } else {
735 LoadValueDirectFixed(rl_arg, low_reg);
736 }
737 call_state = next_call_insn(cu_, info, call_state, target_method,
738 vtable_idx, direct_code, direct_method, type);
739 }
740 int outs_offset = (next_use + 1) * 4;
741 if (rl_arg.wide) {
742 StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
743 next_use += 2;
744 } else {
745 StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
746 next_use++;
747 }
748 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
749 direct_code, direct_method, type);
750 }
751 }
752
753 call_state = LoadArgRegs(info, call_state, next_call_insn,
754 target_method, vtable_idx, direct_code, direct_method,
755 type, skip_this);
756
757 if (pcrLabel) {
758 *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
759 }
760 return call_state;
761}
762
/*
 * May have 0+ arguments (also used for jumbo).  Note that
 * source virtual registers may be in physical registers, so may
 * need to be flushed to home location before copying.  This
 * applies to arg3 and above (see below).
 *
 * Two general strategies:
 *    If < 20 arguments
 *       Pass args 3-18 using vldm/vstm block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *    If 20+ arguments
 *       Pass args arg19+ using memcpy block copy
 *       Pass arg0, arg1 & arg2 in kArg1-kArg3
 *
 */
// Returns the updated invoke state; next_call_insn() is interleaved between
// memory operations so the invoke state machine makes progress throughout.
int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                LIR** pcrLabel, NextCallInsn next_call_insn,
                                const MethodReference& target_method,
                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                InvokeType type, bool skip_this) {
  // If we can treat it as non-range (Jumbo ops will use range form)
  if (info->num_arg_words <= 5)
    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
                                next_call_insn, target_method, vtable_idx,
                                direct_code, direct_method, type, skip_this);
  /*
   * First load the non-register arguments.  Both forms expect all
   * of the source arguments to be in their home frame location, so
   * scan the s_reg names and flush any that have been promoted to
   * frame backing storage.
   */
  // Scan the rest of the args - if in phys_reg flush to memory
  for (int next_arg = 0; next_arg < info->num_arg_words;) {
    RegLocation loc = info->args[next_arg];
    if (loc.wide) {
      loc = UpdateLocWide(loc);
      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                          loc.low_reg, loc.high_reg);
      }
      next_arg += 2;
    } else {
      loc = UpdateLoc(loc);
      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.low_reg, kWord);
      }
      next_arg++;
    }
  }

  // Logic below assumes that Method pointer is at offset zero from SP.
  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

  // The first 3 arguments are passed via registers.
  // TODO: For 64-bit, instead of hardcoding 4 for Method* size, we should either
  // get size of uintptr_t or size of object reference according to model being used.
  int outs_offset = 4 /* Method* */ + (3 * sizeof(uint32_t));
  int start_offset = SRegOffset(info->args[3].s_reg_low);
  int regs_left_to_pass_via_stack = info->num_arg_words - 3;
  DCHECK_GT(regs_left_to_pass_via_stack, 0);

  if (cu_->instruction_set == kThumb2 && regs_left_to_pass_via_stack <= 16) {
    // Use vldm/vstm pair using kArg3 as a temp
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
    LIR* ld = OpVldm(TargetReg(kArg3), regs_left_to_pass_via_stack);
    // TUNING: loosen barrier
    ld->u.m.def_mask = ENCODE_ALL;
    SetMemRefType(ld, true /* is_load */, kDalvikReg);
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    // Re-point kArg3 at the outs area (one word past Method* plus 3 arg slots).
    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    LIR* st = OpVstm(TargetReg(kArg3), regs_left_to_pass_via_stack);
    SetMemRefType(st, false /* is_load */, kDalvikReg);
    st->u.m.def_mask = ENCODE_ALL;
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  } else if (cu_->instruction_set == kX86) {
    // x86: copy the args block with the widest moves alignment allows,
    // preferring 128-bit xmm moves and falling back to 32-bit GP moves.
    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a
       * a 128-bit move because we won't get the chance to try to aligned. If there are more than
       * 4 registers left to move, consider doing a 128-bit only if either src or dest are aligned.
       * We do this because we could potentially do a smaller move to align.
       */
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available.
        int temp = AllocTempDouble();
        CHECK_GT(temp, 0);

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1), kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1), temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
        }

        // TODO If we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
          } else {
            // Set barrier for 128-bit load.
            SetMemRefType(ld1, true /* is_load */, kDalvikReg);
            ld1->u.m.def_mask = ENCODE_ALL;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
          } else {
            // Set barrier for 128-bit store.
            SetMemRefType(st1, false /* is_load */, kDalvikReg);
            st1->u.m.def_mask = ENCODE_ALL;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        int temp = TargetReg(kArg3);

        // Now load the argument VR and store to the outs.
        LoadWordDisp(TargetReg(kSp), current_src_offset, temp);
        StoreWordDisp(TargetReg(kSp), current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
  } else {
    // Generate memcpy
    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
    CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
  }

  // Finally put the first three arguments into kArg1..kArg3.
  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}
966
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700967RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700968 RegLocation res;
969 if (info->result.location == kLocInvalid) {
970 res = GetReturn(false);
971 } else {
972 res = info->result;
973 }
974 return res;
975}
976
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700977RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700978 RegLocation res;
979 if (info->result.location == kLocInvalid) {
980 res = GetReturnWide(false);
981 } else {
982 res = info->result;
983 }
984 return res;
985}
986
// Inlines String.charAt(int): loads the character directly from the backing
// char array, with an optional bounds check that retries through a launch pad
// (slow path) on violation.  Non-x86 and x86 branches differ only in the order
// of temp allocation vs. the range check, to limit x86 register pressure.
bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Location of reference to data array
  int value_offset = mirror::String::ValueOffset().Int32Value();
  // Location of count
  int count_offset = mirror::String::CountOffset().Int32Value();
  // Starting offset within data array
  int offset_offset = mirror::String::OffsetOffset().Int32Value();
  // Start of char data with array_
  int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();

  RegLocation rl_obj = info->args[0];
  RegLocation rl_idx = info->args[1];
  rl_obj = LoadValue(rl_obj, kCoreReg);
  rl_idx = LoadValue(rl_idx, kCoreReg);
  int reg_max;
  GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
  bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
  LIR* launch_pad = NULL;
  int reg_off = INVALID_REG;
  int reg_ptr = INVALID_REG;
  if (cu_->instruction_set != kX86) {
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
    }
    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
    if (range_check) {
      // Set up a launch pad to allow retry in case of bounds violation.
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
      intrinsic_launchpads_.Insert(launch_pad);
      // Unsigned compare also catches negative indices.
      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
      FreeTemp(reg_max);
      OpCondBranch(kCondUge, launch_pad);
    }
  } else {
    if (range_check) {
      reg_max = AllocTemp();
      LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
      // Set up a launch pad to allow retry in case of bounds violation.
      launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
      intrinsic_launchpads_.Insert(launch_pad);
      OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
      FreeTemp(reg_max);
      OpCondBranch(kCondUge, launch_pad);
    }
    reg_off = AllocTemp();
    reg_ptr = AllocTemp();
    LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
    LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
  }
  // Address of char = array + data_offset + 2 * (string_offset + index).
  OpRegImm(kOpAdd, reg_ptr, data_offset);
  OpRegReg(kOpAdd, reg_off, rl_idx.low_reg);
  FreeTemp(rl_obj.low_reg);
  FreeTemp(rl_idx.low_reg);
  RegLocation rl_dest = InlineTarget(info);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
  FreeTemp(reg_off);
  FreeTemp(reg_ptr);
  StoreValue(rl_dest, rl_result);
  if (range_check) {
    launch_pad->operands[2] = 0;  // no resumption
  }
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  return true;
}
1061
1062// Generates an inlined String.is_empty or String.length.
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001063bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001064 if (cu_->instruction_set == kMips) {
1065 // TODO - add Mips implementation
1066 return false;
1067 }
1068 // dst = src.length();
1069 RegLocation rl_obj = info->args[0];
1070 rl_obj = LoadValue(rl_obj, kCoreReg);
1071 RegLocation rl_dest = InlineTarget(info);
1072 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1073 GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
1074 LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg);
1075 if (is_empty) {
1076 // dst = (dst == 0);
1077 if (cu_->instruction_set == kThumb2) {
1078 int t_reg = AllocTemp();
1079 OpRegReg(kOpNeg, t_reg, rl_result.low_reg);
1080 OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
1081 } else {
1082 DCHECK_EQ(cu_->instruction_set, kX86);
1083 OpRegImm(kOpSub, rl_result.low_reg, 1);
1084 OpRegImm(kOpLsr, rl_result.low_reg, 31);
1085 }
1086 }
1087 StoreValue(rl_dest, rl_result);
1088 return true;
1089}
1090
Vladimir Marko6bdf1ff2013-10-29 17:40:46 +00001091bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1092 if (cu_->instruction_set == kMips) {
1093 // TODO - add Mips implementation
1094 return false;
1095 }
1096 RegLocation rl_src_i = info->args[0];
1097 RegLocation rl_dest = InlineTarget(info); // result reg
1098 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1099 if (size == kLong) {
1100 RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
Vladimir Markof246af22013-11-27 12:30:15 +00001101 int r_i_low = rl_i.low_reg;
1102 if (rl_i.low_reg == rl_result.low_reg) {
1103 // First REV shall clobber rl_result.low_reg, save the value in a temp for the second REV.
1104 r_i_low = AllocTemp();
1105 OpRegCopy(r_i_low, rl_i.low_reg);
1106 }
Vladimir Marko6bdf1ff2013-10-29 17:40:46 +00001107 OpRegReg(kOpRev, rl_result.low_reg, rl_i.high_reg);
Vladimir Markof246af22013-11-27 12:30:15 +00001108 OpRegReg(kOpRev, rl_result.high_reg, r_i_low);
1109 if (rl_i.low_reg == rl_result.low_reg) {
1110 FreeTemp(r_i_low);
1111 }
Vladimir Marko6bdf1ff2013-10-29 17:40:46 +00001112 StoreValueWide(rl_dest, rl_result);
1113 } else {
1114 DCHECK(size == kWord || size == kSignedHalf);
1115 OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
1116 RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
1117 OpRegReg(op, rl_result.low_reg, rl_i.low_reg);
1118 StoreValue(rl_dest, rl_result);
1119 }
1120 return true;
1121}
1122
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001123bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001124 if (cu_->instruction_set == kMips) {
1125 // TODO - add Mips implementation
1126 return false;
1127 }
1128 RegLocation rl_src = info->args[0];
1129 rl_src = LoadValue(rl_src, kCoreReg);
1130 RegLocation rl_dest = InlineTarget(info);
1131 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1132 int sign_reg = AllocTemp();
1133 // abs(x) = y<=x>>31, (x+y)^y.
1134 OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31);
1135 OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
1136 OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
1137 StoreValue(rl_dest, rl_result);
1138 return true;
1139}
1140
// Inlines Math.abs(long) using the branch-free sign-mask idiom computed on a
// 32-bit register pair: y = x >> 63 (sign), abs = (x + y) ^ y.
bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  if (cu_->instruction_set == kThumb2) {
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    int sign_reg = AllocTemp();
    // sign_reg = x >> 63 replicated into one word; add/adc propagates the
    // carry across the pair, then xor both halves completes (x + y) ^ y.
    OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31);
    OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
    OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  } else {
    DCHECK_EQ(cu_->instruction_set, kX86);
    // Reuse source registers to avoid running out of temps
    RegLocation rl_src = info->args[0];
    rl_src = LoadValueWide(rl_src, kCoreReg);
    RegLocation rl_dest = InlineTargetWide(info);
    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    // Copy into the result pair first, then free the source halves so the
    // sign temp can be allocated without exhausting x86 registers.
    OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
    FreeTemp(rl_src.low_reg);
    FreeTemp(rl_src.high_reg);
    int sign_reg = AllocTemp();
    // Same idiom as above, in two-operand form operating in place.
    OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31);
    OpRegReg(kOpAdd, rl_result.low_reg, sign_reg);
    OpRegReg(kOpAdc, rl_result.high_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
    OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
    StoreValueWide(rl_dest, rl_result);
    return true;
  }
}
1181
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001182bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001183 if (cu_->instruction_set == kMips) {
1184 // TODO - add Mips implementation
1185 return false;
1186 }
1187 RegLocation rl_src = info->args[0];
1188 RegLocation rl_dest = InlineTarget(info);
1189 StoreValue(rl_dest, rl_src);
1190 return true;
1191}
1192
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001193bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001194 if (cu_->instruction_set == kMips) {
1195 // TODO - add Mips implementation
1196 return false;
1197 }
1198 RegLocation rl_src = info->args[0];
1199 RegLocation rl_dest = InlineTargetWide(info);
1200 StoreValueWide(rl_dest, rl_src);
1201 return true;
1202}
1203
/*
 * Fast string.index_of(I) & (II).  Tests for simple case of char <= 0xffff,
 * otherwise bails to standard library code.
 */
bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // The helper is called with the fixed argument registers, so claim them.
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers
  int reg_ptr = TargetReg(kArg0);
  int reg_char = TargetReg(kArg1);
  int reg_start = TargetReg(kArg2);

  RegLocation rl_obj = info->args[0];
  RegLocation rl_char = info->args[1];
  RegLocation rl_start = info->args[2];
  LoadValueDirectFixed(rl_obj, reg_ptr);
  LoadValueDirectFixed(rl_char, reg_char);
  if (zero_based) {
    // indexOf(I): the search always starts at index 0.
    LoadConstant(reg_start, 0);
  } else {
    LoadValueDirectFixed(rl_start, reg_start);
  }
  // Non-x86 targets load the helper address into a register up front;
  // x86 calls through thread-relative memory instead.
  int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0;
  GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
  intrinsic_launchpads_.Insert(launch_pad);
  // Characters above 0xFFFF (supplementary code points) take the slow path.
  OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf));
  }
  LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
  // The slow path resumes here after the retry.
  launch_pad->operands[2] = WrapPointer(resume_tgt);
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}
1249
/* Fast string.compareTo(Ljava/lang/string;)I. */
// Calls the pStringCompareTo runtime helper directly; a null argument
// diverts to the intrinsic-retry launch pad (which will throw).
bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // The helper is called with the fixed argument registers, so claim them.
  ClobberCallerSave();
  LockCallTemps();  // Using fixed registers
  int reg_this = TargetReg(kArg0);
  int reg_cmp = TargetReg(kArg1);

  RegLocation rl_this = info->args[0];
  RegLocation rl_cmp = info->args[1];
  LoadValueDirectFixed(rl_this, reg_this);
  LoadValueDirectFixed(rl_cmp, reg_cmp);
  // Non-x86 targets load the helper address into a register up front;
  // x86 calls through thread-relative memory instead.
  int r_tgt = (cu_->instruction_set != kX86) ?
      LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
  GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
  // TUNING: check if rl_cmp.s_reg_low is already null checked
  LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
  intrinsic_launchpads_.Insert(launch_pad);
  OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
  // NOTE: not a safepoint
  if (cu_->instruction_set != kX86) {
    OpReg(kOpBlx, r_tgt);
  } else {
    OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo));
  }
  launch_pad->operands[2] = 0;  // No return possible
  // Record that we've already inlined & null checked
  info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
  RegLocation rl_return = GetReturn(false);
  RegLocation rl_dest = InlineTarget(info);
  StoreValue(rl_dest, rl_return);
  return true;
}
1286
1287bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1288 RegLocation rl_dest = InlineTarget(info);
1289 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
Ian Rogers848871b2013-08-05 10:56:33 -07001290 ThreadOffset offset = Thread::PeerOffset();
Brian Carlstrom7940e442013-07-12 13:46:57 -07001291 if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
Ian Rogers848871b2013-08-05 10:56:33 -07001292 LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.low_reg);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001293 } else {
1294 CHECK(cu_->instruction_set == kX86);
Brian Carlstrom2d888622013-07-18 17:02:00 -07001295 reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001296 }
1297 StoreValue(rl_dest, rl_result);
1298 return true;
1299}
1300
1301bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
1302 bool is_long, bool is_volatile) {
1303 if (cu_->instruction_set == kMips) {
1304 // TODO - add Mips implementation
1305 return false;
1306 }
1307 // Unused - RegLocation rl_src_unsafe = info->args[0];
1308 RegLocation rl_src_obj = info->args[1]; // Object
1309 RegLocation rl_src_offset = info->args[2]; // long low
1310 rl_src_offset.wide = 0; // ignore high half in info->args[3]
1311 RegLocation rl_dest = InlineTarget(info); // result reg
1312 if (is_volatile) {
1313 GenMemBarrier(kLoadLoad);
1314 }
1315 RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
1316 RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
1317 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1318 if (is_long) {
1319 OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
1320 LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
1321 StoreValueWide(rl_dest, rl_result);
1322 } else {
1323 LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
1324 StoreValue(rl_dest, rl_result);
1325 }
1326 return true;
1327}
1328
// Inlines sun.misc.Unsafe.put{Int,Long,Object}[Volatile/Ordered].
// args: [0] the Unsafe object (unused), [1] target object, [2..3] long field
// offset (only the low half is used), [4] value to store (args[4..5] if wide).
bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                  bool is_object, bool is_volatile, bool is_ordered) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_value = info->args[4];  // value to store
  if (is_volatile || is_ordered) {
    // Keep earlier stores from reordering past this store.
    GenMemBarrier(kStoreStore);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_value;
  if (is_long) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
    // No wide indexed store available: fold the offset into the base first
    // (rl_object holds a temp copy, so clobbering it is safe).
    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
    StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
  } else {
    rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
  }

  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
  FreeTemp(rl_offset.low_reg);
  if (is_volatile) {
    // Volatile stores additionally need a StoreLoad barrier after the store.
    GenMemBarrier(kStoreLoad);
  }
  if (is_object) {
    // Reference store: dirty the card so the GC sees the new reference.
    MarkGCCard(rl_value.low_reg, rl_object.low_reg);
  }
  return true;
}
1365
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001366void Mir2Lir::GenInvoke(CallInfo* info) {
Vladimir Marko5c96e6b2013-11-14 15:34:17 +00001367 if (!(info->opt_flags & MIR_INLINED)) {
Vladimir Marko5816ed42013-11-27 17:04:20 +00001368 DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
1369 if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
1370 ->GenIntrinsic(this, info)) {
Vladimir Marko5c96e6b2013-11-14 15:34:17 +00001371 return;
1372 }
Brian Carlstrom7940e442013-07-12 13:46:57 -07001373 }
1374 InvokeType original_type = info->type; // avoiding mutation by ComputeInvokeInfo
1375 int call_state = 0;
1376 LIR* null_ck;
1377 LIR** p_null_ck = NULL;
1378 NextCallInsn next_call_insn;
1379 FlushAllRegs(); /* Everything to home location */
1380 // Explicit register usage
1381 LockCallTemps();
1382
1383 DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit();
1384 MethodReference target_method(cUnit->GetDexFile(), info->index);
1385 int vtable_idx;
1386 uintptr_t direct_code;
1387 uintptr_t direct_method;
1388 bool skip_this;
1389 bool fast_path =
1390 cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
1391 current_dalvik_offset_,
Ian Rogers65ec92c2013-09-06 10:49:58 -07001392 true, true,
1393 &info->type, &target_method,
1394 &vtable_idx,
1395 &direct_code, &direct_method) && !SLOW_INVOKE_PATH;
Brian Carlstrom7940e442013-07-12 13:46:57 -07001396 if (info->type == kInterface) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001397 next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
Jeff Hao88474b42013-10-23 16:24:40 -07001398 skip_this = fast_path;
Brian Carlstrom7940e442013-07-12 13:46:57 -07001399 } else if (info->type == kDirect) {
1400 if (fast_path) {
1401 p_null_ck = &null_ck;
1402 }
1403 next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
1404 skip_this = false;
1405 } else if (info->type == kStatic) {
1406 next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
1407 skip_this = false;
1408 } else if (info->type == kSuper) {
1409 DCHECK(!fast_path); // Fast path is a direct call.
1410 next_call_insn = NextSuperCallInsnSP;
1411 skip_this = false;
1412 } else {
1413 DCHECK_EQ(info->type, kVirtual);
1414 next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
1415 skip_this = fast_path;
1416 }
1417 if (!info->is_range) {
1418 call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
1419 next_call_insn, target_method,
1420 vtable_idx, direct_code, direct_method,
1421 original_type, skip_this);
1422 } else {
1423 call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
1424 next_call_insn, target_method, vtable_idx,
1425 direct_code, direct_method, original_type,
1426 skip_this);
1427 }
1428 // Finish up any of the call sequence not interleaved in arg loading
1429 while (call_state >= 0) {
1430 call_state = next_call_insn(cu_, info, call_state, target_method,
1431 vtable_idx, direct_code, direct_method,
1432 original_type);
1433 }
1434 LIR* call_inst;
1435 if (cu_->instruction_set != kX86) {
1436 call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
1437 } else {
Jeff Hao88474b42013-10-23 16:24:40 -07001438 if (fast_path) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001439 call_inst = OpMem(kOpBlx, TargetReg(kArg0),
Brian Carlstromea46f952013-07-30 01:26:50 -07001440 mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value());
Brian Carlstrom7940e442013-07-12 13:46:57 -07001441 } else {
Ian Rogers848871b2013-08-05 10:56:33 -07001442 ThreadOffset trampoline(-1);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001443 switch (info->type) {
1444 case kInterface:
Jeff Hao88474b42013-10-23 16:24:40 -07001445 trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001446 break;
1447 case kDirect:
Ian Rogers7655f292013-07-29 11:07:13 -07001448 trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001449 break;
1450 case kStatic:
Ian Rogers7655f292013-07-29 11:07:13 -07001451 trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001452 break;
1453 case kSuper:
Ian Rogers7655f292013-07-29 11:07:13 -07001454 trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001455 break;
1456 case kVirtual:
Ian Rogers7655f292013-07-29 11:07:13 -07001457 trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001458 break;
1459 default:
1460 LOG(FATAL) << "Unexpected invoke type";
1461 }
1462 call_inst = OpThreadMem(kOpBlx, trampoline);
1463 }
1464 }
1465 MarkSafepointPC(call_inst);
1466
Vladimir Marko31c2aac2013-12-09 16:31:19 +00001467 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -07001468 if (info->result.location != kLocInvalid) {
1469 // We have a following MOVE_RESULT - do it now.
1470 if (info->result.wide) {
1471 RegLocation ret_loc = GetReturnWide(info->result.fp);
1472 StoreValueWide(info->result, ret_loc);
1473 } else {
1474 RegLocation ret_loc = GetReturn(info->result.fp);
1475 StoreValue(info->result, ret_loc);
1476 }
1477 }
1478}
1479
1480} // namespace art