blob: 9644c6e4c8041ca1cd156a85d14597caae9a1b27 [file] [log] [blame]
Brian Carlstrom7940e442013-07-12 13:46:57 -07001/*
2 * Copyright (C) 2012 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "dex/compiler_ir.h"
Vladimir Marko5c96e6b2013-11-14 15:34:17 +000018#include "dex/frontend.h"
19#include "dex/quick/dex_file_method_inliner.h"
20#include "dex/quick/dex_file_to_method_inliner_map.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070021#include "dex_file-inl.h"
Ian Rogers166db042013-07-26 12:05:57 -070022#include "entrypoints/quick/quick_entrypoints.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070023#include "invoke_type.h"
24#include "mirror/array.h"
25#include "mirror/string.h"
26#include "mir_to_lir-inl.h"
Brian Carlstrom7940e442013-07-12 13:46:57 -070027#include "x86/codegen_x86.h"
28
29namespace art {
30
/*
 * This source file contains "gen" codegen routines that should
 * be applicable to most targets. Only mid-level support utilities
 * and "op" calls may be used here.
 */
36
/*
 * To save scheduling time, helper calls are broken into two parts: generation of
 * the helper target address, and the actual call to the helper. Because x86
 * has a memory call operation, part 1 is a NOP for x86. For other targets,
 * load arguments between the two parts.
 */
Ian Rogers848871b2013-08-05 10:56:33 -070043int Mir2Lir::CallHelperSetup(ThreadOffset helper_offset) {
Brian Carlstrom7940e442013-07-12 13:46:57 -070044 return (cu_->instruction_set == kX86) ? 0 : LoadHelper(helper_offset);
45}
46
47/* NOTE: if r_tgt is a temp, it will be freed following use */
Ian Rogers848871b2013-08-05 10:56:33 -070048LIR* Mir2Lir::CallHelper(int r_tgt, ThreadOffset helper_offset, bool safepoint_pc) {
Brian Carlstrom7940e442013-07-12 13:46:57 -070049 LIR* call_inst;
50 if (cu_->instruction_set == kX86) {
51 call_inst = OpThreadMem(kOpBlx, helper_offset);
52 } else {
53 call_inst = OpReg(kOpBlx, r_tgt);
54 FreeTemp(r_tgt);
55 }
56 if (safepoint_pc) {
57 MarkSafepointPC(call_inst);
58 }
59 return call_inst;
60}
61
Ian Rogers848871b2013-08-05 10:56:33 -070062void Mir2Lir::CallRuntimeHelperImm(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
Brian Carlstrom7940e442013-07-12 13:46:57 -070063 int r_tgt = CallHelperSetup(helper_offset);
64 LoadConstant(TargetReg(kArg0), arg0);
Vladimir Marko31c2aac2013-12-09 16:31:19 +000065 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -070066 CallHelper(r_tgt, helper_offset, safepoint_pc);
67}
68
Ian Rogers848871b2013-08-05 10:56:33 -070069void Mir2Lir::CallRuntimeHelperReg(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
Brian Carlstrom7940e442013-07-12 13:46:57 -070070 int r_tgt = CallHelperSetup(helper_offset);
71 OpRegCopy(TargetReg(kArg0), arg0);
Vladimir Marko31c2aac2013-12-09 16:31:19 +000072 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -070073 CallHelper(r_tgt, helper_offset, safepoint_pc);
74}
75
Ian Rogers848871b2013-08-05 10:56:33 -070076void Mir2Lir::CallRuntimeHelperRegLocation(ThreadOffset helper_offset, RegLocation arg0,
77 bool safepoint_pc) {
Brian Carlstrom7940e442013-07-12 13:46:57 -070078 int r_tgt = CallHelperSetup(helper_offset);
79 if (arg0.wide == 0) {
80 LoadValueDirectFixed(arg0, TargetReg(kArg0));
81 } else {
82 LoadValueDirectWideFixed(arg0, TargetReg(kArg0), TargetReg(kArg1));
83 }
Vladimir Marko31c2aac2013-12-09 16:31:19 +000084 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -070085 CallHelper(r_tgt, helper_offset, safepoint_pc);
86}
87
Ian Rogers848871b2013-08-05 10:56:33 -070088void Mir2Lir::CallRuntimeHelperImmImm(ThreadOffset helper_offset, int arg0, int arg1,
Brian Carlstrom7940e442013-07-12 13:46:57 -070089 bool safepoint_pc) {
90 int r_tgt = CallHelperSetup(helper_offset);
91 LoadConstant(TargetReg(kArg0), arg0);
92 LoadConstant(TargetReg(kArg1), arg1);
Vladimir Marko31c2aac2013-12-09 16:31:19 +000093 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -070094 CallHelper(r_tgt, helper_offset, safepoint_pc);
95}
96
Ian Rogers848871b2013-08-05 10:56:33 -070097void Mir2Lir::CallRuntimeHelperImmRegLocation(ThreadOffset helper_offset, int arg0,
Brian Carlstrom7940e442013-07-12 13:46:57 -070098 RegLocation arg1, bool safepoint_pc) {
99 int r_tgt = CallHelperSetup(helper_offset);
100 if (arg1.wide == 0) {
101 LoadValueDirectFixed(arg1, TargetReg(kArg1));
102 } else {
103 LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
104 }
105 LoadConstant(TargetReg(kArg0), arg0);
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000106 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -0700107 CallHelper(r_tgt, helper_offset, safepoint_pc);
108}
109
Ian Rogers848871b2013-08-05 10:56:33 -0700110void Mir2Lir::CallRuntimeHelperRegLocationImm(ThreadOffset helper_offset, RegLocation arg0, int arg1,
Brian Carlstrom7940e442013-07-12 13:46:57 -0700111 bool safepoint_pc) {
112 int r_tgt = CallHelperSetup(helper_offset);
113 LoadValueDirectFixed(arg0, TargetReg(kArg0));
114 LoadConstant(TargetReg(kArg1), arg1);
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000115 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -0700116 CallHelper(r_tgt, helper_offset, safepoint_pc);
117}
118
Ian Rogers848871b2013-08-05 10:56:33 -0700119void Mir2Lir::CallRuntimeHelperImmReg(ThreadOffset helper_offset, int arg0, int arg1,
Brian Carlstrom7940e442013-07-12 13:46:57 -0700120 bool safepoint_pc) {
121 int r_tgt = CallHelperSetup(helper_offset);
122 OpRegCopy(TargetReg(kArg1), arg1);
123 LoadConstant(TargetReg(kArg0), arg0);
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000124 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -0700125 CallHelper(r_tgt, helper_offset, safepoint_pc);
126}
127
Ian Rogers848871b2013-08-05 10:56:33 -0700128void Mir2Lir::CallRuntimeHelperRegImm(ThreadOffset helper_offset, int arg0, int arg1,
129 bool safepoint_pc) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700130 int r_tgt = CallHelperSetup(helper_offset);
131 OpRegCopy(TargetReg(kArg0), arg0);
132 LoadConstant(TargetReg(kArg1), arg1);
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000133 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -0700134 CallHelper(r_tgt, helper_offset, safepoint_pc);
135}
136
Ian Rogers848871b2013-08-05 10:56:33 -0700137void Mir2Lir::CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700138 int r_tgt = CallHelperSetup(helper_offset);
139 LoadCurrMethodDirect(TargetReg(kArg1));
140 LoadConstant(TargetReg(kArg0), arg0);
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000141 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -0700142 CallHelper(r_tgt, helper_offset, safepoint_pc);
143}
144
Hiroshi Yamauchibe1ca552014-01-15 11:46:48 -0800145void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc) {
146 int r_tgt = CallHelperSetup(helper_offset);
147 DCHECK_NE(TargetReg(kArg1), arg0);
148 if (TargetReg(kArg0) != arg0) {
149 OpRegCopy(TargetReg(kArg0), arg0);
150 }
151 LoadCurrMethodDirect(TargetReg(kArg1));
152 ClobberCallerSave();
153 CallHelper(r_tgt, helper_offset, safepoint_pc);
154}
155
Hiroshi Yamauchibb8f0ab2014-01-27 16:50:29 -0800156void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, int arg0,
157 RegLocation arg2, bool safepoint_pc) {
158 int r_tgt = CallHelperSetup(helper_offset);
159 DCHECK_NE(TargetReg(kArg1), arg0);
160 if (TargetReg(kArg0) != arg0) {
161 OpRegCopy(TargetReg(kArg0), arg0);
162 }
163 LoadCurrMethodDirect(TargetReg(kArg1));
164 LoadValueDirectFixed(arg2, TargetReg(kArg2));
165 ClobberCallerSave();
166 CallHelper(r_tgt, helper_offset, safepoint_pc);
167}
168
void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0,
                                                      RegLocation arg1, bool safepoint_pc) {
  // Helper call taking two values, each possibly wide and possibly fp.
  // MIPS routes fp values through kFArg* registers even for the second
  // argument; other targets use core kArg* registers for arg1 unless arg0
  // was wide (in which case arg1 starts at kArg2/kFArg2).
  int r_tgt = CallHelperSetup(helper_offset);
  if (arg0.wide == 0) {
    // Narrow arg0 occupies one register (fp variant when arg0 is fp).
    LoadValueDirectFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0));
    if (arg1.wide == 0) {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1));
      } else {
        LoadValueDirectFixed(arg1, TargetReg(kArg1));
      }
    } else {
      if (cu_->instruction_set == kMips) {
        LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg1), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg2));
      } else {
        LoadValueDirectWideFixed(arg1, TargetReg(kArg1), TargetReg(kArg2));
      }
    }
  } else {
    // Wide arg0 takes the first register pair; arg1 then starts at slot 2.
    LoadValueDirectWideFixed(arg0, arg0.fp ? TargetReg(kFArg0) : TargetReg(kArg0), arg0.fp ? TargetReg(kFArg1) : TargetReg(kArg1));
    if (arg1.wide == 0) {
      LoadValueDirectFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2));
    } else {
      LoadValueDirectWideFixed(arg1, arg1.fp ? TargetReg(kFArg2) : TargetReg(kArg2), arg1.fp ? TargetReg(kFArg3) : TargetReg(kArg3));
    }
  }
  ClobberCallerSave();
  CallHelper(r_tgt, helper_offset, safepoint_pc);
}
198
Ian Rogers848871b2013-08-05 10:56:33 -0700199void Mir2Lir::CallRuntimeHelperRegReg(ThreadOffset helper_offset, int arg0, int arg1,
200 bool safepoint_pc) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700201 int r_tgt = CallHelperSetup(helper_offset);
202 DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1
203 OpRegCopy(TargetReg(kArg0), arg0);
204 OpRegCopy(TargetReg(kArg1), arg1);
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000205 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -0700206 CallHelper(r_tgt, helper_offset, safepoint_pc);
207}
208
Ian Rogers848871b2013-08-05 10:56:33 -0700209void Mir2Lir::CallRuntimeHelperRegRegImm(ThreadOffset helper_offset, int arg0, int arg1,
Brian Carlstrom7940e442013-07-12 13:46:57 -0700210 int arg2, bool safepoint_pc) {
211 int r_tgt = CallHelperSetup(helper_offset);
212 DCHECK_NE(TargetReg(kArg0), arg1); // check copy into arg0 won't clobber arg1
213 OpRegCopy(TargetReg(kArg0), arg0);
214 OpRegCopy(TargetReg(kArg1), arg1);
215 LoadConstant(TargetReg(kArg2), arg2);
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000216 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -0700217 CallHelper(r_tgt, helper_offset, safepoint_pc);
218}
219
Ian Rogers848871b2013-08-05 10:56:33 -0700220void Mir2Lir::CallRuntimeHelperImmMethodRegLocation(ThreadOffset helper_offset,
Brian Carlstrom7940e442013-07-12 13:46:57 -0700221 int arg0, RegLocation arg2, bool safepoint_pc) {
222 int r_tgt = CallHelperSetup(helper_offset);
223 LoadValueDirectFixed(arg2, TargetReg(kArg2));
224 LoadCurrMethodDirect(TargetReg(kArg1));
225 LoadConstant(TargetReg(kArg0), arg0);
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000226 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -0700227 CallHelper(r_tgt, helper_offset, safepoint_pc);
228}
229
Ian Rogers848871b2013-08-05 10:56:33 -0700230void Mir2Lir::CallRuntimeHelperImmMethodImm(ThreadOffset helper_offset, int arg0,
Brian Carlstrom7940e442013-07-12 13:46:57 -0700231 int arg2, bool safepoint_pc) {
232 int r_tgt = CallHelperSetup(helper_offset);
233 LoadCurrMethodDirect(TargetReg(kArg1));
234 LoadConstant(TargetReg(kArg2), arg2);
235 LoadConstant(TargetReg(kArg0), arg0);
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000236 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -0700237 CallHelper(r_tgt, helper_offset, safepoint_pc);
238}
239
Ian Rogers848871b2013-08-05 10:56:33 -0700240void Mir2Lir::CallRuntimeHelperImmRegLocationRegLocation(ThreadOffset helper_offset,
Brian Carlstrom7940e442013-07-12 13:46:57 -0700241 int arg0, RegLocation arg1,
242 RegLocation arg2, bool safepoint_pc) {
243 int r_tgt = CallHelperSetup(helper_offset);
Ian Rogersa9a82542013-10-04 11:17:26 -0700244 DCHECK_EQ(arg1.wide, 0U);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700245 LoadValueDirectFixed(arg1, TargetReg(kArg1));
246 if (arg2.wide == 0) {
247 LoadValueDirectFixed(arg2, TargetReg(kArg2));
248 } else {
249 LoadValueDirectWideFixed(arg2, TargetReg(kArg2), TargetReg(kArg3));
250 }
251 LoadConstant(TargetReg(kArg0), arg0);
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000252 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -0700253 CallHelper(r_tgt, helper_offset, safepoint_pc);
254}
255
Ian Rogersa9a82542013-10-04 11:17:26 -0700256void Mir2Lir::CallRuntimeHelperRegLocationRegLocationRegLocation(ThreadOffset helper_offset,
257 RegLocation arg0, RegLocation arg1,
258 RegLocation arg2,
259 bool safepoint_pc) {
260 int r_tgt = CallHelperSetup(helper_offset);
261 DCHECK_EQ(arg0.wide, 0U);
262 LoadValueDirectFixed(arg0, TargetReg(kArg0));
263 DCHECK_EQ(arg1.wide, 0U);
264 LoadValueDirectFixed(arg1, TargetReg(kArg1));
265 DCHECK_EQ(arg1.wide, 0U);
266 LoadValueDirectFixed(arg2, TargetReg(kArg2));
Vladimir Marko31c2aac2013-12-09 16:31:19 +0000267 ClobberCallerSave();
Ian Rogersa9a82542013-10-04 11:17:26 -0700268 CallHelper(r_tgt, helper_offset, safepoint_pc);
269}
270
/*
 * If there are any ins passed in registers that have not been promoted
 * to a callee-save register, flush them to the frame. Perform initial
 * assignment of promoted arguments.
 *
 * ArgLocs is an array of location records describing the incoming arguments
 * with one location record per word of argument.
 */
void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
  /*
   * Dummy up a RegLocation for the incoming Method*
   * It will attempt to keep kArg0 live (or copy it to home location
   * if promoted).
   */
  RegLocation rl_src = rl_method;
  rl_src.location = kLocPhysReg;
  rl_src.low_reg = TargetReg(kArg0);
  rl_src.home = false;
  MarkLive(rl_src.low_reg, rl_src.s_reg_low);
  StoreValue(rl_method, rl_src);
  // If Method* has been promoted, explicitly flush
  if (rl_method.location == kLocPhysReg) {
    StoreWordDisp(TargetReg(kSp), 0, TargetReg(kArg0));
  }

  if (cu_->num_ins == 0)
    return;
  // The first three ins arrive in kArg1..kArg3; any further ins arrive on
  // the stack and are handled by the else-branch of the loop below.
  const int num_arg_regs = 3;
  static SpecialTargetRegister arg_regs[] = {kArg1, kArg2, kArg3};
  // Ins occupy the highest-numbered Dalvik vregs of the frame.
  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
  /*
   * Copy incoming arguments to their proper home locations.
   * NOTE: an older version of dx had an issue in which
   * it would reuse static method argument registers.
   * This could result in the same Dalvik virtual register
   * being promoted to both core and fp regs. To account for this,
   * we only copy to the corresponding promoted physical register
   * if it matches the type of the SSA name for the incoming
   * argument. It is also possible that long and double arguments
   * end up half-promoted. In those cases, we must flush the promoted
   * half to memory as well.
   */
  for (int i = 0; i < cu_->num_ins; i++) {
    PromotionMap* v_map = &promotion_map_[start_vreg + i];
    if (i < num_arg_regs) {
      // If arriving in register
      bool need_flush = true;
      RegLocation* t_loc = &ArgLocs[i];
      if ((v_map->core_location == kLocPhysReg) && !t_loc->fp) {
        // Promoted to a core reg and the in is a core value: copy directly.
        OpRegCopy(v_map->core_reg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else if ((v_map->fp_location == kLocPhysReg) && t_loc->fp) {
        // Promoted to an fp reg and the in is an fp value: copy directly.
        OpRegCopy(v_map->FpReg, TargetReg(arg_regs[i]));
        need_flush = false;
      } else {
        // Not promoted, or promoted with a mismatched type: flush to frame.
        need_flush = true;
      }

      // For wide args, force flush if not fully promoted
      if (t_loc->wide) {
        // Look at the promotion record for the other half of the pair.
        PromotionMap* p_map = v_map + (t_loc->high_word ? -1 : +1);
        // Is only half promoted?
        need_flush |= (p_map->core_location != v_map->core_location) ||
            (p_map->fp_location != v_map->fp_location);
        if ((cu_->instruction_set == kThumb2) && t_loc->fp && !need_flush) {
          /*
           * In Arm, a double is represented as a pair of consecutive single float
           * registers starting at an even number. It's possible that both Dalvik vRegs
           * representing the incoming double were independently promoted as singles - but
           * not in a form usable as a double. If so, we need to flush - even though the
           * incoming arg appears fully in register. At this point in the code, both
           * halves of the double are promoted. Make sure they are in a usable form.
           */
          int lowreg_index = start_vreg + i + (t_loc->high_word ? -1 : 0);
          int low_reg = promotion_map_[lowreg_index].FpReg;
          int high_reg = promotion_map_[lowreg_index + 1].FpReg;
          // Usable double = even-numbered low single + the next single.
          if (((low_reg & 0x1) != 0) || (high_reg != (low_reg + 1))) {
            need_flush = true;
          }
        }
      }
      if (need_flush) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                      TargetReg(arg_regs[i]), kWord);
      }
    } else {
      // If arriving in frame & promoted
      if (v_map->core_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->core_reg);
      }
      if (v_map->fp_location == kLocPhysReg) {
        LoadWordDisp(TargetReg(kSp), SRegOffset(start_vreg + i),
                     v_map->FpReg);
      }
    }
  }
}
369
370/*
371 * Bit of a hack here - in the absence of a real scheduling pass,
372 * emit the next instruction in static & direct invoke sequences.
373 */
// State machine for static/direct invokes. Each call emits one step's worth
// of code and returns the next state; returns -1 when the sequence is done.
// Two modes: both direct_code and direct_method known (fast literal-based
// path), or a dex-cache walk that resolves the target method at runtime.
static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                          int state, const MethodReference& target_method,
                          uint32_t unused,
                          uintptr_t direct_code, uintptr_t direct_method,
                          InvokeType type) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  if (direct_code != 0 && direct_method != 0) {
    switch (state) {
    case 0:  // Get the current Method* [sets kArg0]
      if (direct_code != static_cast<unsigned int>(-1)) {
        // Code address known at compile time: load it directly (except x86,
        // which does not use kInvokeTgt for the branch).
        if (cu->instruction_set != kX86) {
          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
        }
      } else {
        // Code address patched later: load it PC-relative from a literal.
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                               target_method.dex_method_index, 0);
        if (data_target == NULL) {
          data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
          data_target->operands[1] = type;
        }
        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
        cg->AppendLIR(load_pc_rel);
        // PC-relative literal loads are only emitted for Thumb2 here.
        DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
      }
      if (direct_method != static_cast<unsigned int>(-1)) {
        // Method* known at compile time: materialize it into kArg0.
        cg->LoadConstant(cg->TargetReg(kArg0), direct_method);
      } else {
        // Method* patched later: load it PC-relative from a literal.
        CHECK_EQ(cu->dex_file, target_method.dex_file);
        LIR* data_target = cg->ScanLiteralPool(cg->method_literal_list_,
                                               target_method.dex_method_index, 0);
        if (data_target == NULL) {
          data_target = cg->AddWordData(&cg->method_literal_list_, target_method.dex_method_index);
          data_target->operands[1] = type;
        }
        LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kArg0), data_target);
        cg->AppendLIR(load_pc_rel);
        DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
      }
      break;
    default:
      return -1;
    }
  } else {
    // Slow path: resolve the target method via the dex cache, one step per
    // state so argument loads can be interleaved by the caller.
    switch (state) {
    case 0:  // Get the current Method* [sets kArg0]
      // TUNING: we can save a reg copy if Method* has been promoted.
      cg->LoadCurrMethodDirect(cg->TargetReg(kArg0));
      break;
    case 1:  // Get method->dex_cache_resolved_methods_
      cg->LoadWordDisp(cg->TargetReg(kArg0),
          mirror::ArtMethod::DexCacheResolvedMethodsOffset().Int32Value(), cg->TargetReg(kArg0));
      // Set up direct code if known.
      if (direct_code != 0) {
        if (direct_code != static_cast<unsigned int>(-1)) {
          cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
        } else {
          CHECK_EQ(cu->dex_file, target_method.dex_file);
          CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
          LIR* data_target = cg->ScanLiteralPool(cg->code_literal_list_,
                                                 target_method.dex_method_index, 0);
          if (data_target == NULL) {
            data_target = cg->AddWordData(&cg->code_literal_list_, target_method.dex_method_index);
            data_target->operands[1] = type;
          }
          LIR* load_pc_rel = cg->OpPcRelLoad(cg->TargetReg(kInvokeTgt), data_target);
          cg->AppendLIR(load_pc_rel);
          DCHECK_EQ(cu->instruction_set, kThumb2) << reinterpret_cast<void*>(data_target);
        }
      }
      break;
    case 2:  // Grab target method* (indexed load from the resolved-methods array)
      CHECK_EQ(cu->dex_file, target_method.dex_file);
      cg->LoadWordDisp(cg->TargetReg(kArg0),
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value() +
                       (target_method.dex_method_index * 4),
                       cg-> TargetReg(kArg0));
      break;
    case 3:  // Grab the code from the method*
      if (cu->instruction_set != kX86) {
        if (direct_code == 0) {
          cg->LoadWordDisp(cg->TargetReg(kArg0),
                           mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                           cg->TargetReg(kInvokeTgt));
        }
        break;
      }
      // Intentional fallthrough for x86
    default:
      return -1;
    }
  }
  return state + 1;
}
468
469/*
470 * Bit of a hack here - in the absence of a real scheduling pass,
471 * emit the next instruction in a virtual invoke sequence.
472 * We can use kLr as a temp prior to target address loading
473 * Note also that we'll load the first argument ("this") into
474 * kArg1 here rather than the standard LoadArgRegs.
475 */
// State machine for resolved virtual invokes: null-check "this", then walk
// this->klass_->vtable to the target method and its code entry point.
// Returns the next state, or -1 once the sequence is complete.
static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                         int state, const MethodReference& target_method,
                         uint32_t method_idx, uintptr_t unused, uintptr_t unused2,
                         InvokeType unused3) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
  /*
   * This is the fast path in which the target virtual method is
   * fully resolved at compile time.
   */
  switch (state) {
    case 0: {  // Get "this" [set kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 1:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 2:  // Get this->klass_->vtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::VTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get target method [use kInvokeTgt, set kArg0]
      // Indexed load from the vtable's data area (4-byte slots).
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), (method_idx * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86 (x86 does not load into kInvokeTgt).
    default:
      return -1;
  }
  return state + 1;
}
519
520/*
Jeff Hao88474b42013-10-23 16:24:40 -0700521 * Emit the next instruction in an invoke interface sequence. This will do a lookup in the
522 * class's IMT, calling either the actual method or art_quick_imt_conflict_trampoline if
523 * more than one interface method map to the same index. Note also that we'll load the first
524 * argument ("this") into kArg1 here rather than the standard LoadArgRegs.
Brian Carlstrom7940e442013-07-12 13:46:57 -0700525 */
// State machine for interface invokes via the IMT: pass the target method
// index in the hidden argument register (for conflict resolution), null-check
// "this", then walk this->klass_->imtable to the entry's code address.
// Returns the next state, or -1 once the sequence is complete.
static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                                 const MethodReference& target_method,
                                 uint32_t method_idx, uintptr_t unused,
                                 uintptr_t direct_method, InvokeType unused2) {
  Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());

  switch (state) {
    case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
      CHECK_EQ(cu->dex_file, target_method.dex_file);
      CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
      cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index);
      if (cu->instruction_set == kX86) {
        // x86 additionally mirrors the hidden argument into an fp register.
        cg->OpRegCopy(cg->TargetReg(kHiddenFpArg), cg->TargetReg(kHiddenArg));
      }
      break;
    case 1: {  // Get "this" [set kArg1]
      RegLocation rl_arg = info->args[0];
      cg->LoadValueDirectFixed(rl_arg, cg->TargetReg(kArg1));
      break;
    }
    case 2:  // Is "this" null? [use kArg1]
      cg->GenNullCheck(info->args[0].s_reg_low, cg->TargetReg(kArg1), info->opt_flags);
      // Get this->klass_ [use kArg1, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kArg1), mirror::Object::ClassOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 3:  // Get this->klass_->imtable [use kInvokeTgt, set kInvokeTgt]
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), mirror::Class::ImTableOffset().Int32Value(),
                       cg->TargetReg(kInvokeTgt));
      break;
    case 4:  // Get target method [use kInvokeTgt, set kArg0]
      // IMT slot = method index modulo the fixed IMT size (4-byte slots).
      cg->LoadWordDisp(cg->TargetReg(kInvokeTgt), ((method_idx % ClassLinker::kImtSize) * 4) +
                       mirror::Array::DataOffset(sizeof(mirror::Object*)).Int32Value(),
                       cg->TargetReg(kArg0));
      break;
    case 5:  // Get the compiled code address [use kArg0, set kInvokeTgt]
      if (cu->instruction_set != kX86) {
        cg->LoadWordDisp(cg->TargetReg(kArg0),
                         mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value(),
                         cg->TargetReg(kInvokeTgt));
        break;
      }
      // Intentional fallthrough for X86
    default:
      return -1;
  }
  return state + 1;
}
574
Ian Rogers848871b2013-08-05 10:56:33 -0700575static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset trampoline,
Brian Carlstrom7940e442013-07-12 13:46:57 -0700576 int state, const MethodReference& target_method,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700577 uint32_t method_idx) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700578 Mir2Lir* cg = static_cast<Mir2Lir*>(cu->cg.get());
579 /*
580 * This handles the case in which the base method is not fully
581 * resolved at compile time, we bail to a runtime helper.
582 */
583 if (state == 0) {
584 if (cu->instruction_set != kX86) {
585 // Load trampoline target
Ian Rogers848871b2013-08-05 10:56:33 -0700586 cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
Brian Carlstrom7940e442013-07-12 13:46:57 -0700587 }
588 // Load kArg0 with method index
589 CHECK_EQ(cu->dex_file, target_method.dex_file);
590 cg->LoadConstant(cg->TargetReg(kArg0), target_method.dex_method_index);
591 return 1;
592 }
593 return -1;
594}
595
596static int NextStaticCallInsnSP(CompilationUnit* cu, CallInfo* info,
597 int state,
598 const MethodReference& target_method,
599 uint32_t method_idx,
600 uintptr_t unused, uintptr_t unused2,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700601 InvokeType unused3) {
Ian Rogers848871b2013-08-05 10:56:33 -0700602 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700603 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
604}
605
606static int NextDirectCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
607 const MethodReference& target_method,
608 uint32_t method_idx, uintptr_t unused,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700609 uintptr_t unused2, InvokeType unused3) {
Ian Rogers848871b2013-08-05 10:56:33 -0700610 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700611 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
612}
613
614static int NextSuperCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
615 const MethodReference& target_method,
616 uint32_t method_idx, uintptr_t unused,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700617 uintptr_t unused2, InvokeType unused3) {
Ian Rogers848871b2013-08-05 10:56:33 -0700618 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700619 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
620}
621
622static int NextVCallInsnSP(CompilationUnit* cu, CallInfo* info, int state,
623 const MethodReference& target_method,
624 uint32_t method_idx, uintptr_t unused,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700625 uintptr_t unused2, InvokeType unused3) {
Ian Rogers848871b2013-08-05 10:56:33 -0700626 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700627 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
628}
629
630static int NextInterfaceCallInsnWithAccessCheck(CompilationUnit* cu,
631 CallInfo* info, int state,
632 const MethodReference& target_method,
633 uint32_t unused,
634 uintptr_t unused2, uintptr_t unused3,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700635 InvokeType unused4) {
Ian Rogers848871b2013-08-05 10:56:33 -0700636 ThreadOffset trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
Brian Carlstrom7940e442013-07-12 13:46:57 -0700637 return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
638}
639
// Marshal up to three argument words into kArg1..kArg3, advancing the invoke
// state machine (next_call_insn) after each load so target-resolution
// instructions are interleaved with argument loads. Returns the updated
// call_state. With skip_this set, "this" (already loaded elsewhere) is
// skipped: the walk starts at kArg2 / arg word 1.
int Mir2Lir::LoadArgRegs(CallInfo* info, int call_state,
                         NextCallInsn next_call_insn,
                         const MethodReference& target_method,
                         uint32_t vtable_idx, uintptr_t direct_code,
                         uintptr_t direct_method, InvokeType type, bool skip_this) {
  int last_arg_reg = TargetReg(kArg3);
  int next_reg = TargetReg(kArg1);
  int next_arg = 0;
  if (skip_this) {
    next_reg++;
    next_arg++;
  }
  for (; (next_reg <= last_arg_reg) && (next_arg < info->num_arg_words); next_reg++) {
    RegLocation rl_arg = info->args[next_arg++];
    rl_arg = UpdateRawLoc(rl_arg);
    if (rl_arg.wide && (next_reg <= TargetReg(kArg2))) {
      // A wide value that still fits entirely in the register window takes
      // a consecutive register pair and consumes two arg words.
      LoadValueDirectWideFixed(rl_arg, next_reg, next_reg + 1);
      next_reg++;
      next_arg++;
    } else {
      if (rl_arg.wide) {
        // Only one register is left for a wide value: load it as a narrow
        // word. NOTE(review): is_const is cleared here — presumably so the
        // word is not rematerialized from the wide constant; confirm.
        rl_arg.wide = false;
        rl_arg.is_const = false;
      }
      LoadValueDirectFixed(rl_arg, next_reg);
    }
    // Interleave the next step of the invoke sequence with the arg loads.
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  }
  return call_state;
}
671
672/*
673 * Load up to 5 arguments, the first three of which will be in
674 * kArg1 .. kArg3. On entry kArg0 contains the current method pointer,
675 * and as part of the load sequence, it must be replaced with
676 * the target method pointer. Note, this may also be called
677 * for "range" variants if the number of arguments is 5 or fewer.
678 */
679int Mir2Lir::GenDalvikArgsNoRange(CallInfo* info,
680 int call_state, LIR** pcrLabel, NextCallInsn next_call_insn,
681 const MethodReference& target_method,
682 uint32_t vtable_idx, uintptr_t direct_code,
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700683 uintptr_t direct_method, InvokeType type, bool skip_this) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700684 RegLocation rl_arg;
685
686 /* If no arguments, just return */
687 if (info->num_arg_words == 0)
688 return call_state;
689
690 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
691 direct_code, direct_method, type);
692
693 DCHECK_LE(info->num_arg_words, 5);
694 if (info->num_arg_words > 3) {
695 int32_t next_use = 3;
Brian Carlstrom7934ac22013-07-26 10:54:15 -0700696 // Detect special case of wide arg spanning arg3/arg4
Brian Carlstrom7940e442013-07-12 13:46:57 -0700697 RegLocation rl_use0 = info->args[0];
698 RegLocation rl_use1 = info->args[1];
699 RegLocation rl_use2 = info->args[2];
700 if (((!rl_use0.wide && !rl_use1.wide) || rl_use0.wide) &&
701 rl_use2.wide) {
702 int reg = -1;
703 // Wide spans, we need the 2nd half of uses[2].
704 rl_arg = UpdateLocWide(rl_use2);
705 if (rl_arg.location == kLocPhysReg) {
706 reg = rl_arg.high_reg;
707 } else {
708 // kArg2 & rArg3 can safely be used here
709 reg = TargetReg(kArg3);
710 LoadWordDisp(TargetReg(kSp), SRegOffset(rl_arg.s_reg_low) + 4, reg);
711 call_state = next_call_insn(cu_, info, call_state, target_method,
712 vtable_idx, direct_code, direct_method, type);
713 }
714 StoreBaseDisp(TargetReg(kSp), (next_use + 1) * 4, reg, kWord);
715 StoreBaseDisp(TargetReg(kSp), 16 /* (3+1)*4 */, reg, kWord);
716 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
717 direct_code, direct_method, type);
718 next_use++;
719 }
720 // Loop through the rest
721 while (next_use < info->num_arg_words) {
722 int low_reg;
723 int high_reg = -1;
724 rl_arg = info->args[next_use];
725 rl_arg = UpdateRawLoc(rl_arg);
726 if (rl_arg.location == kLocPhysReg) {
727 low_reg = rl_arg.low_reg;
728 high_reg = rl_arg.high_reg;
729 } else {
730 low_reg = TargetReg(kArg2);
731 if (rl_arg.wide) {
732 high_reg = TargetReg(kArg3);
733 LoadValueDirectWideFixed(rl_arg, low_reg, high_reg);
734 } else {
735 LoadValueDirectFixed(rl_arg, low_reg);
736 }
737 call_state = next_call_insn(cu_, info, call_state, target_method,
738 vtable_idx, direct_code, direct_method, type);
739 }
740 int outs_offset = (next_use + 1) * 4;
741 if (rl_arg.wide) {
742 StoreBaseDispWide(TargetReg(kSp), outs_offset, low_reg, high_reg);
743 next_use += 2;
744 } else {
745 StoreWordDisp(TargetReg(kSp), outs_offset, low_reg);
746 next_use++;
747 }
748 call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
749 direct_code, direct_method, type);
750 }
751 }
752
753 call_state = LoadArgRegs(info, call_state, next_call_insn,
754 target_method, vtable_idx, direct_code, direct_method,
755 type, skip_this);
756
757 if (pcrLabel) {
758 *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
759 }
760 return call_state;
761}
762
763/*
764 * May have 0+ arguments (also used for jumbo). Note that
765 * source virtual registers may be in physical registers, so may
766 * need to be flushed to home location before copying. This
767 * applies to arg3 and above (see below).
768 *
769 * Two general strategies:
770 * If < 20 arguments
771 * Pass args 3-18 using vldm/vstm block copy
772 * Pass arg0, arg1 & arg2 in kArg1-kArg3
773 * If 20+ arguments
774 * Pass args arg19+ using memcpy block copy
775 * Pass arg0, arg1 & arg2 in kArg1-kArg3
776 *
777 */
int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
                                LIR** pcrLabel, NextCallInsn next_call_insn,
                                const MethodReference& target_method,
                                uint32_t vtable_idx, uintptr_t direct_code, uintptr_t direct_method,
                                InvokeType type, bool skip_this) {
  // If we can treat it as non-range (Jumbo ops will use range form)
  if (info->num_arg_words <= 5)
    return GenDalvikArgsNoRange(info, call_state, pcrLabel,
                                next_call_insn, target_method, vtable_idx,
                                direct_code, direct_method, type, skip_this);
  /*
   * First load the non-register arguments.  Both forms expect all
   * of the source arguments to be in their home frame location, so
   * scan the s_reg names and flush any that have been promoted to
   * frame backing storage.
   */
  // Scan the rest of the args - if in phys_reg flush to memory.  Wide args
  // stacked past arg1 and narrow args past arg2 may end up in the bulk copy
  // below, so they must be in their frame slots.
  for (int next_arg = 0; next_arg < info->num_arg_words;) {
    RegLocation loc = info->args[next_arg];
    if (loc.wide) {
      loc = UpdateLocWide(loc);
      if ((next_arg >= 2) && (loc.location == kLocPhysReg)) {
        StoreBaseDispWide(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                          loc.low_reg, loc.high_reg);
      }
      next_arg += 2;
    } else {
      loc = UpdateLoc(loc);
      if ((next_arg >= 3) && (loc.location == kLocPhysReg)) {
        StoreBaseDisp(TargetReg(kSp), SRegOffset(loc.s_reg_low),
                      loc.low_reg, kWord);
      }
      next_arg++;
    }
  }

  // Logic below assumes that Method pointer is at offset zero from SP.
  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);

  // The first 3 arguments are passed via registers.
  // TODO: For 64-bit, instead of hardcoding 4 for Method* size, we should either
  // get size of uintptr_t or size of object reference according to model being used.
  int outs_offset = 4 /* Method* */ + (3 * sizeof(uint32_t));
  int start_offset = SRegOffset(info->args[3].s_reg_low);
  int regs_left_to_pass_via_stack = info->num_arg_words - 3;
  DCHECK_GT(regs_left_to_pass_via_stack, 0);

  if (cu_->instruction_set == kThumb2 && regs_left_to_pass_via_stack <= 16) {
    // Use vldm/vstm pair using kArg3 as a temp
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), start_offset);
    LIR* ld = OpVldm(TargetReg(kArg3), regs_left_to_pass_via_stack);
    // TUNING: loosen barrier
    ld->u.m.def_mask = ENCODE_ALL;
    SetMemRefType(ld, true /* is_load */, kDalvikReg);
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    // Point kArg3 at the outs area (one Method* slot plus three reg args).
    OpRegRegImm(kOpAdd, TargetReg(kArg3), TargetReg(kSp), 4 /* Method* */ + (3 * 4));
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
    LIR* st = OpVstm(TargetReg(kArg3), regs_left_to_pass_via_stack);
    SetMemRefType(st, false /* is_load */, kDalvikReg);
    st->u.m.def_mask = ENCODE_ALL;
    call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                direct_code, direct_method, type);
  } else if (cu_->instruction_set == kX86) {
    // x86: copy the spill area to the outs area in the widest chunks the
    // current alignment allows (128-bit xmm moves where possible).
    int current_src_offset = start_offset;
    int current_dest_offset = outs_offset;

    while (regs_left_to_pass_via_stack > 0) {
      // This is based on the knowledge that the stack itself is 16-byte aligned.
      bool src_is_16b_aligned = (current_src_offset & 0xF) == 0;
      bool dest_is_16b_aligned = (current_dest_offset & 0xF) == 0;
      size_t bytes_to_move;

      /*
       * The amount to move defaults to 32-bit. If there are 4 registers left to move, then do a
       * 128-bit move because we won't get another chance to become aligned. If there are more than
       * 4 registers left to move, consider doing a 128-bit move only if either src or dest are
       * aligned.  We do this because we could potentially do a smaller move to align.
       */
      if (regs_left_to_pass_via_stack == 4 ||
          (regs_left_to_pass_via_stack > 4 && (src_is_16b_aligned || dest_is_16b_aligned))) {
        // Moving 128-bits via xmm register.
        bytes_to_move = sizeof(uint32_t) * 4;

        // Allocate a free xmm temp. Since we are working through the calling sequence,
        // we expect to have an xmm temporary available.
        int temp = AllocTempDouble();
        CHECK_GT(temp, 0);

        LIR* ld1 = nullptr;
        LIR* ld2 = nullptr;
        LIR* st1 = nullptr;
        LIR* st2 = nullptr;

        /*
         * The logic is similar for both loads and stores. If we have 16-byte alignment,
         * do an aligned move. If we have 8-byte alignment, then do the move in two
         * parts. This approach prevents possible cache line splits. Finally, fall back
         * to doing an unaligned move. In most cases we likely won't split the cache
         * line but we cannot prove it and thus take a conservative approach.
         */
        bool src_is_8b_aligned = (current_src_offset & 0x7) == 0;
        bool dest_is_8b_aligned = (current_dest_offset & 0x7) == 0;

        if (src_is_16b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovA128FP);
        } else if (src_is_8b_aligned) {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovLo128FP);
          ld2 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset + (bytes_to_move >> 1), kMovHi128FP);
        } else {
          ld1 = OpMovRegMem(temp, TargetReg(kSp), current_src_offset, kMovU128FP);
        }

        if (dest_is_16b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovA128FP);
        } else if (dest_is_8b_aligned) {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovLo128FP);
          st2 = OpMovMemReg(TargetReg(kSp), current_dest_offset + (bytes_to_move >> 1), temp, kMovHi128FP);
        } else {
          st1 = OpMovMemReg(TargetReg(kSp), current_dest_offset, temp, kMovU128FP);
        }

        // TODO If we could keep track of aliasing information for memory accesses that are wider
        // than 64-bit, we wouldn't need to set up a barrier.
        if (ld1 != nullptr) {
          if (ld2 != nullptr) {
            // For 64-bit load we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(ld1, current_src_offset >> 2, true, true);
            AnnotateDalvikRegAccess(ld2, (current_src_offset + (bytes_to_move >> 1)) >> 2, true, true);
          } else {
            // Set barrier for 128-bit load.
            SetMemRefType(ld1, true /* is_load */, kDalvikReg);
            ld1->u.m.def_mask = ENCODE_ALL;
          }
        }
        if (st1 != nullptr) {
          if (st2 != nullptr) {
            // For 64-bit store we can actually set up the aliasing information.
            AnnotateDalvikRegAccess(st1, current_dest_offset >> 2, false, true);
            AnnotateDalvikRegAccess(st2, (current_dest_offset + (bytes_to_move >> 1)) >> 2, false, true);
          } else {
            // Set barrier for 128-bit store.
            SetMemRefType(st1, false /* is_load */, kDalvikReg);
            st1->u.m.def_mask = ENCODE_ALL;
          }
        }

        // Free the temporary used for the data movement.
        FreeTemp(temp);
      } else {
        // Moving 32-bits via general purpose register.
        bytes_to_move = sizeof(uint32_t);

        // Instead of allocating a new temp, simply reuse one of the registers being used
        // for argument passing.
        int temp = TargetReg(kArg3);

        // Now load the argument VR and store to the outs.
        LoadWordDisp(TargetReg(kSp), current_src_offset, temp);
        StoreWordDisp(TargetReg(kSp), current_dest_offset, temp);
      }

      current_src_offset += bytes_to_move;
      current_dest_offset += bytes_to_move;
      regs_left_to_pass_via_stack -= (bytes_to_move >> 2);
    }
  } else {
    // Generate memcpy
    OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
    OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
    CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
                               TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
  }

  // Finally, the first three argument words go in kArg1..kArg3.
  call_state = LoadArgRegs(info, call_state, next_call_insn,
                           target_method, vtable_idx, direct_code, direct_method,
                           type, skip_this);

  call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                              direct_code, direct_method, type);
  if (pcrLabel) {
    // Emit the null check of the receiver (args[0]) if requested.
    *pcrLabel = GenNullCheck(info->args[0].s_reg_low, TargetReg(kArg1), info->opt_flags);
  }
  return call_state;
}
966
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700967RegLocation Mir2Lir::InlineTarget(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700968 RegLocation res;
969 if (info->result.location == kLocInvalid) {
970 res = GetReturn(false);
971 } else {
972 res = info->result;
973 }
974 return res;
975}
976
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700977RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700978 RegLocation res;
979 if (info->result.location == kLocInvalid) {
980 res = GetReturnWide(false);
981 } else {
982 res = info->result;
983 }
984 return res;
985}
986
Brian Carlstrom2ce745c2013-07-17 17:44:30 -0700987bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -0700988 if (cu_->instruction_set == kMips) {
989 // TODO - add Mips implementation
990 return false;
991 }
992 // Location of reference to data array
993 int value_offset = mirror::String::ValueOffset().Int32Value();
994 // Location of count
995 int count_offset = mirror::String::CountOffset().Int32Value();
996 // Starting offset within data array
997 int offset_offset = mirror::String::OffsetOffset().Int32Value();
998 // Start of char data with array_
999 int data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Int32Value();
1000
1001 RegLocation rl_obj = info->args[0];
1002 RegLocation rl_idx = info->args[1];
1003 rl_obj = LoadValue(rl_obj, kCoreReg);
Mark Mendell2b724cb2014-02-06 05:24:20 -08001004 // X86 wants to avoid putting a constant index into a register.
1005 if (!(cu_->instruction_set == kX86 && rl_idx.is_const)) {
1006 rl_idx = LoadValue(rl_idx, kCoreReg);
1007 }
Brian Carlstrom7940e442013-07-12 13:46:57 -07001008 int reg_max;
1009 GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
1010 bool range_check = (!(info->opt_flags & MIR_IGNORE_RANGE_CHECK));
1011 LIR* launch_pad = NULL;
1012 int reg_off = INVALID_REG;
1013 int reg_ptr = INVALID_REG;
1014 if (cu_->instruction_set != kX86) {
1015 reg_off = AllocTemp();
1016 reg_ptr = AllocTemp();
1017 if (range_check) {
1018 reg_max = AllocTemp();
1019 LoadWordDisp(rl_obj.low_reg, count_offset, reg_max);
1020 }
1021 LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
1022 LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
1023 if (range_check) {
1024 // Set up a launch pad to allow retry in case of bounds violation */
buzbee0d829482013-10-11 15:24:55 -07001025 launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
Brian Carlstrom7940e442013-07-12 13:46:57 -07001026 intrinsic_launchpads_.Insert(launch_pad);
1027 OpRegReg(kOpCmp, rl_idx.low_reg, reg_max);
1028 FreeTemp(reg_max);
Vladimir Marko58af1f92013-12-19 13:31:15 +00001029 OpCondBranch(kCondUge, launch_pad);
Brian Carlstrom6f485c62013-07-18 15:35:35 -07001030 }
Mark Mendell2b724cb2014-02-06 05:24:20 -08001031 OpRegImm(kOpAdd, reg_ptr, data_offset);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001032 } else {
1033 if (range_check) {
Mark Mendell2b724cb2014-02-06 05:24:20 -08001034 // On x86, we can compare to memory directly
Brian Carlstrom7940e442013-07-12 13:46:57 -07001035 // Set up a launch pad to allow retry in case of bounds violation */
buzbee0d829482013-10-11 15:24:55 -07001036 launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
Brian Carlstrom7940e442013-07-12 13:46:57 -07001037 intrinsic_launchpads_.Insert(launch_pad);
Mark Mendell2b724cb2014-02-06 05:24:20 -08001038 if (rl_idx.is_const) {
1039 OpCmpMemImmBranch(kCondUlt, INVALID_REG, rl_obj.low_reg, count_offset,
1040 mir_graph_->ConstantValue(rl_idx.orig_sreg), launch_pad);
1041 } else {
1042 OpRegMem(kOpCmp, rl_idx.low_reg, rl_obj.low_reg, count_offset);
1043 OpCondBranch(kCondUge, launch_pad);
1044 }
Brian Carlstrom7940e442013-07-12 13:46:57 -07001045 }
1046 reg_off = AllocTemp();
1047 reg_ptr = AllocTemp();
1048 LoadWordDisp(rl_obj.low_reg, offset_offset, reg_off);
1049 LoadWordDisp(rl_obj.low_reg, value_offset, reg_ptr);
1050 }
Mark Mendell2b724cb2014-02-06 05:24:20 -08001051 if (rl_idx.is_const) {
1052 OpRegImm(kOpAdd, reg_off, mir_graph_->ConstantValue(rl_idx.orig_sreg));
1053 } else {
1054 OpRegReg(kOpAdd, reg_off, rl_idx.low_reg);
1055 }
Brian Carlstrom7940e442013-07-12 13:46:57 -07001056 FreeTemp(rl_obj.low_reg);
Mark Mendell2b724cb2014-02-06 05:24:20 -08001057 if (rl_idx.low_reg != INVALID_REG) {
1058 FreeTemp(rl_idx.low_reg);
1059 }
Brian Carlstrom7940e442013-07-12 13:46:57 -07001060 RegLocation rl_dest = InlineTarget(info);
1061 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
Mark Mendell2b724cb2014-02-06 05:24:20 -08001062 if (cu_->instruction_set != kX86) {
1063 LoadBaseIndexed(reg_ptr, reg_off, rl_result.low_reg, 1, kUnsignedHalf);
1064 } else {
1065 LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.low_reg,
1066 INVALID_REG, kUnsignedHalf, INVALID_SREG);
1067 }
Brian Carlstrom7940e442013-07-12 13:46:57 -07001068 FreeTemp(reg_off);
1069 FreeTemp(reg_ptr);
1070 StoreValue(rl_dest, rl_result);
1071 if (range_check) {
1072 launch_pad->operands[2] = 0; // no resumption
1073 }
1074 // Record that we've already inlined & null checked
1075 info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
1076 return true;
1077}
1078
1079// Generates an inlined String.is_empty or String.length.
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001080bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001081 if (cu_->instruction_set == kMips) {
1082 // TODO - add Mips implementation
1083 return false;
1084 }
1085 // dst = src.length();
1086 RegLocation rl_obj = info->args[0];
1087 rl_obj = LoadValue(rl_obj, kCoreReg);
1088 RegLocation rl_dest = InlineTarget(info);
1089 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1090 GenNullCheck(rl_obj.s_reg_low, rl_obj.low_reg, info->opt_flags);
1091 LoadWordDisp(rl_obj.low_reg, mirror::String::CountOffset().Int32Value(), rl_result.low_reg);
1092 if (is_empty) {
1093 // dst = (dst == 0);
1094 if (cu_->instruction_set == kThumb2) {
1095 int t_reg = AllocTemp();
1096 OpRegReg(kOpNeg, t_reg, rl_result.low_reg);
1097 OpRegRegReg(kOpAdc, rl_result.low_reg, rl_result.low_reg, t_reg);
1098 } else {
1099 DCHECK_EQ(cu_->instruction_set, kX86);
1100 OpRegImm(kOpSub, rl_result.low_reg, 1);
1101 OpRegImm(kOpLsr, rl_result.low_reg, 31);
1102 }
1103 }
1104 StoreValue(rl_dest, rl_result);
1105 return true;
1106}
1107
Vladimir Marko6bdf1ff2013-10-29 17:40:46 +00001108bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
1109 if (cu_->instruction_set == kMips) {
1110 // TODO - add Mips implementation
1111 return false;
1112 }
1113 RegLocation rl_src_i = info->args[0];
1114 RegLocation rl_dest = InlineTarget(info); // result reg
1115 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1116 if (size == kLong) {
1117 RegLocation rl_i = LoadValueWide(rl_src_i, kCoreReg);
Vladimir Markof246af22013-11-27 12:30:15 +00001118 int r_i_low = rl_i.low_reg;
1119 if (rl_i.low_reg == rl_result.low_reg) {
1120 // First REV shall clobber rl_result.low_reg, save the value in a temp for the second REV.
1121 r_i_low = AllocTemp();
1122 OpRegCopy(r_i_low, rl_i.low_reg);
1123 }
Vladimir Marko6bdf1ff2013-10-29 17:40:46 +00001124 OpRegReg(kOpRev, rl_result.low_reg, rl_i.high_reg);
Vladimir Markof246af22013-11-27 12:30:15 +00001125 OpRegReg(kOpRev, rl_result.high_reg, r_i_low);
1126 if (rl_i.low_reg == rl_result.low_reg) {
1127 FreeTemp(r_i_low);
1128 }
Vladimir Marko6bdf1ff2013-10-29 17:40:46 +00001129 StoreValueWide(rl_dest, rl_result);
1130 } else {
1131 DCHECK(size == kWord || size == kSignedHalf);
1132 OpKind op = (size == kWord) ? kOpRev : kOpRevsh;
1133 RegLocation rl_i = LoadValue(rl_src_i, kCoreReg);
1134 OpRegReg(op, rl_result.low_reg, rl_i.low_reg);
1135 StoreValue(rl_dest, rl_result);
1136 }
1137 return true;
1138}
1139
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001140bool Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001141 if (cu_->instruction_set == kMips) {
1142 // TODO - add Mips implementation
1143 return false;
1144 }
1145 RegLocation rl_src = info->args[0];
1146 rl_src = LoadValue(rl_src, kCoreReg);
1147 RegLocation rl_dest = InlineTarget(info);
1148 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1149 int sign_reg = AllocTemp();
1150 // abs(x) = y<=x>>31, (x+y)^y.
1151 OpRegRegImm(kOpAsr, sign_reg, rl_src.low_reg, 31);
1152 OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
1153 OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
1154 StoreValue(rl_dest, rl_result);
1155 return true;
1156}
1157
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001158bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001159 if (cu_->instruction_set == kMips) {
1160 // TODO - add Mips implementation
1161 return false;
1162 }
1163 if (cu_->instruction_set == kThumb2) {
1164 RegLocation rl_src = info->args[0];
1165 rl_src = LoadValueWide(rl_src, kCoreReg);
1166 RegLocation rl_dest = InlineTargetWide(info);
1167 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1168 int sign_reg = AllocTemp();
1169 // abs(x) = y<=x>>31, (x+y)^y.
1170 OpRegRegImm(kOpAsr, sign_reg, rl_src.high_reg, 31);
1171 OpRegRegReg(kOpAdd, rl_result.low_reg, rl_src.low_reg, sign_reg);
1172 OpRegRegReg(kOpAdc, rl_result.high_reg, rl_src.high_reg, sign_reg);
1173 OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
1174 OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
1175 StoreValueWide(rl_dest, rl_result);
1176 return true;
1177 } else {
1178 DCHECK_EQ(cu_->instruction_set, kX86);
1179 // Reuse source registers to avoid running out of temps
1180 RegLocation rl_src = info->args[0];
1181 rl_src = LoadValueWide(rl_src, kCoreReg);
1182 RegLocation rl_dest = InlineTargetWide(info);
1183 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
1184 OpRegCopyWide(rl_result.low_reg, rl_result.high_reg, rl_src.low_reg, rl_src.high_reg);
1185 FreeTemp(rl_src.low_reg);
1186 FreeTemp(rl_src.high_reg);
1187 int sign_reg = AllocTemp();
1188 // abs(x) = y<=x>>31, (x+y)^y.
1189 OpRegRegImm(kOpAsr, sign_reg, rl_result.high_reg, 31);
1190 OpRegReg(kOpAdd, rl_result.low_reg, sign_reg);
1191 OpRegReg(kOpAdc, rl_result.high_reg, sign_reg);
1192 OpRegReg(kOpXor, rl_result.low_reg, sign_reg);
1193 OpRegReg(kOpXor, rl_result.high_reg, sign_reg);
1194 StoreValueWide(rl_dest, rl_result);
1195 return true;
1196 }
1197}
1198
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001199bool Mir2Lir::GenInlinedFloatCvt(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001200 if (cu_->instruction_set == kMips) {
1201 // TODO - add Mips implementation
1202 return false;
1203 }
1204 RegLocation rl_src = info->args[0];
1205 RegLocation rl_dest = InlineTarget(info);
1206 StoreValue(rl_dest, rl_src);
1207 return true;
1208}
1209
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001210bool Mir2Lir::GenInlinedDoubleCvt(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001211 if (cu_->instruction_set == kMips) {
1212 // TODO - add Mips implementation
1213 return false;
1214 }
1215 RegLocation rl_src = info->args[0];
1216 RegLocation rl_dest = InlineTargetWide(info);
1217 StoreValueWide(rl_dest, rl_src);
1218 return true;
1219}
1220
1221/*
1222 * Fast string.index_of(I) & (II). Tests for simple case of char <= 0xffff,
1223 * otherwise bails to standard library code.
1224 */
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001225bool Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001226 if (cu_->instruction_set == kMips) {
1227 // TODO - add Mips implementation
1228 return false;
1229 }
Vladimir Marko31c2aac2013-12-09 16:31:19 +00001230 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -07001231 LockCallTemps(); // Using fixed registers
1232 int reg_ptr = TargetReg(kArg0);
1233 int reg_char = TargetReg(kArg1);
1234 int reg_start = TargetReg(kArg2);
1235
1236 RegLocation rl_obj = info->args[0];
1237 RegLocation rl_char = info->args[1];
1238 RegLocation rl_start = info->args[2];
1239 LoadValueDirectFixed(rl_obj, reg_ptr);
1240 LoadValueDirectFixed(rl_char, reg_char);
1241 if (zero_based) {
1242 LoadConstant(reg_start, 0);
1243 } else {
1244 LoadValueDirectFixed(rl_start, reg_start);
1245 }
Ian Rogers7655f292013-07-29 11:07:13 -07001246 int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0;
Brian Carlstrom7940e442013-07-12 13:46:57 -07001247 GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
buzbee0d829482013-10-11 15:24:55 -07001248 LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
Brian Carlstrom7940e442013-07-12 13:46:57 -07001249 intrinsic_launchpads_.Insert(launch_pad);
1250 OpCmpImmBranch(kCondGt, reg_char, 0xFFFF, launch_pad);
1251 // NOTE: not a safepoint
1252 if (cu_->instruction_set != kX86) {
1253 OpReg(kOpBlx, r_tgt);
1254 } else {
Ian Rogers7655f292013-07-29 11:07:13 -07001255 OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf));
Brian Carlstrom7940e442013-07-12 13:46:57 -07001256 }
1257 LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
buzbee0d829482013-10-11 15:24:55 -07001258 launch_pad->operands[2] = WrapPointer(resume_tgt);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001259 // Record that we've already inlined & null checked
1260 info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
1261 RegLocation rl_return = GetReturn(false);
1262 RegLocation rl_dest = InlineTarget(info);
1263 StoreValue(rl_dest, rl_return);
1264 return true;
1265}
1266
1267/* Fast string.compareTo(Ljava/lang/string;)I. */
Brian Carlstrom2ce745c2013-07-17 17:44:30 -07001268bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
Brian Carlstrom7940e442013-07-12 13:46:57 -07001269 if (cu_->instruction_set == kMips) {
1270 // TODO - add Mips implementation
1271 return false;
1272 }
Vladimir Marko31c2aac2013-12-09 16:31:19 +00001273 ClobberCallerSave();
Brian Carlstrom7940e442013-07-12 13:46:57 -07001274 LockCallTemps(); // Using fixed registers
1275 int reg_this = TargetReg(kArg0);
1276 int reg_cmp = TargetReg(kArg1);
1277
1278 RegLocation rl_this = info->args[0];
1279 RegLocation rl_cmp = info->args[1];
1280 LoadValueDirectFixed(rl_this, reg_this);
1281 LoadValueDirectFixed(rl_cmp, reg_cmp);
1282 int r_tgt = (cu_->instruction_set != kX86) ?
Ian Rogers7655f292013-07-29 11:07:13 -07001283 LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
Brian Carlstrom7940e442013-07-12 13:46:57 -07001284 GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
Brian Carlstrom7934ac22013-07-26 10:54:15 -07001285 // TUNING: check if rl_cmp.s_reg_low is already null checked
buzbee0d829482013-10-11 15:24:55 -07001286 LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, WrapPointer(info));
Brian Carlstrom7940e442013-07-12 13:46:57 -07001287 intrinsic_launchpads_.Insert(launch_pad);
1288 OpCmpImmBranch(kCondEq, reg_cmp, 0, launch_pad);
1289 // NOTE: not a safepoint
1290 if (cu_->instruction_set != kX86) {
1291 OpReg(kOpBlx, r_tgt);
1292 } else {
Ian Rogers7655f292013-07-29 11:07:13 -07001293 OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo));
Brian Carlstrom7940e442013-07-12 13:46:57 -07001294 }
1295 launch_pad->operands[2] = 0; // No return possible
1296 // Record that we've already inlined & null checked
1297 info->opt_flags |= (MIR_INLINED | MIR_IGNORE_NULL_CHECK);
1298 RegLocation rl_return = GetReturn(false);
1299 RegLocation rl_dest = InlineTarget(info);
1300 StoreValue(rl_dest, rl_return);
1301 return true;
1302}
1303
1304bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
1305 RegLocation rl_dest = InlineTarget(info);
1306 RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
Ian Rogers848871b2013-08-05 10:56:33 -07001307 ThreadOffset offset = Thread::PeerOffset();
Brian Carlstrom7940e442013-07-12 13:46:57 -07001308 if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
Ian Rogers848871b2013-08-05 10:56:33 -07001309 LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.low_reg);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001310 } else {
1311 CHECK(cu_->instruction_set == kX86);
Brian Carlstrom2d888622013-07-18 17:02:00 -07001312 reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.low_reg, offset);
Brian Carlstrom7940e442013-07-12 13:46:57 -07001313 }
1314 StoreValue(rl_dest, rl_result);
1315 return true;
1316}
1317
// Inline sun.misc.Unsafe.get{Int,Long}[Volatile](Object, long): load a 32-
// or 64-bit value from obj + offset.  Only the low word of the 64-bit
// offset argument is used.
bool Mir2Lir::GenInlinedUnsafeGet(CallInfo* info,
                                  bool is_long, bool is_volatile) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_dest = InlineTarget(info);  // result reg
  if (is_volatile) {
    // NOTE(review): this barrier is emitted before the load instruction —
    // confirm this placement provides the intended volatile-load ordering.
    GenMemBarrier(kLoadLoad);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  if (is_long) {
    // No wide indexed load available: fold the offset into the base first.
    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
    LoadBaseDispWide(rl_object.low_reg, 0, rl_result.low_reg, rl_result.high_reg, INVALID_SREG);
    StoreValueWide(rl_dest, rl_result);
  } else {
    LoadBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_result.low_reg, 0, kWord);
    StoreValue(rl_dest, rl_result);
  }
  return true;
}
1345
// Inline sun.misc.Unsafe.put{Int,Long,Object}[Volatile|Ordered](Object,
// long, value): store a 32/64-bit value or reference at obj + offset, with
// the appropriate store barriers and a GC card mark for reference stores.
// Only the low word of the 64-bit offset argument is used.
bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long,
                                  bool is_object, bool is_volatile, bool is_ordered) {
  if (cu_->instruction_set == kMips) {
    // TODO - add Mips implementation
    return false;
  }
  // Unused - RegLocation rl_src_unsafe = info->args[0];
  RegLocation rl_src_obj = info->args[1];  // Object
  RegLocation rl_src_offset = info->args[2];  // long low
  rl_src_offset.wide = 0;  // ignore high half in info->args[3]
  RegLocation rl_src_value = info->args[4];  // value to store
  if (is_volatile || is_ordered) {
    // Order prior stores before this store (release semantics).
    GenMemBarrier(kStoreStore);
  }
  RegLocation rl_object = LoadValue(rl_src_obj, kCoreReg);
  RegLocation rl_offset = LoadValue(rl_src_offset, kCoreReg);
  RegLocation rl_value;
  if (is_long) {
    rl_value = LoadValueWide(rl_src_value, kCoreReg);
    // No wide indexed store available: fold the offset into the base first.
    OpRegReg(kOpAdd, rl_object.low_reg, rl_offset.low_reg);
    StoreBaseDispWide(rl_object.low_reg, 0, rl_value.low_reg, rl_value.high_reg);
  } else {
    rl_value = LoadValue(rl_src_value, kCoreReg);
    StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord);
  }

  // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard.
  FreeTemp(rl_offset.low_reg);
  if (is_volatile) {
    // Full fence after a volatile store.
    GenMemBarrier(kStoreLoad);
  }
  if (is_object) {
    // Reference store: dirty the GC card for the holding object.
    MarkGCCard(rl_value.low_reg, rl_object.low_reg);
  }
  return true;
}
1382
void Mir2Lir::GenInvoke(CallInfo* info) {
  // Lower a Dalvik invoke. A recognized intrinsic is expanded inline; otherwise
  // arguments are marshalled into the calling convention and a call is emitted
  // (direct / via method table on the fast path, through an access-checking
  // trampoline on the slow path).
  if (!(info->opt_flags & MIR_INLINED)) {
    // Not handled by the MIR-level inliner; give the intrinsic recognizer a
    // chance to replace the whole call with inline code.
    DCHECK(cu_->compiler_driver->GetMethodInlinerMap() != nullptr);
    if (cu_->compiler_driver->GetMethodInlinerMap()->GetMethodInliner(cu_->dex_file)
        ->GenIntrinsic(this, info)) {
      return;  // Call fully replaced by intrinsic code.
    }
  }
  InvokeType original_type = info->type;  // avoiding mutation by ComputeInvokeInfo
  int call_state = 0;
  LIR* null_ck;
  LIR** p_null_ck = NULL;  // Non-null only when the fast direct path wants the null-check LIR back.
  NextCallInsn next_call_insn;
  FlushAllRegs();  /* Everything to home location */
  // Explicit register usage
  LockCallTemps();

  DexCompilationUnit* cUnit = mir_graph_->GetCurrentDexCompilationUnit();
  MethodReference target_method(cUnit->GetDexFile(), info->index);
  int vtable_idx;
  uintptr_t direct_code;
  uintptr_t direct_method;
  bool skip_this;
  // Ask the driver how sharply this call site can be resolved. NOTE: this may
  // rewrite info->type (hence the original_type snapshot above).
  bool fast_path =
      cu_->compiler_driver->ComputeInvokeInfo(mir_graph_->GetCurrentDexCompilationUnit(),
                                              current_dalvik_offset_,
                                              true, true,
                                              &info->type, &target_method,
                                              &vtable_idx,
                                              &direct_code, &direct_method) && !SLOW_INVOKE_PATH;
  // Select the per-invoke-type state machine that materializes the call target,
  // and decide whether it also loads "this" (skip_this) so the generic argument
  // marshalling can leave that register alone.
  if (info->type == kInterface) {
    next_call_insn = fast_path ? NextInterfaceCallInsn : NextInterfaceCallInsnWithAccessCheck;
    skip_this = fast_path;
  } else if (info->type == kDirect) {
    if (fast_path) {
      p_null_ck = &null_ck;
    }
    next_call_insn = fast_path ? NextSDCallInsn : NextDirectCallInsnSP;
    skip_this = false;
  } else if (info->type == kStatic) {
    next_call_insn = fast_path ? NextSDCallInsn : NextStaticCallInsnSP;
    skip_this = false;
  } else if (info->type == kSuper) {
    DCHECK(!fast_path);  // Fast path is a direct call.
    next_call_insn = NextSuperCallInsnSP;
    skip_this = false;
  } else {
    DCHECK_EQ(info->type, kVirtual);
    next_call_insn = fast_path ? NextVCallInsn : NextVCallInsnSP;
    skip_this = fast_path;
  }
  // Move the arguments into place; next_call_insn steps are interleaved with
  // the argument loads by the helpers.
  if (!info->is_range) {
    call_state = GenDalvikArgsNoRange(info, call_state, p_null_ck,
                                      next_call_insn, target_method,
                                      vtable_idx, direct_code, direct_method,
                                      original_type, skip_this);
  } else {
    call_state = GenDalvikArgsRange(info, call_state, p_null_ck,
                                    next_call_insn, target_method, vtable_idx,
                                    direct_code, direct_method, original_type,
                                    skip_this);
  }
  // Finish up any of the call sequence not interleaved in arg loading
  while (call_state >= 0) {
    call_state = next_call_insn(cu_, info, call_state, target_method,
                                vtable_idx, direct_code, direct_method,
                                original_type);
  }
  LIR* call_inst;
  if (cu_->instruction_set != kX86) {
    // Non-x86: the sequence above left the target address in kInvokeTgt.
    call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
  } else {
    // x86 can call through memory directly.
    if (fast_path) {
      // Call through the resolved ArtMethod's entry point (method in kArg0).
      call_inst = OpMem(kOpBlx, TargetReg(kArg0),
                        mirror::ArtMethod::GetEntryPointFromCompiledCodeOffset().Int32Value());
    } else {
      // Slow path: call the access-check trampoline matching the invoke type.
      ThreadOffset trampoline(-1);
      switch (info->type) {
        case kInterface:
          trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
          break;
        case kDirect:
          trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
          break;
        case kStatic:
          trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
          break;
        case kSuper:
          trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
          break;
        case kVirtual:
          trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
          break;
        default:
          LOG(FATAL) << "Unexpected invoke type";
      }
      call_inst = OpThreadMem(kOpBlx, trampoline);
    }
  }
  MarkSafepointPC(call_inst);  // Record a safepoint PC for this call site.

  ClobberCallerSave();  // Caller-save registers are invalid after the call.
  if (info->result.location != kLocInvalid) {
    // We have a following MOVE_RESULT - do it now.
    if (info->result.wide) {
      RegLocation ret_loc = GetReturnWide(info->result.fp);
      StoreValueWide(info->result, ret_loc);
    } else {
      RegLocation ret_loc = GetReturn(info->result.fp);
      StoreValue(info->result, ret_loc);
    }
  }
}
1496
1497} // namespace art