/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "oat_compilation_unit.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "arm_lir.h"
#include "../codegen_util.h"
#include "../ralloc_util.h"

namespace art {


/* Return the position of an ssa name within the argument list */
static int InPosition(CompilationUnit* cu, int s_reg)
{
  int v_reg = SRegToVReg(cu, s_reg);
  return v_reg - cu->num_regs;
}

/*
 * Describe an argument.  If it's already in an arg register, just leave it
 * there.  NOTE: all live arg registers must be locked prior to this call
 * to avoid having them allocated as a temp by downstream utilities.
 */
RegLocation ArgLoc(CompilationUnit* cu, RegLocation loc)
{
  int arg_num = InPosition(cu, loc.s_reg_low);
  if (loc.wide) {
    if (arg_num == 2) {
      // Bad case - half in register, half in frame.  Just punt
      loc.location = kLocInvalid;
    } else if (arg_num < 2) {
      loc.low_reg = rARM_ARG1 + arg_num;
      loc.high_reg = loc.low_reg + 1;
      loc.location = kLocPhysReg;
    } else {
      loc.location = kLocDalvikFrame;
    }
  } else {
    if (arg_num < 3) {
      loc.low_reg = rARM_ARG1 + arg_num;
      loc.location = kLocPhysReg;
    } else {
      loc.location = kLocDalvikFrame;
    }
  }
  return loc;
}

/*
 * Load an argument.  If already in a register, just return.  If in
 * the frame, we can't use the normal LoadValue() because it assumes
 * a proper frame - and we're frameless.
 */
RegLocation LoadArg(CompilationUnit* cu, RegLocation loc)
{
  if (loc.location == kLocDalvikFrame) {
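    // Frameless special case: stack-passed in-args still sit where the caller
    // left them.  [sp, #0] is a Method* slot, so in-arg word N is at
    // [sp, #(N + 1) * 4], hence the "+1" below.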
    int start = (InPosition(cu, loc.s_reg_low) + 1) * sizeof(uint32_t);
    loc.low_reg = AllocTemp(cu);
    LoadWordDisp(cu, rARM_SP, start, loc.low_reg);
    if (loc.wide) {
      loc.high_reg = AllocTemp(cu);
      LoadWordDisp(cu, rARM_SP, start + sizeof(uint32_t), loc.high_reg);
    }
    loc.location = kLocPhysReg;
  }
  return loc;
}

/* Lock any referenced arguments that arrive in registers */
static void LockLiveArgs(CompilationUnit* cu, MIR* mir)
{
  int first_in = cu->num_regs;
  const int num_arg_regs = 3;  // TODO: generalize & move to RegUtil.cc
  for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
    int v_reg = SRegToVReg(cu, mir->ssa_rep->uses[i]);
    int in_position = v_reg - first_in;
    if (in_position < num_arg_regs) {
      LockTemp(cu, rARM_ARG1 + in_position);
    }
  }
}

/* Find the next MIR, which may be in a following basic block */
static MIR* GetNextMir(CompilationUnit* cu, BasicBlock** p_bb, MIR* mir)
{
  BasicBlock* bb = *p_bb;
  MIR* orig_mir = mir;
  while (bb != NULL) {
    if (mir != NULL) {
      mir = mir->next;
    }
    if (mir != NULL) {
      return mir;
    } else {
      bb = bb->fall_through;
      *p_bb = bb;
      if (bb) {
        mir = bb->first_mir_insn;
        if (mir != NULL) {
          return mir;
        }
      }
    }
  }
  return orig_mir;
}

/* Used for the "verbose" listing */
void GenPrintLabel(CompilationUnit* cu, MIR* mir)
{
  /* Mark the beginning of a Dalvik instruction for line tracking */
  char* inst_str = cu->verbose ?
      GetDalvikDisassembly(cu, mir->dalvikInsn, "") : NULL;
  MarkBoundary(cu, mir->offset, inst_str);
  /* Don't generate the SSA annotation unless verbose mode is on */
  if (cu->verbose && mir->ssa_rep) {
    char* ssa_string = GetSSAString(cu, mir->ssa_rep);
    NewLIR1(cu, kPseudoSSARep, reinterpret_cast<uintptr_t>(ssa_string));
  }
}

static MIR* SpecialIGet(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
                        OpSize size, bool long_or_double, bool is_object)
{
  int field_offset;
  bool is_volatile;
  uint32_t field_idx = mir->dalvikInsn.vC;
  bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
  if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  RegLocation rl_obj = GetSrc(cu, mir, 0);
  LockLiveArgs(cu, mir);
  rl_obj = ArgLoc(cu, rl_obj);
  RegLocation rl_dest;
  if (long_or_double) {
    rl_dest = GetReturnWide(cu, false);
  } else {
    rl_dest = GetReturn(cu, false);
  }
  // Point of no return - no aborts after this
  GenPrintLabel(cu, mir);
  rl_obj = LoadArg(cu, rl_obj);
  GenIGet(cu, field_idx, mir->optimization_flags, size, rl_dest, rl_obj,
          long_or_double, is_object);
  return GetNextMir(cu, bb, mir);
}

static MIR* SpecialIPut(CompilationUnit* cu, BasicBlock** bb, MIR* mir,
                        OpSize size, bool long_or_double, bool is_object)
{
  int field_offset;
  bool is_volatile;
  uint32_t field_idx = mir->dalvikInsn.vC;
  bool fast_path = FastInstance(cu, field_idx, field_offset, is_volatile, false);
  if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  RegLocation rl_src;
  RegLocation rl_obj;
  LockLiveArgs(cu, mir);
  if (long_or_double) {
    rl_src = GetSrcWide(cu, mir, 0);
    rl_obj = GetSrc(cu, mir, 2);
  } else {
    rl_src = GetSrc(cu, mir, 0);
    rl_obj = GetSrc(cu, mir, 1);
  }
  rl_src = ArgLoc(cu, rl_src);
  rl_obj = ArgLoc(cu, rl_obj);
  // Reject if source is split across registers & frame
  if (rl_src.location == kLocInvalid) {
    ResetRegPool(cu);
    return NULL;
  }
  // Point of no return - no aborts after this
  GenPrintLabel(cu, mir);
  rl_obj = LoadArg(cu, rl_obj);
  rl_src = LoadArg(cu, rl_src);
  GenIPut(cu, field_idx, mir->optimization_flags, size, rl_src, rl_obj,
          long_or_double, is_object);
  return GetNextMir(cu, bb, mir);
}

static MIR* SpecialIdentity(CompilationUnit* cu, MIR* mir)
{
  RegLocation rl_src;
  RegLocation rl_dest;
  bool wide = (mir->ssa_rep->num_uses == 2);
  if (wide) {
    rl_src = GetSrcWide(cu, mir, 0);
    rl_dest = GetReturnWide(cu, false);
  } else {
    rl_src = GetSrc(cu, mir, 0);
    rl_dest = GetReturn(cu, false);
  }
  LockLiveArgs(cu, mir);
  rl_src = ArgLoc(cu, rl_src);
  if (rl_src.location == kLocInvalid) {
    ResetRegPool(cu);
    return NULL;
  }
  // Point of no return - no aborts after this
  GenPrintLabel(cu, mir);
  rl_src = LoadArg(cu, rl_src);
  if (wide) {
    StoreValueWide(cu, rl_dest, rl_src);
  } else {
    StoreValue(cu, rl_dest, rl_src);
  }
  return mir;
}

/*
 * Special-case code generation for simple non-throwing leaf methods.
 */
void GenSpecialCase(CompilationUnit* cu, BasicBlock* bb, MIR* mir,
                    SpecialCaseHandler special_case)
{
  cu->current_dalvik_offset = mir->offset;
  MIR* next_mir = NULL;
  switch (special_case) {
    case kNullMethod:
      DCHECK(mir->dalvikInsn.opcode == Instruction::RETURN_VOID);
      next_mir = mir;
      break;
    case kConstFunction:
      GenPrintLabel(cu, mir);
      LoadConstant(cu, rARM_RET0, mir->dalvikInsn.vB);
      next_mir = GetNextMir(cu, &bb, mir);
      break;
    case kIGet:
      next_mir = SpecialIGet(cu, &bb, mir, kWord, false, false);
      break;
    case kIGetBoolean:
    case kIGetByte:
      next_mir = SpecialIGet(cu, &bb, mir, kUnsignedByte, false, false);
      break;
    case kIGetObject:
      next_mir = SpecialIGet(cu, &bb, mir, kWord, false, true);
      break;
    case kIGetChar:
      next_mir = SpecialIGet(cu, &bb, mir, kUnsignedHalf, false, false);
      break;
    case kIGetShort:
      next_mir = SpecialIGet(cu, &bb, mir, kSignedHalf, false, false);
      break;
    case kIGetWide:
      next_mir = SpecialIGet(cu, &bb, mir, kLong, true, false);
      break;
    case kIPut:
      next_mir = SpecialIPut(cu, &bb, mir, kWord, false, false);
      break;
    case kIPutBoolean:
    case kIPutByte:
      next_mir = SpecialIPut(cu, &bb, mir, kUnsignedByte, false, false);
      break;
    case kIPutObject:
      next_mir = SpecialIPut(cu, &bb, mir, kWord, false, true);
      break;
    case kIPutChar:
      next_mir = SpecialIPut(cu, &bb, mir, kUnsignedHalf, false, false);
      break;
    case kIPutShort:
      next_mir = SpecialIPut(cu, &bb, mir, kSignedHalf, false, false);
      break;
    case kIPutWide:
      next_mir = SpecialIPut(cu, &bb, mir, kLong, true, false);
      break;
    case kIdentity:
      next_mir = SpecialIdentity(cu, mir);
      break;
    default:
      return;
  }
  if (next_mir != NULL) {
    cu->current_dalvik_offset = next_mir->offset;
    if (special_case != kIdentity) {
      GenPrintLabel(cu, next_mir);
    }
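    // A special-cased method never builds a frame, so return directly through
    // lr and clear the frame/spill bookkeeping to match.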
    NewLIR1(cu, kThumbBx, rARM_LR);
    cu->core_spill_mask = 0;
    cu->num_core_spills = 0;
    cu->fp_spill_mask = 0;
    cu->num_fp_spills = 0;
    cu->frame_size = 0;
    cu->core_vmap_table.clear();
    cu->fp_vmap_table.clear();
  }
}

/*
 * The sparse table in the literal pool is an array of <key,displacement>
 * pairs.  For each set, we'll load them as a pair using ldmia.
 * This means that the register number of the temp we use for the key
 * must be lower than the reg for the displacement.
 *
 * The test loop will look something like:
 *
 *   adr   rBase, <table>
 *   ldr   r_val, [rARM_SP, v_reg_off]
 *   mov   r_idx, #table_size
 * lp:
 *   ldmia rBase!, {r_key, r_disp}
 *   sub   r_idx, #1
 *   cmp   r_val, r_key
 *   ifeq
 *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
 *   cbnz  r_idx, lp
 */
void GenSparseSwitch(CompilationUnit* cu, uint32_t table_offset,
                     RegLocation rl_src)
{
  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
  if (cu->verbose) {
    DumpSparseSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = cu->current_dalvik_offset;
  int size = table[1];
  tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
  InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));

  // Get the switch value
  rl_src = LoadValue(cu, rl_src, kCoreReg);
  int rBase = AllocTemp(cu);
  /* Allocate key and disp temps */
  int r_key = AllocTemp(cu);
  int r_disp = AllocTemp(cu);
  // Make sure r_key's register number is less than r_disp's number for ldmia
  if (r_key > r_disp) {
    int tmp = r_disp;
    r_disp = r_key;
    r_key = tmp;
  }
  // Materialize a pointer to the switch table
  NewLIR3(cu, kThumb2Adr, rBase, 0, reinterpret_cast<uintptr_t>(tab_rec));
  // Set up r_idx
  int r_idx = AllocTemp(cu);
  LoadConstant(cu, r_idx, size);
  // Establish loop branch target
  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
  // Load next key/disp
  NewLIR2(cu, kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
  OpRegReg(cu, kOpCmp, r_key, rl_src.low_reg);
  // Go if match.  NOTE: No instruction set switch here - must stay Thumb2
  OpIT(cu, kArmCondEq, "");
  LIR* switch_branch = NewLIR1(cu, kThumb2AddPCR, r_disp);
  tab_rec->anchor = switch_branch;
  // Needs to use setflags encoding here
  NewLIR3(cu, kThumb2SubsRRI12, r_idx, r_idx, 1);
  OpCondBranch(cu, kCondNe, target);
}


void GenPackedSwitch(CompilationUnit* cu, uint32_t table_offset,
                     RegLocation rl_src)
{
  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
  if (cu->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(NewMem(cu, sizeof(SwitchTable), true, kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = cu->current_dalvik_offset;
  int size = table[1];
  tab_rec->targets = static_cast<LIR**>(NewMem(cu, size * sizeof(LIR*), true, kAllocLIR));
  InsertGrowableList(cu, &cu->switch_tables, reinterpret_cast<uintptr_t>(tab_rec));

  // Get the switch value
  rl_src = LoadValue(cu, rl_src, kCoreReg);
  int table_base = AllocTemp(cu);
  // Materialize a pointer to the switch table
  NewLIR3(cu, kThumb2Adr, table_base, 0, reinterpret_cast<uintptr_t>(tab_rec));
  int low_key = s4FromSwitchData(&table[2]);
  int keyReg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    keyReg = rl_src.low_reg;
  } else {
    keyReg = AllocTemp(cu);
    OpRegRegImm(cu, kOpSub, keyReg, rl_src.low_reg, low_key);
  }
  // Bounds check - if < 0 or >= size, continue following switch
  OpRegImm(cu, kOpCmp, keyReg, size-1);
  LIR* branch_over = OpCondBranch(cu, kCondHi, NULL);

  // Load the displacement from the switch table
  int disp_reg = AllocTemp(cu);
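  // Each table entry is a 32-bit displacement, so scale the key by 4 (shift of 2).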
  LoadBaseIndexed(cu, table_base, keyReg, disp_reg, 2, kWord);

  // ..and go!  NOTE: No instruction set switch here - must stay Thumb2
  LIR* switch_branch = NewLIR1(cu, kThumb2AddPCR, disp_reg);
  tab_rec->anchor = switch_branch;

  /* branch_over target here */
  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300    magic value
 *  ushort width             width of each element in the table
 *  uint   size              number of elements in the table
 *  ubyte  data[size*width]  table of data values (may contain a single-byte
 *                           padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
void GenFillArrayData(CompilationUnit* cu, uint32_t table_offset, RegLocation rl_src)
{
  const uint16_t* table = cu->insns + cu->current_dalvik_offset + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData *tab_rec =
      static_cast<FillArrayData*>(NewMem(cu, sizeof(FillArrayData), true, kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = cu->current_dalvik_offset;
  uint16_t width = tab_rec->table[1];
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
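  // Total payload bytes: the element data plus the 8-byte header (ident, width, size).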
  tab_rec->size = (size * width) + 8;

  InsertGrowableList(cu, &cu->fill_array_data, reinterpret_cast<uintptr_t>(tab_rec));

  // Making a call - use explicit registers
  FlushAllRegs(cu);   /* Everything to home location */
  LoadValueDirectFixed(cu, rl_src, r0);
  LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
               rARM_LR);
  // Materialize a pointer to the fill data image
  NewLIR3(cu, kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
  ClobberCalleeSave(cu);
  LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
  MarkSafepointPC(cu, call_inst);
}

/*
 * Handle simple case (thin lock) inline.  If it's complicated, bail
 * out to the heavyweight lock/unlock routines.  We'll use dedicated
 * registers here in order to be in the right position in case we
 * need to bail to oat[Lock/Unlock]Object(self, object)
 *
 * r0 -> self pointer [arg0 for oat[Lock/Unlock]Object]
 * r1 -> object [arg1 for oat[Lock/Unlock]Object]
 * r2 -> initial contents of object->lock, later result of strex
 * r3 -> self->thread_id
 * r12 -> allowed to be used by utilities as a general temp
 *
 * The result of the strex is 0 if we acquire the lock.
 *
 * See comments in Sync.c for the layout of the lock word.
 * Of particular interest to this code is the test for the
 * simple case - which we handle inline.  For monitor enter, the
 * simple case is thin lock, held by no-one.  For monitor exit,
 * the simple case is thin lock, held by the unlocking thread with
 * a recurse count of 0.
 *
 * A minor complication is that there is a field in the lock word
 * unrelated to locking: the hash state.  This field must be ignored, but
 * preserved.
 *
 */
void GenMonitorEnter(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
  FlushAllRegs(cu);
  DCHECK_EQ(LW_SHAPE_THIN, 0);
  LoadValueDirectFixed(cu, rl_src, r0);  // Get obj
  LockCallTemps(cu);  // Prepare for explicit register usage
  GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
  LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
  NewLIR3(cu, kThumb2Ldrex, r1, r0,
          Object::MonitorOffset().Int32Value() >> 2);  // Get object->lock
  // Align owner
  OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
  // Is lock unheld on lock or held by us (==thread_id) on unlock?
  NewLIR4(cu, kThumb2Bfi, r2, r1, 0, LW_LOCK_OWNER_SHIFT - 1);
  NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
  OpRegImm(cu, kOpCmp, r1, 0);
  OpIT(cu, kArmCondEq, "");
  NewLIR4(cu, kThumb2Strex, r1, r2, r0,
          Object::MonitorOffset().Int32Value() >> 2);
  OpRegImm(cu, kOpCmp, r1, 0);
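  // If the strex failed (r1 != 0), the IT block below makes the entrypoint load
  // and the call conditional, so the slow path is taken only on contention.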
  OpIT(cu, kArmCondNe, "T");
  // Go expensive route - artLockObjectFromCode(self, obj);
  LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
  ClobberCalleeSave(cu);
  LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
  MarkSafepointPC(cu, call_inst);
  GenMemBarrier(cu, kLoadLoad);
}

/*
 * For monitor unlock, we don't have to use ldrex/strex.  Once
 * we've determined that the lock is thin and that we own it with
 * a zero recursion count, it's safe to punch it back to the
 * initial, unlocked thin state with a store word.
 */
void GenMonitorExit(CompilationUnit* cu, int opt_flags, RegLocation rl_src)
{
  DCHECK_EQ(LW_SHAPE_THIN, 0);
  FlushAllRegs(cu);
  LoadValueDirectFixed(cu, rl_src, r0);  // Get obj
  LockCallTemps(cu);  // Prepare for explicit register usage
  GenNullCheck(cu, rl_src.s_reg_low, r0, opt_flags);
  LoadWordDisp(cu, r0, Object::MonitorOffset().Int32Value(), r1);  // Get lock
  LoadWordDisp(cu, rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
  // Is lock unheld on lock or held by us (==thread_id) on unlock?
  OpRegRegImm(cu, kOpAnd, r3, r1,
              (LW_HASH_STATE_MASK << LW_HASH_STATE_SHIFT));
  // Align owner
  OpRegImm(cu, kOpLsl, r2, LW_LOCK_OWNER_SHIFT);
  NewLIR3(cu, kThumb2Bfc, r1, LW_HASH_STATE_SHIFT, LW_LOCK_OWNER_SHIFT - 1);
  OpRegReg(cu, kOpSub, r1, r2);
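  // r1 is now zero only for a thin lock that we own with a recursion count of
  // zero; the IT block below then either stores back the preserved hash state
  // (r3) or falls through to the unlock entrypoint call.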
  OpIT(cu, kArmCondEq, "EE");
  StoreWordDisp(cu, r0, Object::MonitorOffset().Int32Value(), r3);
  // Go expensive route - UnlockObjectFromCode(obj);
  LoadWordDisp(cu, rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
  ClobberCalleeSave(cu);
  LIR* call_inst = OpReg(cu, kOpBlx, rARM_LR);
  MarkSafepointPC(cu, call_inst);
  GenMemBarrier(cu, kStoreLoad);
}

/*
 * Mark garbage collection card.  Skip if the value we're storing is null.
 */
void MarkGCCard(CompilationUnit* cu, int val_reg, int tgt_addr_reg)
{
  int reg_card_base = AllocTemp(cu);
  int reg_card_no = AllocTemp(cu);
  LIR* branch_over = OpCmpImmBranch(cu, kCondEq, val_reg, 0, NULL);
  LoadWordDisp(cu, rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
  OpRegRegImm(cu, kOpLsr, reg_card_no, tgt_addr_reg, CardTable::kCardShift);
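  // The byte stored is the low byte of reg_card_base itself; the card table
  // base is biased so that this value marks the card dirty.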
  StoreBaseIndexed(cu, reg_card_base, reg_card_no, reg_card_base, 0,
                   kUnsignedByte);
  LIR* target = NewLIR0(cu, kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(cu, reg_card_base);
  FreeTemp(cu, reg_card_no);
}

void GenEntrySequence(CompilationUnit* cu, RegLocation* ArgLocs,
                      RegLocation rl_method)
{
  int spill_count = cu->num_core_spills + cu->num_fp_spills;
  /*
   * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
   * mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.  This leaves the utility
   * code with a single temp: r12.  This should be enough.
   */
  LockTemp(cu, r0);
  LockTemp(cu, r1);
  LockTemp(cu, r2);
  LockTemp(cu, r3);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  bool skip_overflow_check = ((cu->attrs & METHOD_IS_LEAF) &&
                              (static_cast<size_t>(cu->frame_size) <
                               Thread::kStackOverflowReservedBytes));
  NewLIR0(cu, kPseudoMethodEntry);
  if (!skip_overflow_check) {
    /* Load stack limit */
    LoadWordDisp(cu, rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
  }
  /* Spill core callee saves */
  NewLIR1(cu, kThumb2Push, cu->core_spill_mask);
  /* Need to spill any FP regs? */
  if (cu->num_fp_spills) {
    /*
     * NOTE: fp spills are a little different from core spills in that
     * they are pushed as a contiguous block.  When promoting from
     * the fp set, we must allocate all singles from s16..highest-promoted
     */
    NewLIR1(cu, kThumb2VPushCS, cu->num_fp_spills);
  }
  if (!skip_overflow_check) {
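    // Compute the post-expansion SP into lr, throw StackOverflow if it would
    // drop below the stack limit held in r12, then commit it as the new SP.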
    OpRegRegImm(cu, kOpSub, rARM_LR, rARM_SP, cu->frame_size - (spill_count * 4));
    GenRegRegCheck(cu, kCondCc, rARM_LR, r12, kThrowStackOverflow);
    OpRegCopy(cu, rARM_SP, rARM_LR);  // Establish stack
  } else {
    OpRegImm(cu, kOpSub, rARM_SP, cu->frame_size - (spill_count * 4));
  }

  FlushIns(cu, ArgLocs, rl_method);

  FreeTemp(cu, r0);
  FreeTemp(cu, r1);
  FreeTemp(cu, r2);
  FreeTemp(cu, r3);
}

void GenExitSequence(CompilationUnit* cu)
{
  int spill_count = cu->num_core_spills + cu->num_fp_spills;
  /*
   * In the exit path, r0/r1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(cu, r0);
  LockTemp(cu, r1);

  NewLIR0(cu, kPseudoMethodExit);
  OpRegImm(cu, kOpAdd, rARM_SP, cu->frame_size - (spill_count * 4));
  /* Need to restore any FP callee saves? */
  if (cu->num_fp_spills) {
    NewLIR1(cu, kThumb2VPopCS, cu->num_fp_spills);
  }
  if (cu->core_spill_mask & (1 << rARM_LR)) {
    /* Unspill rARM_LR to rARM_PC */
    cu->core_spill_mask &= ~(1 << rARM_LR);
    cu->core_spill_mask |= (1 << rARM_PC);
  }
  NewLIR1(cu, kThumb2Pop, cu->core_spill_mask);
  if (!(cu->core_spill_mask & (1 << rARM_PC))) {
    /* We didn't pop to rARM_PC, so must do a bx rARM_LR */
    NewLIR1(cu, kThumbBx, rARM_LR);
  }
}

}  // namespace art