/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/dex_file_method_inliner.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "entrypoints/quick/quick_entrypoints.h"

namespace art {


/* Return the position of an SSA name within the argument list */
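// (The ins occupy the virtual registers just above the locals, so the first
// in-argument yields position 0, the next 1, and so on.)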
int ArmMir2Lir::InPosition(int s_reg) {
  int v_reg = mir_graph_->SRegToVReg(s_reg);
  return v_reg - cu_->num_regs;
}

/*
 * Describe an argument. If it's already in an arg register, just leave it
 * there. NOTE: all live arg registers must be locked prior to this call
 * to avoid having them allocated as a temp by downstream utilities.
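 * For example, reading the mapping below: a non-wide argument in position
 * 0..2 lands in register rARM_ARG1 + position; a wide argument starting at
 * position 2 would straddle the last arg register and the frame, so we punt.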
 */
RegLocation ArmMir2Lir::ArgLoc(RegLocation loc) {
  int arg_num = InPosition(loc.s_reg_low);
  if (loc.wide) {
    if (arg_num == 2) {
      // Bad case - half in register, half in frame. Just punt
      loc.location = kLocInvalid;
    } else if (arg_num < 2) {
      loc.low_reg = rARM_ARG1 + arg_num;
      loc.high_reg = loc.low_reg + 1;
      loc.location = kLocPhysReg;
    } else {
      loc.location = kLocDalvikFrame;
    }
  } else {
    if (arg_num < 3) {
      loc.low_reg = rARM_ARG1 + arg_num;
      loc.location = kLocPhysReg;
    } else {
      loc.location = kLocDalvikFrame;
    }
  }
  return loc;
}

/*
 * Load an argument. If already in a register, just return. If in
 * the frame, we can't use the normal LoadValue() because it assumes
 * a proper frame - and we're frameless.
 */
RegLocation ArmMir2Lir::LoadArg(RegLocation loc) {
  if (loc.location == kLocDalvikFrame) {
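    // Layout note (quick ABI as used here): [sp] holds the caller-stored
    // method pointer, so when we're frameless in-arg i lives at
    // [sp + (i + 1) * sizeof(uint32_t)] - hence the "+ 1" below.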
    int start = (InPosition(loc.s_reg_low) + 1) * sizeof(uint32_t);
    loc.low_reg = AllocTemp();
    LoadWordDisp(rARM_SP, start, loc.low_reg);
    if (loc.wide) {
      loc.high_reg = AllocTemp();
      LoadWordDisp(rARM_SP, start + sizeof(uint32_t), loc.high_reg);
    }
    loc.location = kLocPhysReg;
  }
  return loc;
}

/* Lock any referenced arguments that arrive in registers */
void ArmMir2Lir::LockLiveArgs(MIR* mir) {
  int first_in = cu_->num_regs;
  const int num_arg_regs = 3;  // TODO: generalize & move to RegUtil.cc
  for (int i = 0; i < mir->ssa_rep->num_uses; i++) {
    int v_reg = mir_graph_->SRegToVReg(mir->ssa_rep->uses[i]);
    int in_position = v_reg - first_in;
    if (in_position < num_arg_regs) {
      LockTemp(rARM_ARG1 + in_position);
    }
  }
}

/* Find the next MIR, which may be in a following basic block */
// TODO: make this a utility in mir_graph.
MIR* ArmMir2Lir::GetNextMir(BasicBlock** p_bb, MIR* mir) {
  BasicBlock* bb = *p_bb;
  MIR* orig_mir = mir;
  while (bb != NULL) {
    if (mir != NULL) {
      mir = mir->next;
    }
    if (mir != NULL) {
      return mir;
    } else {
      bb = mir_graph_->GetBasicBlock(bb->fall_through);
      *p_bb = bb;
      if (bb) {
        mir = bb->first_mir_insn;
        if (mir != NULL) {
          return mir;
        }
      }
    }
  }
  return orig_mir;
}

/* Used for the "verbose" listing */
// TODO: move to common code
void ArmMir2Lir::GenPrintLabel(MIR* mir) {
  /* Mark the beginning of a Dalvik instruction for line tracking */
  if (cu_->verbose) {
    char* inst_str = mir_graph_->GetDalvikDisassembly(mir);
    MarkBoundary(mir->offset, inst_str);
  }
}

MIR* ArmMir2Lir::SpecialIGet(BasicBlock** bb, MIR* mir,
                             OpSize size, bool long_or_double, bool is_object) {
  int32_t field_offset;
  bool is_volatile;
  uint32_t field_idx = mir->dalvikInsn.vC;
  bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
  if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  RegLocation rl_obj = mir_graph_->GetSrc(mir, 0);
  LockLiveArgs(mir);
  rl_obj = ArmMir2Lir::ArgLoc(rl_obj);
  RegLocation rl_dest;
  if (long_or_double) {
    rl_dest = GetReturnWide(false);
  } else {
    rl_dest = GetReturn(false);
  }
  // Point of no return - no aborts after this
  ArmMir2Lir::GenPrintLabel(mir);
  rl_obj = LoadArg(rl_obj);
  GenIGet(field_idx, mir->optimization_flags, size, rl_dest, rl_obj, long_or_double, is_object);
  return GetNextMir(bb, mir);
}

MIR* ArmMir2Lir::SpecialIPut(BasicBlock** bb, MIR* mir,
                             OpSize size, bool long_or_double, bool is_object) {
  int32_t field_offset;
  bool is_volatile;
  uint32_t field_idx = mir->dalvikInsn.vC;
  bool fast_path = FastInstance(field_idx, false, &field_offset, &is_volatile);
  if (!fast_path || !(mir->optimization_flags & MIR_IGNORE_NULL_CHECK)) {
    return NULL;
  }
  RegLocation rl_src;
  RegLocation rl_obj;
  LockLiveArgs(mir);
  if (long_or_double) {
    rl_src = mir_graph_->GetSrcWide(mir, 0);
    rl_obj = mir_graph_->GetSrc(mir, 2);
  } else {
    rl_src = mir_graph_->GetSrc(mir, 0);
    rl_obj = mir_graph_->GetSrc(mir, 1);
  }
  rl_src = ArmMir2Lir::ArgLoc(rl_src);
  rl_obj = ArmMir2Lir::ArgLoc(rl_obj);
  // Reject if source is split across registers & frame (only a wide source
  // can come back kLocInvalid from ArgLoc; the object reference never does)
  if (rl_src.location == kLocInvalid) {
    ResetRegPool();
    return NULL;
  }
  // Point of no return - no aborts after this
  ArmMir2Lir::GenPrintLabel(mir);
  rl_obj = LoadArg(rl_obj);
  rl_src = LoadArg(rl_src);
  GenIPut(field_idx, mir->optimization_flags, size, rl_src, rl_obj, long_or_double, is_object);
  return GetNextMir(bb, mir);
}

MIR* ArmMir2Lir::SpecialIdentity(MIR* mir) {
  RegLocation rl_src;
  RegLocation rl_dest;
  bool wide = (mir->ssa_rep->num_uses == 2);
  if (wide) {
    rl_src = mir_graph_->GetSrcWide(mir, 0);
    rl_dest = GetReturnWide(false);
  } else {
    rl_src = mir_graph_->GetSrc(mir, 0);
    rl_dest = GetReturn(false);
  }
  LockLiveArgs(mir);
  rl_src = ArmMir2Lir::ArgLoc(rl_src);
  if (rl_src.location == kLocInvalid) {
    ResetRegPool();
    return NULL;
  }
  // Point of no return - no aborts after this
  ArmMir2Lir::GenPrintLabel(mir);
  rl_src = LoadArg(rl_src);
  if (wide) {
    StoreValueWide(rl_dest, rl_src);
  } else {
    StoreValue(rl_dest, rl_src);
  }
  return mir;
}

/*
 * Special-case code generation for simple non-throwing leaf methods.
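 * These methods execute frameless: once the inlined body is emitted, the
 * code below returns with a plain bx lr and clears the frame/spill
 * bookkeeping to match.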
 */
void ArmMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
                                const InlineMethod& special) {
  // TODO: Generate the method using only the data in special. (Requires FastInstance() field
  // validation in DexFileMethodInliner::AnalyseIGetMethod()/AnalyseIPutMethod().)
  DCHECK(special.flags & kInlineSpecial);
  current_dalvik_offset_ = mir->offset;
  MIR* next_mir = NULL;
  switch (special.opcode) {
    case kInlineOpNop:
      DCHECK(mir->dalvikInsn.opcode == Instruction::RETURN_VOID);
      next_mir = mir;
      break;
    case kInlineOpConst:
      ArmMir2Lir::GenPrintLabel(mir);
      LoadConstant(rARM_RET0, special.data);
      next_mir = GetNextMir(&bb, mir);
      break;
    case kInlineOpIGet: {
      InlineIGetIPutData data;
      data.data = special.data;
      OpSize op_size = static_cast<OpSize>(data.d.op_size);
      DCHECK_NE(data.d.op_size, kDouble);  // The inliner doesn't distinguish kDouble, uses kLong.
      bool long_or_double = (data.d.op_size == kLong);
      bool is_object = data.d.is_object;
      next_mir = SpecialIGet(&bb, mir, op_size, long_or_double, is_object);
      break;
    }
    case kInlineOpIPut: {
      InlineIGetIPutData data;
      data.data = special.data;
      OpSize op_size = static_cast<OpSize>(data.d.op_size);
      DCHECK_NE(data.d.op_size, kDouble);  // The inliner doesn't distinguish kDouble, uses kLong.
      bool long_or_double = (data.d.op_size == kLong);
      bool is_object = data.d.is_object;
      next_mir = SpecialIPut(&bb, mir, op_size, long_or_double, is_object);
      break;
    }
    case kInlineOpReturnArg:
      next_mir = SpecialIdentity(mir);
      break;
    default:
      return;
  }
  if (next_mir != NULL) {
    current_dalvik_offset_ = next_mir->offset;
    if (special.opcode != kInlineOpReturnArg) {
      ArmMir2Lir::GenPrintLabel(next_mir);
    }
    NewLIR1(kThumbBx, rARM_LR);
    core_spill_mask_ = 0;
    num_core_spills_ = 0;
    fp_spill_mask_ = 0;
    num_fp_spills_ = 0;
    frame_size_ = 0;
    core_vmap_table_.clear();
    fp_vmap_table_.clear();
  }
}

/*
 * The sparse table in the literal pool is an array of <key,displacement>
 * pairs. For each set, we'll load them as a pair using ldmia.
 * This means that the register number of the temp we use for the key
 * must be lower than the reg for the displacement.
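 * (ldmia always loads the lowest-numbered register of the set from the
 * lowest address, so the in-memory <key,disp> order fixes the required
 * register order.)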
 *
 * The test loop will look something like:
 *
 *   adr   rBase, <table>
 *   ldr   r_val, [rARM_SP, v_reg_off]
 *   mov   r_idx, #table_size
 * lp:
 *   ldmia rBase!, {r_key, r_disp}
 *   sub   r_idx, #1
 *   cmp   r_val, r_key
 *   ifeq
 *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
 *   cbnz  r_idx, lp
 */
void ArmMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
                                 RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
                                                      ArenaAllocator::kAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  int rBase = AllocTemp();
  /* Allocate key and disp temps */
  int r_key = AllocTemp();
  int r_disp = AllocTemp();
  // Make sure r_key's register number is less than r_disp's number for ldmia
  if (r_key > r_disp) {
    int tmp = r_disp;
    r_disp = r_key;
    r_key = tmp;
  }
  // Materialize a pointer to the switch table
  NewLIR3(kThumb2Adr, rBase, 0, WrapPointer(tab_rec));
  // Set up r_idx
  int r_idx = AllocTemp();
  LoadConstant(r_idx, size);
  // Establish loop branch target
  LIR* target = NewLIR0(kPseudoTargetLabel);
  // Load next key/disp
  NewLIR2(kThumb2LdmiaWB, rBase, (1 << r_key) | (1 << r_disp));
  OpRegReg(kOpCmp, r_key, rl_src.low_reg);
  // Go if match. NOTE: No instruction set switch here - must stay Thumb2
  OpIT(kCondEq, "");
  LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp);
  tab_rec->anchor = switch_branch;
  // Needs to use setflags encoding here
  NewLIR3(kThumb2SubsRRI12, r_idx, r_idx, 1);
  OpCondBranch(kCondNe, target);
}


void ArmMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
                                 RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable* tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  tab_rec->targets =
      static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), ArenaAllocator::kAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  int table_base = AllocTemp();
  // Materialize a pointer to the switch table
  NewLIR3(kThumb2Adr, table_base, 0, WrapPointer(tab_rec));
  int low_key = s4FromSwitchData(&table[2]);
  int keyReg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    keyReg = rl_src.low_reg;
  } else {
    keyReg = AllocTemp();
    OpRegRegImm(kOpSub, keyReg, rl_src.low_reg, low_key);
  }
  // Bounds check - if < 0 or >= size, fall through to the code after the switch
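  // A single unsigned compare covers both ends: a negative (biased) key wraps
  // to a large unsigned value, so kCondHi rejects it too.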
  OpRegImm(kOpCmp, keyReg, size - 1);
  LIR* branch_over = OpCondBranch(kCondHi, NULL);

  // Load the displacement from the switch table
  int disp_reg = AllocTemp();
  LoadBaseIndexed(table_base, keyReg, disp_reg, 2, kWord);

  // ..and go! NOTE: No instruction set switch here - must stay Thumb2
  LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg);
  tab_rec->anchor = switch_branch;

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
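 * For example, width == 2 and size == 5 gives 4 + (2*5 + 1)/2 = 9 code units.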
 */
void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData* tab_rec =
      static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), ArenaAllocator::kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
  tab_rec->size = (size * width) + 8;

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();  /* Everything to home location */
  LoadValueDirectFixed(rl_src, r0);
  LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData).Int32Value(),
               rARM_LR);
  // Materialize a pointer to the fill data image
  NewLIR3(kThumb2Adr, r1, 0, WrapPointer(tab_rec));
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rARM_LR);
  MarkSafepointPC(call_inst);
}

/*
 * Handle unlocked -> thin locked transition inline or else call out to quick entrypoint. For more
 * details see monitor.cc.
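 * The fast path below relies on the lock word convention used here: zero
 * means unlocked, so we ldrex the word and, if it is still zero, strex our
 * thin lock id into it; any other value diverts to the slow path.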
 */
void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, r0);  // Get obj
  LockCallTemps();  // Prepare for explicit register usage
  constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
  if (kArchVariantHasGoodBranchPredictor) {
    LIR* null_check_branch;
    if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
      null_check_branch = nullptr;  // No null check.
    } else {
      // If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
      null_check_branch = OpCmpImmBranch(kCondEq, r0, 0, NULL);
    }
    LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
    NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
    LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, r1, 0, NULL);
    NewLIR4(kThumb2Strex, r1, r2, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
    LIR* lock_success_branch = OpCmpImmBranch(kCondEq, r1, 0, NULL);

    LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
    not_unlocked_branch->target = slow_path_target;
    if (null_check_branch != nullptr) {
      null_check_branch->target = slow_path_target;
    }
    // TODO: move to a slow path.
    // Go expensive route - artLockObjectFromCode(obj);
    LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx, rARM_LR);
    MarkSafepointPC(call_inst);

    LIR* success_target = NewLIR0(kPseudoTargetLabel);
    lock_success_branch->target = success_target;
    GenMemBarrier(kLoadLoad);
  } else {
    // Explicit null-check as slow-path is entered using an IT.
    GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
    LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
    NewLIR3(kThumb2Ldrex, r1, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
    OpRegImm(kOpCmp, r1, 0);
    OpIT(kCondEq, "");
    NewLIR4(kThumb2Strex/*eq*/, r1, r2, r0, mirror::Object::MonitorOffset().Int32Value() >> 2);
    OpRegImm(kOpCmp, r1, 0);
    OpIT(kCondNe, "T");
    // Go expensive route - artLockObjectFromCode(self, obj);
    LoadWordDisp/*ne*/(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObject).Int32Value(), rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx/*ne*/, rARM_LR);
    MarkSafepointPC(call_inst);
    GenMemBarrier(kLoadLoad);
  }
}

/*
 * Handle thin locked -> unlocked transition inline or else call out to quick entrypoint. For more
 * details see monitor.cc. Note the code below doesn't use ldrex/strex as the code holds the lock
 * and can only give away ownership if it's suspended.
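 * Unlocking can therefore be a plain load/compare/store: if the lock word
 * still holds our thin lock id, store zero; otherwise take the slow path.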
 */
void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, r0);  // Get obj
  LockCallTemps();  // Prepare for explicit register usage
  LIR* null_check_branch;
  LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
  constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
  if (kArchVariantHasGoodBranchPredictor) {
    if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
      null_check_branch = nullptr;  // No null check.
    } else {
      // If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
      null_check_branch = OpCmpImmBranch(kCondEq, r0, 0, NULL);
    }
    LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1);
    LoadConstantNoClobber(r3, 0);
    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, r1, r2, NULL);
    StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
    LIR* unlock_success_branch = OpUnconditionalBranch(NULL);

    LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
    slow_unlock_branch->target = slow_path_target;
    if (null_check_branch != nullptr) {
      null_check_branch->target = slow_path_target;
    }
    // TODO: move to a slow path.
    // Go expensive route - artUnlockObjectFromCode(obj);
    LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx, rARM_LR);
    MarkSafepointPC(call_inst);

    LIR* success_target = NewLIR0(kPseudoTargetLabel);
    unlock_success_branch->target = success_target;
    GenMemBarrier(kStoreLoad);
  } else {
    // Explicit null-check as slow-path is entered using an IT.
    GenNullCheck(rl_src.s_reg_low, r0, opt_flags);
    LoadWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r1);  // Get lock
    LoadWordDisp(rARM_SELF, Thread::ThinLockIdOffset().Int32Value(), r2);
    LoadConstantNoClobber(r3, 0);
    // Is lock unheld on lock or held by us (== thread_id) on unlock?
    OpRegReg(kOpCmp, r1, r2);
    OpIT(kCondEq, "EE");
    StoreWordDisp/*eq*/(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
    // Go expensive route - UnlockObjectFromCode(obj);
    LoadWordDisp/*ne*/(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObject).Int32Value(), rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx/*ne*/, rARM_LR);
    MarkSafepointPC(call_inst);
    GenMemBarrier(kStoreLoad);
  }
}

void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = Thread::ExceptionOffset().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
  int reset_reg = AllocTemp();
  LoadWordDisp(rARM_SELF, ex_offset, rl_result.low_reg);
  LoadConstant(reset_reg, 0);
  StoreWordDisp(rARM_SELF, ex_offset, reset_reg);
  FreeTemp(reset_reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark garbage collection card. Skip if the value we're storing is null.
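 * The card lives at card_table_base + (tgt_addr >> kCardShift). Storing the
 * base register's own low byte marks it dirty, assuming the card table base
 * is biased so that its low byte equals the dirty-card value.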
 */
void ArmMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) {
  int reg_card_base = AllocTemp();
  int reg_card_no = AllocTemp();
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
  LoadWordDisp(rARM_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
                   kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  int spill_count = num_core_spills_ + num_fp_spills_;
  /*
   * On entry, r0, r1, r2 & r3 are live. Let the register allocation
   * mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing. This leaves the utility
   * code with a single temp: r12. This should be enough.
   */
  LockTemp(r0);
  LockTemp(r1);
  LockTemp(r2);
  LockTemp(r3);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
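   * (When the check is needed, we compute the post-expansion SP into rARM_LR
   * below and compare it against the stack limit from Thread::StackEndOffset
   * before committing it to rARM_SP.)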
   */
  bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
                              (static_cast<size_t>(frame_size_) <
                               Thread::kStackOverflowReservedBytes));
  NewLIR0(kPseudoMethodEntry);
  if (!skip_overflow_check) {
    /* Load stack limit */
    LoadWordDisp(rARM_SELF, Thread::StackEndOffset().Int32Value(), r12);
  }
  /* Spill core callee saves */
  NewLIR1(kThumb2Push, core_spill_mask_);
  /* Need to spill any FP regs? */
  if (num_fp_spills_) {
    /*
     * NOTE: fp spills are a little different from core spills in that
     * they are pushed as a contiguous block. When promoting from
     * the fp set, we must allocate all singles from s16..highest-promoted
     */
    NewLIR1(kThumb2VPushCS, num_fp_spills_);
  }
  if (!skip_overflow_check) {
    OpRegRegImm(kOpSub, rARM_LR, rARM_SP, frame_size_ - (spill_count * 4));
    GenRegRegCheck(kCondCc, rARM_LR, r12, kThrowStackOverflow);
    OpRegCopy(rARM_SP, rARM_LR);  // Establish stack
  } else {
    OpRegImm(kOpSub, rARM_SP, frame_size_ - (spill_count * 4));
  }

  FlushIns(ArgLocs, rl_method);

  FreeTemp(r0);
  FreeTemp(r1);
  FreeTemp(r2);
  FreeTemp(r3);
}

void ArmMir2Lir::GenExitSequence() {
  int spill_count = num_core_spills_ + num_fp_spills_;
  /*
   * In the exit path, r0/r1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(r0);
  LockTemp(r1);

  NewLIR0(kPseudoMethodExit);
  OpRegImm(kOpAdd, rARM_SP, frame_size_ - (spill_count * 4));
  /* Need to restore any FP callee saves? */
  if (num_fp_spills_) {
    NewLIR1(kThumb2VPopCS, num_fp_spills_);
  }
  if (core_spill_mask_ & (1 << rARM_LR)) {
    /* Unspill rARM_LR to rARM_PC */
    core_spill_mask_ &= ~(1 << rARM_LR);
    core_spill_mask_ |= (1 << rARM_PC);
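    // Popping the saved LR value straight into PC folds the return into the
    // register restore, so no separate branch is needed afterwards.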
  }
  NewLIR1(kThumb2Pop, core_spill_mask_);
  if (!(core_spill_mask_ & (1 << rARM_PC))) {
    /* We didn't pop to rARM_PC, so must do a bx rARM_LR */
    NewLIR1(kThumbBx, rARM_LR);
  }
}

}  // namespace art