blob: 6d2ddcd40de381946f515595c85dce701a5d41d4 [file] [log] [blame]
Ben Cheng5d90c202009-11-22 23:31:11 -08001/*
2 * Copyright (C) 2009 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17/*
18 * This file contains codegen and support common to all supported
19 * ARM variants. It is included by:
20 *
21 * Codegen-$(TARGET_ARCH_VARIANT).c
22 *
23 * which combines this common code with specific support found in the
24 * applicable directory below this one.
25 */
26
27#include "compiler/Loop.h"
28
/* Array holding the entry offset of each template relative to the first one */
static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK];

/* Track exercised opcodes (256 slots — presumably one per Dalvik opcode byte; TODO confirm) */
static int opcodeCoverage[256];
34
Bill Buzbee1f748632010-03-02 16:14:41 -080035static void setMemRefType(ArmLIR *lir, bool isLoad, int memType)
36{
37 u8 *maskPtr;
38 u8 mask;
39 assert( EncodingMap[lir->opCode].flags & (IS_LOAD | IS_STORE));
40 if (isLoad) {
41 maskPtr = &lir->useMask;
42 mask = ENCODE_MEM_USE;
43 } else {
44 maskPtr = &lir->defMask;
45 mask = ENCODE_MEM_DEF;
46 }
47 /* Clear out the memref flags */
48 *maskPtr &= ~mask;
49 /* ..and then add back the one we need */
50 switch(memType) {
51 case kLiteral:
52 assert(isLoad);
53 *maskPtr |= (ENCODE_LITERAL | ENCODE_LITPOOL_REF);
54 break;
55 case kDalvikReg:
56 *maskPtr |= (ENCODE_DALVIK_REG | ENCODE_FRAME_REF);
57 break;
58 case kHeapRef:
59 *maskPtr |= ENCODE_HEAP_REF;
60 break;
61 default:
62 LOGE("Jit: invalid memref kind - %d", memType);
63 dvmAbort();
64 }
65}
66
Ben Cheng5d90c202009-11-22 23:31:11 -080067/*
68 * Mark load/store instructions that access Dalvik registers through rFP +
69 * offset.
70 */
71static void annotateDalvikRegAccess(ArmLIR *lir, int regId, bool isLoad)
72{
Bill Buzbee1f748632010-03-02 16:14:41 -080073 setMemRefType(lir, isLoad, kDalvikReg);
Ben Cheng5d90c202009-11-22 23:31:11 -080074
75 /*
76 * Store the Dalvik register id in aliasInfo. Mark he MSB if it is a 64-bit
77 * access.
78 */
79 lir->aliasInfo = regId;
80 if (DOUBLEREG(lir->operands[0])) {
81 lir->aliasInfo |= 0x80000000;
82 }
83}
84
85/*
86 * Decode the register id and mark the corresponding bit(s).
87 */
88static inline void setupRegMask(u8 *mask, int reg)
89{
90 u8 seed;
91 int shift;
92 int regId = reg & 0x1f;
93
94 /*
95 * Each double register is equal to a pair of single-precision FP registers
96 */
97 seed = DOUBLEREG(reg) ? 3 : 1;
98 /* FP register starts at bit position 16 */
99 shift = FPREG(reg) ? kFPReg0 : 0;
100 /* Expand the double register id into single offset */
101 shift += regId;
102 *mask |= seed << shift;
103}
104
/*
 * Populate the use/def resource masks of an LIR instruction from the
 * static flags recorded for its opcode in EncodingMap.
 *
 * The masks drive the scheduler/optimizer's dependency analysis: defMask
 * collects resources the instruction writes, useMask the resources it
 * reads.  NOTE(review): the order of the clauses below matters — e.g. the
 * IS_IT clause overwrites defMask wholesale — so do not reorder them.
 */
static void setupResourceMasks(ArmLIR *lir)
{
    int opCode = lir->opCode;
    int flags;

    if (opCode <= 0) {
        /*
         * Non-positive opcodes carry no encoding flags — presumably these
         * are pseudo opcodes with no EncodingMap entry; TODO confirm.
         */
        lir->useMask = lir->defMask = 0;
        return;
    }

    flags = EncodingMap[lir->opCode].flags;

    /* Set up the mask for resources that are updated */
    if (flags & (IS_LOAD | IS_STORE)) {
        /* Default to heap - will catch specialized classes later */
        setMemRefType(lir, flags & IS_LOAD, kHeapRef);
    }

    /* Branches both read and write the PC */
    if (flags & IS_BRANCH) {
        lir->defMask |= ENCODE_REG_PC;
        lir->useMask |= ENCODE_REG_PC;
    }

    if (flags & REG_DEF0) {
        setupRegMask(&lir->defMask, lir->operands[0]);
    }

    if (flags & REG_DEF1) {
        setupRegMask(&lir->defMask, lir->operands[1]);
    }

    if (flags & REG_DEF_SP) {
        lir->defMask |= ENCODE_REG_SP;
    }

    if (flags & REG_DEF_LR) {
        lir->defMask |= ENCODE_REG_LR;
    }

    /* Register-list operands (e.g. for push/pop style encodings) */
    if (flags & REG_DEF_LIST0) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_DEF_LIST1) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & SETS_CCODES) {
        lir->defMask |= ENCODE_CCODE;
    }

    /* Conservatively treat the IT block: assume it clobbers everything */
    if (flags & IS_IT) {
        lir->defMask = ENCODE_ALL;
    }

    /* Set up the mask for resources that are used */
    if (flags & IS_BRANCH) {
        lir->useMask |= ENCODE_REG_PC;
    }

    /* Operand i is a use iff the corresponding kRegUse bit is set */
    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
        int i;

        for (i = 0; i < 4; i++) {
            if (flags & (1 << (kRegUse0 + i))) {
                setupRegMask(&lir->useMask, lir->operands[i]);
            }
        }
    }

    if (flags & REG_USE_PC) {
        lir->useMask |= ENCODE_REG_PC;
    }

    if (flags & REG_USE_SP) {
        lir->useMask |= ENCODE_REG_SP;
    }

    if (flags & REG_USE_LIST0) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_USE_LIST1) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & USES_CCODES) {
        lir->useMask |= ENCODE_CCODE;
    }
}
199
200/*
201 * The following are building blocks to construct low-level IRs with 0 - 4
202 * operands.
203 */
204static ArmLIR *newLIR0(CompilationUnit *cUnit, ArmOpCode opCode)
205{
206 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
207 assert(isPseudoOpCode(opCode) || (EncodingMap[opCode].flags & NO_OPERAND));
208 insn->opCode = opCode;
209 setupResourceMasks(insn);
210 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
211 return insn;
212}
213
214static ArmLIR *newLIR1(CompilationUnit *cUnit, ArmOpCode opCode,
215 int dest)
216{
217 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
218 assert(isPseudoOpCode(opCode) || (EncodingMap[opCode].flags & IS_UNARY_OP));
219 insn->opCode = opCode;
220 insn->operands[0] = dest;
221 setupResourceMasks(insn);
222 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
223 return insn;
224}
225
226static ArmLIR *newLIR2(CompilationUnit *cUnit, ArmOpCode opCode,
227 int dest, int src1)
228{
229 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
230 assert(isPseudoOpCode(opCode) ||
231 (EncodingMap[opCode].flags & IS_BINARY_OP));
232 insn->opCode = opCode;
233 insn->operands[0] = dest;
234 insn->operands[1] = src1;
235 setupResourceMasks(insn);
236 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
237 return insn;
238}
239
240static ArmLIR *newLIR3(CompilationUnit *cUnit, ArmOpCode opCode,
241 int dest, int src1, int src2)
242{
243 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
244 if (!(EncodingMap[opCode].flags & IS_TERTIARY_OP)) {
245 LOGE("Bad LIR3: %s[%d]",EncodingMap[opCode].name,opCode);
246 }
247 assert(isPseudoOpCode(opCode) ||
248 (EncodingMap[opCode].flags & IS_TERTIARY_OP));
249 insn->opCode = opCode;
250 insn->operands[0] = dest;
251 insn->operands[1] = src1;
252 insn->operands[2] = src2;
253 setupResourceMasks(insn);
254 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
255 return insn;
256}
257
258static ArmLIR *newLIR4(CompilationUnit *cUnit, ArmOpCode opCode,
259 int dest, int src1, int src2, int info)
260{
261 ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
262 assert(isPseudoOpCode(opCode) ||
263 (EncodingMap[opCode].flags & IS_QUAD_OP));
264 insn->opCode = opCode;
265 insn->operands[0] = dest;
266 insn->operands[1] = src1;
267 insn->operands[2] = src2;
268 insn->operands[3] = info;
269 setupResourceMasks(insn);
270 dvmCompilerAppendLIR(cUnit, (LIR *) insn);
271 return insn;
272}
273
274/*
275 * If the next instruction is a move-result or move-result-long,
276 * return the target Dalvik sReg[s] and convert the next to a
277 * nop. Otherwise, return INVALID_SREG. Used to optimize method inlining.
278 */
279static RegLocation inlinedTarget(CompilationUnit *cUnit, MIR *mir,
280 bool fpHint)
281{
282 if (mir->next &&
283 ((mir->next->dalvikInsn.opCode == OP_MOVE_RESULT) ||
284 (mir->next->dalvikInsn.opCode == OP_MOVE_RESULT_OBJECT))) {
285 mir->next->dalvikInsn.opCode = OP_NOP;
Bill Buzbeec6f10662010-02-09 11:16:15 -0800286 return dvmCompilerGetDest(cUnit, mir->next, 0);
Ben Cheng5d90c202009-11-22 23:31:11 -0800287 } else {
288 RegLocation res = LOC_DALVIK_RETURN_VAL;
289 res.fp = fpHint;
290 return res;
291 }
292}
293
294/*
295 * Search the existing constants in the literal pool for an exact or close match
296 * within specified delta (greater or equal to 0).
297 */
298static ArmLIR *scanLiteralPool(CompilationUnit *cUnit, int value,
299 unsigned int delta)
300{
301 LIR *dataTarget = cUnit->wordList;
302 while (dataTarget) {
303 if (((unsigned) (value - ((ArmLIR *) dataTarget)->operands[0])) <=
304 delta)
305 return (ArmLIR *) dataTarget;
306 dataTarget = dataTarget->next;
307 }
308 return NULL;
309}
310
311/*
312 * The following are building blocks to insert constants into the pool or
313 * instruction streams.
314 */
315
316/* Add a 32-bit constant either in the constant pool or mixed with code */
317static ArmLIR *addWordData(CompilationUnit *cUnit, int value, bool inPlace)
318{
319 /* Add the constant to the literal pool */
320 if (!inPlace) {
321 ArmLIR *newValue = dvmCompilerNew(sizeof(ArmLIR), true);
322 newValue->operands[0] = value;
323 newValue->generic.next = cUnit->wordList;
324 cUnit->wordList = (LIR *) newValue;
325 return newValue;
326 } else {
327 /* Add the constant in the middle of code stream */
328 newLIR1(cUnit, kArm16BitData, (value & 0xffff));
329 newLIR1(cUnit, kArm16BitData, (value >> 16));
330 }
331 return NULL;
332}
333
334static RegLocation inlinedTargetWide(CompilationUnit *cUnit, MIR *mir,
335 bool fpHint)
336{
337 if (mir->next &&
338 (mir->next->dalvikInsn.opCode == OP_MOVE_RESULT_WIDE)) {
339 mir->next->dalvikInsn.opCode = OP_NOP;
Bill Buzbeec6f10662010-02-09 11:16:15 -0800340 return dvmCompilerGetDestWide(cUnit, mir->next, 0, 1);
Ben Cheng5d90c202009-11-22 23:31:11 -0800341 } else {
342 RegLocation res = LOC_DALVIK_RETURN_VAL_WIDE;
343 res.fp = fpHint;
344 return res;
345 }
346}
347
348
349/*
350 * Generate an kArmPseudoBarrier marker to indicate the boundary of special
351 * blocks.
352 */
353static void genBarrier(CompilationUnit *cUnit)
354{
355 ArmLIR *barrier = newLIR0(cUnit, kArmPseudoBarrier);
356 /* Mark all resources as being clobbered */
357 barrier->defMask = -1;
358}
359
360/* Create the PC reconstruction slot if not already done */
361extern ArmLIR *genCheckCommon(CompilationUnit *cUnit, int dOffset,
362 ArmLIR *branch,
363 ArmLIR *pcrLabel)
364{
Bill Buzbee1f5cd6f2010-01-11 21:44:36 -0800365 /* Forget all def info (because we might rollback here. Bug #2367397 */
Bill Buzbeec6f10662010-02-09 11:16:15 -0800366 dvmCompilerResetDefTracking(cUnit);
Bill Buzbee1f5cd6f2010-01-11 21:44:36 -0800367
Ben Cheng5d90c202009-11-22 23:31:11 -0800368 /* Set up the place holder to reconstruct this Dalvik PC */
369 if (pcrLabel == NULL) {
370 int dPC = (int) (cUnit->method->insns + dOffset);
371 pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
372 pcrLabel->opCode = ARM_PSEUDO_kPCReconstruction_CELL;
373 pcrLabel->operands[0] = dPC;
374 pcrLabel->operands[1] = dOffset;
375 /* Insert the place holder to the growable list */
376 dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
377 }
378 /* Branch to the PC reconstruction code */
379 branch->generic.target = (LIR *) pcrLabel;
380 return pcrLabel;
381}