blob: 19f08e1dbdfe1eda0407a74e1b0e787afe1c13e7 [file] [log] [blame]
buzbee7520ee72010-09-17 16:01:49 -07001/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "Dalvik.h"
18#include "compiler/CompilerInternals.h"
19
20#ifndef _DALVIK_VM_COMPILER_CODEGEN_X86_X86LIR_H
21#define _DALVIK_VM_COMPILER_CODEGEN_X86_X86LIR_H
22
23/*
24 * For both JIT & interpreter:
25 * esi is Dalvik FP
26 * ebp is native FP
27 * esp is native SP
28 *
29 * For interpreter:
buzbeedfd1bbf2010-09-22 16:19:28 -070030 * edi is Dalvik PC (rPC)
buzbee7520ee72010-09-17 16:01:49 -070031 * ebx is rINST
32 *
33 * For JIT:
34 * eax, edx, ecx are scratch & caller-save
35 * ebx, edi are scratch & callee-save
36 *
37 * Calling conventions:
38 * 32-bit return in eax
39 * 64-bit return in edx:eax
40 * fp on top of fp stack st(0)
41 * Parameters passed on stack, pushed left to right
42 * On entry to target, first parm is at 4(%esp).
43 * For performance, we'll maintain 16-byte stack alignment
44 *
45 * When transitioning from code cache to interp:
46 * materialize Dalvik PC of target in rPC/%edx
47 * Preload rINST/%ebx such that high 24 bits are zero and
48 * bl contains the non-opcode 8-bits of the 16-bit Dalvik
49 * instruction at (rPC)
50 */
51
/*
 * Keys for target-specific scheduling and other optimizations here.
 * Values are queried by the target-independent compiler to tune codegen.
 */
typedef enum X86TargetOptHints {
    kMaxHoistDistance,  // presumably the max distance an op may be hoisted
                        // by the scheduler — confirm against users of this key
} X86TargetOptHints;
56
57 /*
58 * Data structure tracking the mapping between a Dalvik register (pair) and a
59 * native register (pair). The idea is to reuse the previously loaded value
60 * if possible, otherwise to keep the value in a native register as long as
61 * possible.
62 */
63typedef struct RegisterInfo {
64 int reg; // Reg number
65 bool inUse; // Has it been allocated?
66 bool pair; // Part of a register pair?
67 int partner; // If pair, other reg of pair
68 bool live; // Is there an associated SSA name?
69 bool dirty; // If live, is it dirty?
70 int sReg; // Name of live value
71 struct LIR *defStart; // Starting inst in last def sequence
72 struct LIR *defEnd; // Ending inst in last def sequence
73} RegisterInfo;
74
/*
 * Tracks the state of all allocatable temporary registers, split into the
 * core (integer) and floating-point (XMM) pools.
 */
typedef struct RegisterPool {
    BitVector *nullCheckedRegs; // Track which registers have been null-checked
    int numCoreTemps;           // Number of entries in coreTemps[]
    RegisterInfo *coreTemps;    // Per-register state for core temps
    int nextCoreTemp;           // Index of next core temp to consider for
                                // allocation — presumably round-robin; confirm
    int numFPTemps;             // Number of entries in FPTemps[]
    RegisterInfo *FPTemps;      // Per-register state for FP (XMM) temps
    int nextFPTemp;             // Index of next FP temp to consider
} RegisterPool;
84
/* Operand sizes for memory access and move operations */
typedef enum OpSize {
    kWord,          // 32-bit integer
    kLong,          // 64-bit integer
    kSingle,        // 32-bit float
    kDouble,        // 64-bit float
    kUnsignedHalf,  // 16-bit, zero-extended
    kSignedHalf,    // 16-bit, sign-extended
    kUnsignedByte,  // 8-bit, zero-extended
    kSignedByte,    // 8-bit, sign-extended
} OpSize;
95
/* Target-independent operation kinds, lowered to x86 instructions later */
typedef enum OpKind {
    kOpMov,       // Register/immediate move
    kOpCmp,       // Compare
    kOpLsl,       // Logical shift left
    kOpLsr,       // Logical shift right
    kOpAsr,       // Arithmetic shift right
    kOpRor,       // Rotate right
    kOpNot,       // Bitwise not
    kOpAnd,       // Bitwise and
    kOpOr,        // Bitwise or
    kOpXor,       // Bitwise exclusive-or
    kOpNeg,       // Arithmetic negate
    kOpAdd,       // Add
    kOpAdc,       // Add with carry
    kOpSub,       // Subtract
    kOpSbc,       // Subtract with borrow
    kOpMul,       // Multiply
    kOpDiv,       // Divide
    kOpRem,       // Remainder
    kOpTst,       // Test (compare against zero / bit test)
    kOpCall,      // Call
    kOpPush,      // Push to native stack
    kOpPop,       // Pop from native stack
    kOp2Char,     // Narrow int to char (unsigned 16-bit)
    kOp2Short,    // Narrow int to short (signed 16-bit)
    kOp2Byte,     // Narrow int to byte (signed 8-bit)
    kOpCondBr,    // Conditional branch
    kOpUncondBr,  // Unconditional branch
} OpKind;
125
buzbeedfd1bbf2010-09-22 16:19:28 -0700126#define FP_REG_OFFSET 8
127
128typedef enum NativeRegisterPool {
129 rEAX = 0,
130 rECX = 1,
131 rEDX = 2,
132 rEBX = 3,
133 rESP = 4,
134 rEBP = 5,
135 rESI = 6,
136 rEDI = 7,
137 rXMM0 = 0 + FP_REG_OFFSET,
138 rXMM1 = 1 + FP_REG_OFFSET,
139 rXMM2 = 2 + FP_REG_OFFSET,
140 rXMM3 = 3 + FP_REG_OFFSET,
141 rXMM4 = 4 + FP_REG_OFFSET,
142 rXMM5 = 5 + FP_REG_OFFSET,
143 rXMM6 = 6 + FP_REG_OFFSET,
144 rXMM7 = 7 + FP_REG_OFFSET,
145} NativeRegisterPool;
146
/* Fixed register roles (see the usage comment at the top of this file) */
#define rPC rEDI    // Dalvik PC (interpreter)
#define rFP rESI    // Dalvik frame pointer
#define rINST rEBX  // Current Dalvik instruction word

/*
 * Byte offsets from %esp for outgoing call arguments; parameters are
 * passed on the stack, pushed left to right (see calling-convention
 * comment above).
 */
#define OUT_ARG0 0
#define OUT_ARG1 4
#define OUT_ARG2 8
#define OUT_ARG3 12
#define OUT_ARG4 16
156
buzbee7520ee72010-09-17 16:01:49 -0700157typedef struct X86LIR {
158 LIR generic;
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800159 //X86Opcode opcode;
buzbee7520ee72010-09-17 16:01:49 -0700160 int operands[4]; // [0..3] = [dest, src1, src2, extra]
161 bool isNop; // LIR is optimized away
162 bool branchInsertSV;// mark for insertion of branch before this instruction,
163 // used to identify mem ops for self verification mode
164 int age; // default is 0, set lazily by the optimizer
165 int aliasInfo; // For Dalvik register access & litpool disambiguation
166 u8 useMask; // Resource mask for use
167 u8 defMask; // Resource mask for def
168} X86LIR;
169
/*
 * Utility macros to traverse the LIR/X86LIR list.
 *
 * The argument is now fully parenthesized — the previous expansions used
 * 'lir->generic.next' unparenthesized, which mis-binds for any non-trivial
 * argument expression (e.g. NEXT_LIR(p + 1)).  The casts from LIR * to
 * X86LIR * rely on 'generic' being the first member of X86LIR.
 */
#define NEXT_LIR(lir) ((X86LIR *) (lir)->generic.next)
#define PREV_LIR(lir) ((X86LIR *) (lir)->generic.prev)

/* Lvalue forms: expand to the raw link field so it can be assigned to */
#define NEXT_LIR_LVALUE(lir) ((lir)->generic.next)
#define PREV_LIR_LVALUE(lir) ((lir)->generic.prev)

/* Tag value marking a chain cell offset — presumably a sentinel chosen to be
 * an unlikely code byte pattern; confirm against the chaining code */
#define CHAIN_CELL_OFFSET_TAG   0xcdab

/* Sizes (in bytes) of the emitted chain cell templates */
#define CHAIN_CELL_NORMAL_SIZE 12
#define CHAIN_CELL_PREDICTED_SIZE 16
181
182#endif /* _DALVIK_VM_COMPILER_CODEGEN_X86_X86LIR_H */