/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "Dalvik.h"
18#include "compiler/CompilerInternals.h"
19
20#ifndef _DALVIK_VM_COMPILER_CODEGEN_X86_X86LIR_H
21#define _DALVIK_VM_COMPILER_CODEGEN_X86_X86LIR_H
22
/*
 * Register usage conventions for the x86 back end.
 *
 * For both JIT & interpreter:
 *     esi is Dalvik FP
 *     ebp is native FP
 *     esp is native SP
 *
 * For interpreter:
 *     edi is Dalvik PC (rPC)
 *     ebx is rINST
 *
 * For JIT:
 *     eax, edx, ecx are scratch & caller-save
 *     ebx, edi are scratch & callee-save
 *
 * Calling conventions:
 *     32-bit return in eax
 *     64-bit return in edx:eax
 *     fp on top of fp stack st(0)
 *     Parameters passed on stack, pushed left to right
 *     On entry to target, first parm is at 4(%esp).
 *     For performance, we'll maintain 16-byte stack alignment
 *
 * When transitioning from code cache to interp:
 *     materialize Dalvik PC of target in rPC/%edx
 *     Preload rINST/%ebx such that high 24 bits are zero and
 *     bl contains the non-opcode 8 bits of the 16-bit Dalvik
 *     instruction at (rPC)
 */
51
/* Keys for target-specific scheduling and other optimizations here. */
typedef enum X86TargetOptHints {
    kMaxHoistDistance,
} X86TargetOptHints;
56
/*
 * Data structure tracking the mapping between a Dalvik register (pair) and a
 * native register (pair). The idea is to reuse a previously loaded value
 * when possible, and otherwise to keep the value in a native register for
 * as long as possible.
 */
typedef struct RegisterInfo {
    int reg;                // native register number
    bool inUse;             // has it been allocated?
    bool pair;              // part of a register pair?
    int partner;            // if pair, the other register of the pair
    bool live;              // is there an associated SSA name?
    bool dirty;             // if live, is it dirty?
    int sReg;               // name of the live value
    struct LIR *defStart;   // starting inst in last def sequence
    struct LIR *defEnd;     // ending inst in last def sequence
} RegisterInfo;
74
75typedef struct RegisterPool {
76 BitVector *nullCheckedRegs; // Track which registers have been null-checked
77 int numCoreTemps;
78 RegisterInfo *coreTemps;
79 int nextCoreTemp;
80 int numFPTemps;
81 RegisterInfo *FPTemps;
82 int nextFPTemp;
83 int numCoreRegs;
84 RegisterInfo *coreRegs;
buzbeedfd1bbf2010-09-22 16:19:28 -070085 int numMMRegs;
86 RegisterInfo *MMRegs;
buzbee7520ee72010-09-17 16:01:49 -070087} RegisterPool;
88
/* Memory access widths for load/store generation. */
typedef enum OpSize {
    kWord,
    kLong,
    kSingle,
    kDouble,
    kUnsignedHalf,
    kSignedHalf,
    kUnsignedByte,
    kSignedByte,
} OpSize;
99
/* Target-independent operation kinds lowered to x86 instructions. */
typedef enum OpKind {
    kOpMov,
    kOpCmp,
    kOpLsl,
    kOpLsr,
    kOpAsr,
    kOpRor,
    kOpNot,
    kOpAnd,
    kOpOr,
    kOpXor,
    kOpNeg,
    kOpAdd,
    kOpAdc,
    kOpSub,
    kOpSbc,
    kOpMul,
    kOpDiv,
    kOpRem,
    kOpTst,
    kOpCall,
    kOpPush,
    kOpPop,
    kOp2Char,
    kOp2Short,
    kOp2Byte,
    kOpCondBr,
    kOpUncondBr,
} OpKind;
129
/* XMM registers are numbered immediately above the eight GP registers. */
#define FP_REG_OFFSET 8

/* Encodings for the native x86 registers visible to the allocator. */
typedef enum NativeRegisterPool {
    rEAX = 0,
    rECX = 1,
    rEDX = 2,
    rEBX = 3,
    rESP = 4,
    rEBP = 5,
    rESI = 6,
    rEDI = 7,
    rXMM0 = 0 + FP_REG_OFFSET,
    rXMM1 = 1 + FP_REG_OFFSET,
    rXMM2 = 2 + FP_REG_OFFSET,
    rXMM3 = 3 + FP_REG_OFFSET,
    rXMM4 = 4 + FP_REG_OFFSET,
    rXMM5 = 5 + FP_REG_OFFSET,
    rXMM6 = 6 + FP_REG_OFFSET,
    rXMM7 = 7 + FP_REG_OFFSET,
} NativeRegisterPool;
150
/* Fixed register assignments (see the usage-convention comment above). */
#define rPC   rEDI
#define rFP   rESI
#define rINST rEBX

/* Byte offsets from %esp for outgoing call arguments (16-byte aligned frame). */
#define OUT_ARG0 0
#define OUT_ARG1 4
#define OUT_ARG2 8
#define OUT_ARG3 12
#define OUT_ARG4 16
160
buzbee7520ee72010-09-17 16:01:49 -0700161typedef struct X86LIR {
162 LIR generic;
Dan Bornstein9a1f8162010-12-01 17:02:26 -0800163 //X86Opcode opcode;
buzbee7520ee72010-09-17 16:01:49 -0700164 int operands[4]; // [0..3] = [dest, src1, src2, extra]
165 bool isNop; // LIR is optimized away
166 bool branchInsertSV;// mark for insertion of branch before this instruction,
167 // used to identify mem ops for self verification mode
168 int age; // default is 0, set lazily by the optimizer
169 int aliasInfo; // For Dalvik register access & litpool disambiguation
170 u8 useMask; // Resource mask for use
171 u8 defMask; // Resource mask for def
172} X86LIR;
173
/*
 * Utility macros to traverse the LIR/X86LIR list.
 *
 * The argument is parenthesized so that expressions such as NEXT_LIR(p + 1)
 * expand correctly, and for consistency with the *_LVALUE variants (the
 * originals expanded `lir->generic.next` unparenthesized — a macro-hygiene
 * bug for any non-trivial argument).
 */
#define NEXT_LIR(lir) ((X86LIR *) (lir)->generic.next)
#define PREV_LIR(lir) ((X86LIR *) (lir)->generic.prev)

#define NEXT_LIR_LVALUE(lir) (lir)->generic.next
#define PREV_LIR_LVALUE(lir) (lir)->generic.prev
180
/* Tag value marking a chaining-cell offset slot. */
#define CHAIN_CELL_OFFSET_TAG 0xcdab

/* Chaining-cell sizes in bytes (predicted cells carry extra state). */
#define CHAIN_CELL_NORMAL_SIZE    12
#define CHAIN_CELL_PREDICTED_SIZE 16
185
186#endif /* _DALVIK_VM_COMPILER_CODEGEN_X86_X86LIR_H */