// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
#define V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_

namespace v8 {
namespace internal {
namespace compiler {

// ARM-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(ArmAdd) \
  V(ArmAnd) \
  V(ArmBic) \
  V(ArmClz) \
  V(ArmCmp) \
  V(ArmCmn) \
  V(ArmTst) \
  V(ArmTeq) \
  V(ArmOrr) \
  V(ArmEor) \
  V(ArmSub) \
  V(ArmRsb) \
  V(ArmMul) \
  V(ArmMla) \
  V(ArmMls) \
  V(ArmSmull) \
  V(ArmSmmul) \
  V(ArmSmmla) \
  V(ArmUmull) \
  V(ArmSdiv) \
  V(ArmUdiv) \
  V(ArmMov) \
  V(ArmMvn) \
  V(ArmBfc) \
  V(ArmUbfx) \
  V(ArmSbfx) \
  V(ArmSxtb) \
  V(ArmSxth) \
  V(ArmSxtab) \
  V(ArmSxtah) \
  V(ArmUxtb) \
  V(ArmUxth) \
  V(ArmUxtab) \
  V(ArmRbit) \
  V(ArmUxtah) \
  V(ArmAddPair) \
  V(ArmSubPair) \
  V(ArmMulPair) \
  V(ArmLslPair) \
  V(ArmLsrPair) \
  V(ArmAsrPair) \
  V(ArmVcmpF32) \
  V(ArmVaddF32) \
  V(ArmVsubF32) \
  V(ArmVmulF32) \
  V(ArmVmlaF32) \
  V(ArmVmlsF32) \
  V(ArmVdivF32) \
  V(ArmVabsF32) \
  V(ArmVnegF32) \
  V(ArmVsqrtF32) \
  V(ArmVcmpF64) \
  V(ArmVaddF64) \
  V(ArmVsubF64) \
  V(ArmVmulF64) \
  V(ArmVmlaF64) \
  V(ArmVmlsF64) \
  V(ArmVdivF64) \
  V(ArmVmodF64) \
  V(ArmVabsF64) \
  V(ArmVnegF64) \
  V(ArmVsqrtF64) \
  V(ArmVrintmF32) \
  V(ArmVrintmF64) \
  V(ArmVrintpF32) \
  V(ArmVrintpF64) \
  V(ArmVrintzF32) \
  V(ArmVrintzF64) \
  V(ArmVrintaF64) \
  V(ArmVrintnF32) \
  V(ArmVrintnF64) \
  V(ArmVcvtF32F64) \
  V(ArmVcvtF64F32) \
  V(ArmVcvtF32S32) \
  V(ArmVcvtF32U32) \
  V(ArmVcvtF64S32) \
  V(ArmVcvtF64U32) \
  V(ArmVcvtS32F32) \
  V(ArmVcvtU32F32) \
  V(ArmVcvtS32F64) \
  V(ArmVcvtU32F64) \
  V(ArmVmovU32F32) \
  V(ArmVmovF32U32) \
  V(ArmVmovLowU32F64) \
  V(ArmVmovLowF64U32) \
  V(ArmVmovHighU32F64) \
  V(ArmVmovHighF64U32) \
  V(ArmVmovF64U32U32) \
  V(ArmVmovU32U32F64) \
  V(ArmVldrF32) \
  V(ArmVstrF32) \
  V(ArmVldrF64) \
  V(ArmVstrF64) \
  V(ArmFloat32Max) \
  V(ArmFloat64Max) \
  V(ArmFloat32Min) \
  V(ArmFloat64Min) \
  V(ArmFloat64SilenceNaN) \
  V(ArmLdrb) \
  V(ArmLdrsb) \
  V(ArmStrb) \
  V(ArmLdrh) \
  V(ArmLdrsh) \
  V(ArmStrh) \
  V(ArmLdr) \
  V(ArmStr) \
  V(ArmPush) \
  V(ArmPoke) \
  V(ArmFloat32x4Splat) \
  V(ArmFloat32x4ExtractLane) \
  V(ArmFloat32x4ReplaceLane) \
  V(ArmFloat32x4FromInt32x4) \
  V(ArmFloat32x4FromUint32x4) \
  V(ArmFloat32x4Abs) \
  V(ArmFloat32x4Neg) \
  V(ArmFloat32x4Add) \
  V(ArmFloat32x4Sub) \
  V(ArmFloat32x4Equal) \
  V(ArmFloat32x4NotEqual) \
  V(ArmInt32x4Splat) \
  V(ArmInt32x4ExtractLane) \
  V(ArmInt32x4ReplaceLane) \
  V(ArmInt32x4FromFloat32x4) \
  V(ArmUint32x4FromFloat32x4) \
  V(ArmInt32x4Neg) \
  V(ArmInt32x4ShiftLeftByScalar) \
  V(ArmInt32x4ShiftRightByScalar) \
  V(ArmInt32x4Add) \
  V(ArmInt32x4Sub) \
  V(ArmInt32x4Mul) \
  V(ArmInt32x4Min) \
  V(ArmInt32x4Max) \
  V(ArmInt32x4Equal) \
  V(ArmInt32x4NotEqual) \
  V(ArmInt32x4GreaterThan) \
  V(ArmInt32x4GreaterThanOrEqual) \
  V(ArmUint32x4ShiftRightByScalar) \
  V(ArmUint32x4Min) \
  V(ArmUint32x4Max) \
  V(ArmUint32x4GreaterThan) \
  V(ArmUint32x4GreaterThanOrEqual) \
  V(ArmInt16x8Splat) \
  V(ArmInt16x8ExtractLane) \
  V(ArmInt16x8ReplaceLane) \
  V(ArmInt16x8Neg) \
  V(ArmInt16x8ShiftLeftByScalar) \
  V(ArmInt16x8ShiftRightByScalar) \
  V(ArmInt16x8Add) \
  V(ArmInt16x8AddSaturate) \
  V(ArmInt16x8Sub) \
  V(ArmInt16x8SubSaturate) \
  V(ArmInt16x8Mul) \
  V(ArmInt16x8Min) \
  V(ArmInt16x8Max) \
  V(ArmInt16x8Equal) \
  V(ArmInt16x8NotEqual) \
  V(ArmInt16x8GreaterThan) \
  V(ArmInt16x8GreaterThanOrEqual) \
  V(ArmUint16x8ShiftRightByScalar) \
  V(ArmUint16x8AddSaturate) \
  V(ArmUint16x8SubSaturate) \
  V(ArmUint16x8Min) \
  V(ArmUint16x8Max) \
  V(ArmUint16x8GreaterThan) \
  V(ArmUint16x8GreaterThanOrEqual) \
  V(ArmInt8x16Splat) \
  V(ArmInt8x16ExtractLane) \
  V(ArmInt8x16ReplaceLane) \
  V(ArmInt8x16Neg) \
  V(ArmInt8x16ShiftLeftByScalar) \
  V(ArmInt8x16ShiftRightByScalar) \
  V(ArmInt8x16Add) \
  V(ArmInt8x16AddSaturate) \
  V(ArmInt8x16Sub) \
  V(ArmInt8x16SubSaturate) \
  V(ArmInt8x16Mul) \
  V(ArmInt8x16Min) \
  V(ArmInt8x16Max) \
  V(ArmInt8x16Equal) \
  V(ArmInt8x16NotEqual) \
  V(ArmInt8x16GreaterThan) \
  V(ArmInt8x16GreaterThanOrEqual) \
  V(ArmUint8x16ShiftRightByScalar) \
  V(ArmUint8x16AddSaturate) \
  V(ArmUint8x16SubSaturate) \
  V(ArmUint8x16Min) \
  V(ArmUint8x16Max) \
  V(ArmUint8x16GreaterThan) \
  V(ArmUint8x16GreaterThanOrEqual) \
  V(ArmSimd128And) \
  V(ArmSimd128Or) \
  V(ArmSimd128Xor) \
  V(ArmSimd128Not) \
  V(ArmSimd32x4Select) \
  V(ArmSimd16x8Select) \
  V(ArmSimd8x16Select)

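// A minimal illustration of how an X-macro list like the one above is
// consumed (DECLARE_OPCODE and SampleArchOpcode are hypothetical names used
// only for this sketch, not part of this header): a client supplies a
// definition for V and expands the list, e.g. into enumerators:
//
//   #define DECLARE_OPCODE(Name) k##Name,
//   enum SampleArchOpcode { TARGET_ARCH_OPCODE_LIST(DECLARE_OPCODE) };
//   #undef DECLARE_OPCODE
//
// which yields kArmAdd, kArmAnd, and so on, keeping the opcode list and all
// of its uses in sync from a single definition.
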
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(Offset_RI)        /* [%r0 + K] */ \
  V(Offset_RR)        /* [%r0 + %r1] */ \
  V(Operand2_I)       /* K */ \
  V(Operand2_R)       /* %r0 */ \
  V(Operand2_R_ASR_I) /* %r0 ASR K */ \
  V(Operand2_R_LSL_I) /* %r0 LSL K */ \
  V(Operand2_R_LSR_I) /* %r0 LSR K */ \
  V(Operand2_R_ROR_I) /* %r0 ROR K */ \
  V(Operand2_R_ASR_R) /* %r0 ASR %r1 */ \
  V(Operand2_R_LSL_R) /* %r0 LSL %r1 */ \
  V(Operand2_R_LSR_R) /* %r0 LSR %r1 */ \
  V(Operand2_R_ROR_R) /* %r0 ROR %r1 */

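// A hedged sketch of expanding the addressing modes the same way
// (DECLARE_MODE and SampleAddressingMode are hypothetical names for this
// sketch only): after register allocation the code generator decodes the
// addressing-mode bits of an InstructionCode and picks the matching assembler
// overload, e.g. Operand2_R_LSL_I selects a shifted-register second operand
// of the form Operand(reg, LSL, imm):
//
//   #define DECLARE_MODE(Name) kMode_##Name,
//   enum SampleAddressingMode { TARGET_ADDRESSING_MODE_LIST(DECLARE_MODE) };
//   #undef DECLARE_MODE
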
}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_