// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_

namespace v8 {
namespace internal {
namespace compiler {

// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(Arm64Add) \
  V(Arm64Add32) \
  V(Arm64And) \
  V(Arm64And32) \
  V(Arm64Bic) \
  V(Arm64Bic32) \
  V(Arm64Clz) \
  V(Arm64Clz32) \
  V(Arm64Cmp) \
  V(Arm64Cmp32) \
  V(Arm64Cmn) \
  V(Arm64Cmn32) \
  V(Arm64Tst) \
  V(Arm64Tst32) \
  V(Arm64Or) \
  V(Arm64Or32) \
  V(Arm64Orn) \
  V(Arm64Orn32) \
  V(Arm64Eor) \
  V(Arm64Eor32) \
  V(Arm64Eon) \
  V(Arm64Eon32) \
  V(Arm64Sub) \
  V(Arm64Sub32) \
  V(Arm64Mul) \
  V(Arm64Mul32) \
  V(Arm64Smull) \
  V(Arm64Umull) \
  V(Arm64Madd) \
  V(Arm64Madd32) \
  V(Arm64Msub) \
  V(Arm64Msub32) \
  V(Arm64Mneg) \
  V(Arm64Mneg32) \
  V(Arm64Idiv) \
  V(Arm64Idiv32) \
  V(Arm64Udiv) \
  V(Arm64Udiv32) \
  V(Arm64Imod) \
  V(Arm64Imod32) \
  V(Arm64Umod) \
  V(Arm64Umod32) \
  V(Arm64Not) \
  V(Arm64Not32) \
  V(Arm64Lsl) \
  V(Arm64Lsl32) \
  V(Arm64Lsr) \
  V(Arm64Lsr32) \
  V(Arm64Asr) \
  V(Arm64Asr32) \
  V(Arm64Ror) \
  V(Arm64Ror32) \
  V(Arm64Mov32) \
  V(Arm64Sxtb32) \
  V(Arm64Sxth32) \
  V(Arm64Sxtw) \
  V(Arm64Sbfx32) \
  V(Arm64Ubfx) \
  V(Arm64Ubfx32) \
  V(Arm64Ubfiz32) \
  V(Arm64Bfi) \
  V(Arm64Rbit) \
  V(Arm64Rbit32) \
  V(Arm64TestAndBranch32) \
  V(Arm64TestAndBranch) \
  V(Arm64CompareAndBranch32) \
  V(Arm64CompareAndBranch) \
  V(Arm64ClaimCSP) \
  V(Arm64ClaimJSSP) \
  V(Arm64PokeCSP) \
  V(Arm64PokeJSSP) \
  V(Arm64PokePair) \
  V(Arm64Float32Cmp) \
  V(Arm64Float32Add) \
  V(Arm64Float32Sub) \
  V(Arm64Float32Mul) \
  V(Arm64Float32Div) \
  V(Arm64Float32Max) \
  V(Arm64Float32Min) \
  V(Arm64Float32Abs) \
  V(Arm64Float32Neg) \
  V(Arm64Float32Sqrt) \
  V(Arm64Float32RoundDown) \
  V(Arm64Float64Cmp) \
  V(Arm64Float64Add) \
  V(Arm64Float64Sub) \
  V(Arm64Float64Mul) \
  V(Arm64Float64Div) \
  V(Arm64Float64Mod) \
  V(Arm64Float64Max) \
  V(Arm64Float64Min) \
  V(Arm64Float64Abs) \
  V(Arm64Float64Neg) \
  V(Arm64Float64Sqrt) \
  V(Arm64Float64RoundDown) \
  V(Arm64Float32RoundUp) \
  V(Arm64Float64RoundUp) \
  V(Arm64Float64RoundTiesAway) \
  V(Arm64Float32RoundTruncate) \
  V(Arm64Float64RoundTruncate) \
  V(Arm64Float32RoundTiesEven) \
  V(Arm64Float64RoundTiesEven) \
  V(Arm64Float64SilenceNaN) \
  V(Arm64Float32ToFloat64) \
  V(Arm64Float64ToFloat32) \
  V(Arm64Float32ToInt32) \
  V(Arm64Float64ToInt32) \
  V(Arm64Float32ToUint32) \
  V(Arm64Float64ToUint32) \
  V(Arm64Float32ToInt64) \
  V(Arm64Float64ToInt64) \
  V(Arm64Float32ToUint64) \
  V(Arm64Float64ToUint64) \
  V(Arm64Int32ToFloat32) \
  V(Arm64Int32ToFloat64) \
  V(Arm64Int64ToFloat32) \
  V(Arm64Int64ToFloat64) \
  V(Arm64Uint32ToFloat32) \
  V(Arm64Uint32ToFloat64) \
  V(Arm64Uint64ToFloat32) \
  V(Arm64Uint64ToFloat64) \
  V(Arm64Float64ExtractLowWord32) \
  V(Arm64Float64ExtractHighWord32) \
  V(Arm64Float64InsertLowWord32) \
  V(Arm64Float64InsertHighWord32) \
  V(Arm64Float64MoveU64) \
  V(Arm64U64MoveFloat64) \
  V(Arm64LdrS) \
  V(Arm64StrS) \
  V(Arm64LdrD) \
  V(Arm64StrD) \
  V(Arm64Ldrb) \
  V(Arm64Ldrsb) \
  V(Arm64Strb) \
  V(Arm64Ldrh) \
  V(Arm64Ldrsh) \
  V(Arm64Strh) \
  V(Arm64LdrW) \
  V(Arm64StrW) \
  V(Arm64Ldr) \
  V(Arm64Str)
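
// The list above is an X-macro: each client defines a macro V and applies it
// to every opcode name. A minimal sketch of the kind of expansion performed
// by the platform-independent instruction-codes.h (illustrative only; the
// real definition also folds in the common, architecture-independent
// opcodes):
//
//   #define DECLARE_ARCH_OPCODE(Name) k##Name,
//   enum ArchOpcode { TARGET_ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE) };
//   #undef DECLARE_ARCH_OPCODE
//
// which yields enumerators such as kArm64Add, kArm64Ldr and kArm64Str.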

// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MRI)              /* [%r0 + K] */ \
  V(MRR)              /* [%r0 + %r1] */ \
  V(Operand2_R_LSL_I) /* %r0 LSL K */ \
  V(Operand2_R_LSR_I) /* %r0 LSR K */ \
  V(Operand2_R_ASR_I) /* %r0 ASR K */ \
  V(Operand2_R_ROR_I) /* %r0 ROR K */ \
  V(Operand2_R_UXTB)  /* %r0 UXTB (unsigned extend byte) */ \
  V(Operand2_R_UXTH)  /* %r0 UXTH (unsigned extend halfword) */ \
  V(Operand2_R_SXTB)  /* %r0 SXTB (signed extend byte) */ \
  V(Operand2_R_SXTH)  /* %r0 SXTH (signed extend halfword) */
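
// Sketch of how an addressing mode travels with an opcode (assumed layout,
// following the bit fields declared in the platform-independent
// instruction-codes.h; names such as AddressingModeField and kMode_MRI are
// taken from there):
//
//   InstructionCode opcode =
//       kArm64Ldr | AddressingModeField::encode(kMode_MRI);
//
// After register allocation, the code generator decodes the field again
// (AddressingModeField::decode(instr->opcode())) and picks the matching
// assembler call, e.g. an ldr with a [base + immediate] MemOperand for
// kMode_MRI.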

enum ResetJSSPAfterCall { kNoResetJSSP, kResetJSSP };

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_