// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_

namespace v8 {
namespace internal {
namespace compiler {

// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
//
// X-macro list: callers supply a one-argument macro V, and the list expands
// V(name) once per opcode (presumably to generate enums, name tables and
// switch cases in the instruction-selector/code-generator — see the users
// of this header).
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(Arm64Add) \
  V(Arm64Add32) \
  V(Arm64And) \
  V(Arm64And32) \
  V(Arm64Bic) \
  V(Arm64Bic32) \
  V(Arm64Clz) \
  V(Arm64Clz32) \
  V(Arm64Cmp) \
  V(Arm64Cmp32) \
  V(Arm64Cmn) \
  V(Arm64Cmn32) \
  V(Arm64Tst) \
  V(Arm64Tst32) \
  V(Arm64Or) \
  V(Arm64Or32) \
  V(Arm64Orn) \
  V(Arm64Orn32) \
  V(Arm64Eor) \
  V(Arm64Eor32) \
  V(Arm64Eon) \
  V(Arm64Eon32) \
  V(Arm64Sub) \
  V(Arm64Sub32) \
  V(Arm64Mul) \
  V(Arm64Mul32) \
  V(Arm64Smull) \
  V(Arm64Umull) \
  V(Arm64Madd) \
  V(Arm64Madd32) \
  V(Arm64Msub) \
  V(Arm64Msub32) \
  V(Arm64Mneg) \
  V(Arm64Mneg32) \
  V(Arm64Idiv) \
  V(Arm64Idiv32) \
  V(Arm64Udiv) \
  V(Arm64Udiv32) \
  V(Arm64Imod) \
  V(Arm64Imod32) \
  V(Arm64Umod) \
  V(Arm64Umod32) \
  V(Arm64Not) \
  V(Arm64Not32) \
  V(Arm64Lsl) \
  V(Arm64Lsl32) \
  V(Arm64Lsr) \
  V(Arm64Lsr32) \
  V(Arm64Asr) \
  V(Arm64Asr32) \
  V(Arm64Ror) \
  V(Arm64Ror32) \
  V(Arm64Mov32) \
  V(Arm64Sxtb32) \
  V(Arm64Sxth32) \
  V(Arm64Sxtw) \
  V(Arm64Sbfx32) \
  V(Arm64Ubfx) \
  V(Arm64Ubfx32) \
  V(Arm64Ubfiz32) \
  V(Arm64Bfi) \
  V(Arm64TestAndBranch32) \
  V(Arm64TestAndBranch) \
  V(Arm64CompareAndBranch32) \
  V(Arm64ClaimForCallArguments) \
  V(Arm64Poke) \
  V(Arm64PokePair) \
  V(Arm64Float32Cmp) \
  V(Arm64Float32Add) \
  V(Arm64Float32Sub) \
  V(Arm64Float32Mul) \
  V(Arm64Float32Div) \
  V(Arm64Float32Max) \
  V(Arm64Float32Min) \
  V(Arm64Float32Abs) \
  V(Arm64Float32Sqrt) \
  V(Arm64Float32RoundDown) \
  V(Arm64Float64Cmp) \
  V(Arm64Float64Add) \
  V(Arm64Float64Sub) \
  V(Arm64Float64Mul) \
  V(Arm64Float64Div) \
  V(Arm64Float64Mod) \
  V(Arm64Float64Max) \
  V(Arm64Float64Min) \
  V(Arm64Float64Abs) \
  V(Arm64Float64Neg) \
  V(Arm64Float64Sqrt) \
  V(Arm64Float64RoundDown) \
  V(Arm64Float32RoundUp) \
  V(Arm64Float64RoundUp) \
  V(Arm64Float64RoundTiesAway) \
  V(Arm64Float32RoundTruncate) \
  V(Arm64Float64RoundTruncate) \
  V(Arm64Float32RoundTiesEven) \
  V(Arm64Float64RoundTiesEven) \
  V(Arm64Float32ToFloat64) \
  V(Arm64Float64ToFloat32) \
  V(Arm64Float64ToInt32) \
  V(Arm64Float64ToUint32) \
  V(Arm64Float32ToInt64) \
  V(Arm64Float64ToInt64) \
  V(Arm64Float32ToUint64) \
  V(Arm64Float64ToUint64) \
  V(Arm64Int32ToFloat64) \
  V(Arm64Int64ToFloat32) \
  V(Arm64Int64ToFloat64) \
  V(Arm64Uint32ToFloat64) \
  V(Arm64Uint64ToFloat32) \
  V(Arm64Uint64ToFloat64) \
  V(Arm64Float64ExtractLowWord32) \
  V(Arm64Float64ExtractHighWord32) \
  V(Arm64Float64InsertLowWord32) \
  V(Arm64Float64InsertHighWord32) \
  V(Arm64Float64MoveU64) \
  V(Arm64U64MoveFloat64) \
  V(Arm64LdrS) \
  V(Arm64StrS) \
  V(Arm64LdrD) \
  V(Arm64StrD) \
  V(Arm64Ldrb) \
  V(Arm64Ldrsb) \
  V(Arm64Strb) \
  V(Arm64Ldrh) \
  V(Arm64Ldrsh) \
  V(Arm64Strh) \
  V(Arm64LdrW) \
  V(Arm64StrW) \
  V(Arm64Ldr) \
  V(Arm64Str)


// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
//
// NOTE: block comments (not //) are used inside the list below because a
// line-splice backslash must stay outside any comment text.
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MRI)              /* [%r0 + K] */ \
  V(MRR)              /* [%r0 + %r1] */ \
  V(Operand2_R_LSL_I) /* %r0 LSL K */ \
  V(Operand2_R_LSR_I) /* %r0 LSR K */ \
  V(Operand2_R_ASR_I) /* %r0 ASR K */ \
  V(Operand2_R_ROR_I) /* %r0 ROR K */ \
  V(Operand2_R_UXTB)  /* %r0 UXTB (unsigned extend byte) */ \
  V(Operand2_R_UXTH)  /* %r0 UXTH (unsigned extend halfword) */ \
  V(Operand2_R_SXTB)  /* %r0 SXTB (signed extend byte) */ \
  V(Operand2_R_SXTH)  /* %r0 SXTH (signed extend halfword) */

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_