// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_

namespace v8 {
namespace internal {
namespace compiler {

// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
//
// X-macro list: instantiate with a one-argument macro V to generate the
// backend's ArchOpcode enum, its name table, etc. Order is significant —
// it fixes the enum ordinal of each opcode — so append new entries rather
// than inserting. Naming convention: an "…32" suffix is the 32-bit-operand
// form of the preceding 64-bit opcode; SSE*/AVX* prefixes select the
// floating-point instruction encoding family.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(X64Add)                        \
  V(X64Add32)                      \
  V(X64And)                        \
  V(X64And32)                      \
  V(X64Cmp)                        \
  V(X64Cmp32)                      \
  V(X64Cmp16)                      \
  V(X64Cmp8)                       \
  V(X64Test)                       \
  V(X64Test32)                     \
  V(X64Test16)                     \
  V(X64Test8)                      \
  V(X64Or)                         \
  V(X64Or32)                       \
  V(X64Xor)                        \
  V(X64Xor32)                      \
  V(X64Sub)                        \
  V(X64Sub32)                      \
  V(X64Imul)                       \
  V(X64Imul32)                     \
  V(X64ImulHigh32)                 \
  V(X64UmulHigh32)                 \
  V(X64Idiv)                       \
  V(X64Idiv32)                     \
  V(X64Udiv)                       \
  V(X64Udiv32)                     \
  V(X64Not)                        \
  V(X64Not32)                      \
  V(X64Neg)                        \
  V(X64Neg32)                      \
  V(X64Shl)                        \
  V(X64Shl32)                      \
  V(X64Shr)                        \
  V(X64Shr32)                      \
  V(X64Sar)                        \
  V(X64Sar32)                      \
  V(X64Ror)                        \
  V(X64Ror32)                      \
  V(X64Lzcnt)                      \
  V(X64Lzcnt32)                    \
  V(X64Tzcnt)                      \
  V(X64Tzcnt32)                    \
  V(X64Popcnt)                     \
  V(X64Popcnt32)                   \
  V(SSEFloat32Cmp)                 \
  V(SSEFloat32Add)                 \
  V(SSEFloat32Sub)                 \
  V(SSEFloat32Mul)                 \
  V(SSEFloat32Div)                 \
  V(SSEFloat32Abs)                 \
  V(SSEFloat32Neg)                 \
  V(SSEFloat32Sqrt)                \
  V(SSEFloat32Max)                 \
  V(SSEFloat32Min)                 \
  V(SSEFloat32ToFloat64)           \
  V(SSEFloat32ToInt32)             \
  V(SSEFloat32ToUint32)            \
  V(SSEFloat32Round)               \
  V(SSEFloat64Cmp)                 \
  V(SSEFloat64Add)                 \
  V(SSEFloat64Sub)                 \
  V(SSEFloat64Mul)                 \
  V(SSEFloat64Div)                 \
  V(SSEFloat64Mod)                 \
  V(SSEFloat64Abs)                 \
  V(SSEFloat64Neg)                 \
  V(SSEFloat64Sqrt)                \
  V(SSEFloat64Round)               \
  V(SSEFloat64Max)                 \
  V(SSEFloat64Min)                 \
  V(SSEFloat64ToFloat32)           \
  V(SSEFloat64ToInt32)             \
  V(SSEFloat64ToUint32)            \
  V(SSEFloat32ToInt64)             \
  V(SSEFloat64ToInt64)             \
  V(SSEFloat32ToUint64)            \
  V(SSEFloat64ToUint64)            \
  V(SSEInt32ToFloat64)             \
  V(SSEInt32ToFloat32)             \
  V(SSEInt64ToFloat32)             \
  V(SSEInt64ToFloat64)             \
  V(SSEUint64ToFloat32)            \
  V(SSEUint64ToFloat64)            \
  V(SSEUint32ToFloat64)            \
  V(SSEUint32ToFloat32)            \
  V(SSEFloat64ExtractLowWord32)    \
  V(SSEFloat64ExtractHighWord32)   \
  V(SSEFloat64InsertLowWord32)     \
  V(SSEFloat64InsertHighWord32)    \
  V(SSEFloat64LoadLowWord32)       \
  V(SSEFloat64SilenceNaN)          \
  V(AVXFloat32Cmp)                 \
  V(AVXFloat32Add)                 \
  V(AVXFloat32Sub)                 \
  V(AVXFloat32Mul)                 \
  V(AVXFloat32Div)                 \
  V(AVXFloat32Max)                 \
  V(AVXFloat32Min)                 \
  V(AVXFloat64Cmp)                 \
  V(AVXFloat64Add)                 \
  V(AVXFloat64Sub)                 \
  V(AVXFloat64Mul)                 \
  V(AVXFloat64Div)                 \
  V(AVXFloat64Max)                 \
  V(AVXFloat64Min)                 \
  V(AVXFloat64Abs)                 \
  V(AVXFloat64Neg)                 \
  V(AVXFloat32Abs)                 \
  V(AVXFloat32Neg)                 \
  V(X64Movsxbl)                    \
  V(X64Movzxbl)                    \
  V(X64Movb)                       \
  V(X64Movsxwl)                    \
  V(X64Movzxwl)                    \
  V(X64Movw)                       \
  V(X64Movl)                       \
  V(X64Movsxlq)                    \
  V(X64Movq)                       \
  V(X64Movsd)                      \
  V(X64Movss)                      \
  V(X64BitcastFI)                  \
  V(X64BitcastDL)                  \
  V(X64BitcastIF)                  \
  V(X64BitcastLD)                  \
  V(X64Lea32)                      \
  V(X64Lea)                        \
  V(X64Dec32)                      \
  V(X64Inc32)                      \
  V(X64Push)                       \
  V(X64Poke)                       \
  V(X64StackCheck)                 \
  V(X64Xchgb)                      \
  V(X64Xchgw)                      \
  V(X64Xchgl)
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (32-bit signed integer)
//
// The scale factors mirror the x86-64 SIB-byte encoding, which only
// supports index scaling by 1, 2, 4, or 8.
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MR)   /* [%r1            ] */      \
  V(MRI)  /* [%r1         + K] */      \
  V(MR1)  /* [%r1 + %r2*1    ] */      \
  V(MR2)  /* [%r1 + %r2*2    ] */      \
  V(MR4)  /* [%r1 + %r2*4    ] */      \
  V(MR8)  /* [%r1 + %r2*8    ] */      \
  V(MR1I) /* [%r1 + %r2*1 + K] */      \
  V(MR2I) /* [%r1 + %r2*2 + K] */      \
  V(MR4I) /* [%r1 + %r2*4 + K] */      \
  V(MR8I) /* [%r1 + %r2*8 + K] */      \
  V(M1)   /* [      %r2*1    ] */      \
  V(M2)   /* [      %r2*2    ] */      \
  V(M4)   /* [      %r2*4    ] */      \
  V(M8)   /* [      %r2*8    ] */      \
  V(M1I)  /* [      %r2*1 + K] */      \
  V(M2I)  /* [      %r2*2 + K] */      \
  V(M4I)  /* [      %r2*4 + K] */      \
  V(M8I)  /* [      %r2*8 + K] */

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_