blob: 8e8e7652c31bafbed69229704ec98900e90bea26 [file] [log] [blame]
// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
4
5#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
6#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
7
8namespace v8 {
9namespace internal {
10namespace compiler {
11
// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
#define TARGET_ARCH_OPCODE_LIST(V) \
  /* Integer ALU: arithmetic, logic, compare and multiply/divide */ \
  /* (64-bit form and explicit 32-bit form). */ \
  V(X64Add) \
  V(X64Add32) \
  V(X64And) \
  V(X64And32) \
  V(X64Cmp) \
  V(X64Cmp32) \
  V(X64Test) \
  V(X64Test32) \
  V(X64Or) \
  V(X64Or32) \
  V(X64Xor) \
  V(X64Xor32) \
  V(X64Sub) \
  V(X64Sub32) \
  V(X64Imul) \
  V(X64Imul32) \
  V(X64ImulHigh32) \
  V(X64UmulHigh32) \
  V(X64Idiv) \
  V(X64Idiv32) \
  V(X64Udiv) \
  V(X64Udiv32) \
  V(X64Not) \
  V(X64Not32) \
  V(X64Neg) \
  V(X64Neg32) \
  /* Shifts and rotates. */ \
  V(X64Shl) \
  V(X64Shl32) \
  V(X64Shr) \
  V(X64Shr32) \
  V(X64Sar) \
  V(X64Sar32) \
  V(X64Ror) \
  V(X64Ror32) \
  /* Bit-counting instructions. */ \
  V(X64Lzcnt) \
  V(X64Lzcnt32) \
  V(X64Tzcnt) \
  V(X64Tzcnt32) \
  V(X64Popcnt) \
  V(X64Popcnt32) \
  /* SSE scalar single-precision float ops. */ \
  V(SSEFloat32Cmp) \
  V(SSEFloat32Add) \
  V(SSEFloat32Sub) \
  V(SSEFloat32Mul) \
  V(SSEFloat32Div) \
  V(SSEFloat32Abs) \
  V(SSEFloat32Neg) \
  V(SSEFloat32Sqrt) \
  V(SSEFloat32Max) \
  V(SSEFloat32Min) \
  V(SSEFloat32ToFloat64) \
  V(SSEFloat32Round) \
  /* SSE scalar double-precision float ops. */ \
  V(SSEFloat64Cmp) \
  V(SSEFloat64Add) \
  V(SSEFloat64Sub) \
  V(SSEFloat64Mul) \
  V(SSEFloat64Div) \
  V(SSEFloat64Mod) \
  V(SSEFloat64Abs) \
  V(SSEFloat64Neg) \
  V(SSEFloat64Sqrt) \
  V(SSEFloat64Round) \
  V(SSEFloat64Max) \
  V(SSEFloat64Min) \
  V(SSEFloat64ToFloat32) \
  V(SSEFloat64ToInt32) \
  V(SSEFloat64ToUint32) \
  /* SSE conversions between floats and 64-bit integers. */ \
  V(SSEFloat32ToInt64) \
  V(SSEFloat64ToInt64) \
  V(SSEFloat32ToUint64) \
  V(SSEFloat64ToUint64) \
  V(SSEInt32ToFloat64) \
  V(SSEInt64ToFloat32) \
  V(SSEInt64ToFloat64) \
  V(SSEUint64ToFloat32) \
  V(SSEUint64ToFloat64) \
  V(SSEUint32ToFloat64) \
  /* Access to the 32-bit halves of a double. */ \
  V(SSEFloat64ExtractLowWord32) \
  V(SSEFloat64ExtractHighWord32) \
  V(SSEFloat64InsertLowWord32) \
  V(SSEFloat64InsertHighWord32) \
  V(SSEFloat64LoadLowWord32) \
  /* AVX (VEX-encoded) variants of the scalar float ops. */ \
  V(AVXFloat32Cmp) \
  V(AVXFloat32Add) \
  V(AVXFloat32Sub) \
  V(AVXFloat32Mul) \
  V(AVXFloat32Div) \
  V(AVXFloat32Max) \
  V(AVXFloat32Min) \
  V(AVXFloat64Cmp) \
  V(AVXFloat64Add) \
  V(AVXFloat64Sub) \
  V(AVXFloat64Mul) \
  V(AVXFloat64Div) \
  V(AVXFloat64Max) \
  V(AVXFloat64Min) \
  V(AVXFloat64Abs) \
  V(AVXFloat64Neg) \
  V(AVXFloat32Abs) \
  V(AVXFloat32Neg) \
  /* Memory moves, with sign/zero extension where named. */ \
  V(X64Movsxbl) \
  V(X64Movzxbl) \
  V(X64Movb) \
  V(X64Movsxwl) \
  V(X64Movzxwl) \
  V(X64Movw) \
  V(X64Movl) \
  V(X64Movsxlq) \
  V(X64Movq) \
  V(X64Movsd) \
  V(X64Movss) \
  /* Reinterpreting bitcasts: F=float32 I=int32, D=float64 L=int64. */ \
  V(X64BitcastFI) \
  V(X64BitcastDL) \
  V(X64BitcastIF) \
  V(X64BitcastLD) \
  /* Address computation and increment/decrement. */ \
  V(X64Lea32) \
  V(X64Lea) \
  V(X64Dec32) \
  V(X64Inc32) \
  /* Stack manipulation. */ \
  V(X64Push) \
  V(X64Poke) \
  V(X64StackCheck)
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000137
138
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (32-bit signed integer)

Ben Murdochb8a8cc12014-11-26 15:28:44 +0000151#define TARGET_ADDRESSING_MODE_LIST(V) \
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400152 V(MR) /* [%r1 ] */ \
153 V(MRI) /* [%r1 + K] */ \
154 V(MR1) /* [%r1 + %r2*1 ] */ \
155 V(MR2) /* [%r1 + %r2*2 ] */ \
156 V(MR4) /* [%r1 + %r2*4 ] */ \
157 V(MR8) /* [%r1 + %r2*8 ] */ \
158 V(MR1I) /* [%r1 + %r2*1 + K] */ \
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000159 V(MR2I) /* [%r1 + %r2*2 + K] */ \
Emily Bernierd0a1eb72015-03-24 16:35:39 -0400160 V(MR4I) /* [%r1 + %r2*3 + K] */ \
161 V(MR8I) /* [%r1 + %r2*4 + K] */ \
162 V(M1) /* [ %r2*1 ] */ \
163 V(M2) /* [ %r2*2 ] */ \
164 V(M4) /* [ %r2*4 ] */ \
165 V(M8) /* [ %r2*8 ] */ \
166 V(M1I) /* [ %r2*1 + K] */ \
167 V(M2I) /* [ %r2*2 + K] */ \
168 V(M4I) /* [ %r2*4 + K] */ \
169 V(M8I) /* [ %r2*8 + K] */
Ben Murdochb8a8cc12014-11-26 15:28:44 +0000170
171} // namespace compiler
172} // namespace internal
173} // namespace v8
174
175#endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_