// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
| 4 | |
| 5 | #ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
| 6 | #define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |
| 7 | |
| 8 | namespace v8 { |
| 9 | namespace internal { |
| 10 | namespace compiler { |
| 11 | |
// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
// NOTE: entry order is preserved — it determines the generated enum values.
#define TARGET_ARCH_OPCODE_LIST(V)     \
  /* Integer arithmetic and logic. */  \
  V(X64Add)                            \
  V(X64Add32)                          \
  V(X64And)                            \
  V(X64And32)                          \
  V(X64Cmp)                            \
  V(X64Cmp32)                          \
  V(X64Test)                           \
  V(X64Test32)                         \
  V(X64Or)                             \
  V(X64Or32)                           \
  V(X64Xor)                            \
  V(X64Xor32)                          \
  V(X64Sub)                            \
  V(X64Sub32)                          \
  V(X64Imul)                           \
  V(X64Imul32)                         \
  V(X64ImulHigh32)                     \
  V(X64UmulHigh32)                     \
  V(X64Idiv)                           \
  V(X64Idiv32)                         \
  V(X64Udiv)                           \
  V(X64Udiv32)                         \
  V(X64Not)                            \
  V(X64Not32)                          \
  V(X64Neg)                            \
  V(X64Neg32)                          \
  /* Shifts and rotates. */            \
  V(X64Shl)                            \
  V(X64Shl32)                          \
  V(X64Shr)                            \
  V(X64Shr32)                          \
  V(X64Sar)                            \
  V(X64Sar32)                          \
  V(X64Ror)                            \
  V(X64Ror32)                          \
  /* SSE scalar double operations. */  \
  V(SSEFloat64Cmp)                     \
  V(SSEFloat64Add)                     \
  V(SSEFloat64Sub)                     \
  V(SSEFloat64Mul)                     \
  V(SSEFloat64Div)                     \
  V(SSEFloat64Mod)                     \
  V(SSEFloat64Sqrt)                    \
  V(SSEFloat64Floor)                   \
  V(SSEFloat64Ceil)                    \
  V(SSEFloat64RoundTruncate)           \
  V(SSECvtss2sd)                       \
  V(SSECvtsd2ss)                       \
  V(SSEFloat64ToInt32)                 \
  V(SSEFloat64ToUint32)                \
  V(SSEInt32ToFloat64)                 \
  V(SSEUint32ToFloat64)                \
  /* AVX scalar double operations. */  \
  V(AVXFloat64Add)                     \
  V(AVXFloat64Sub)                     \
  V(AVXFloat64Mul)                     \
  V(AVXFloat64Div)                     \
  /* Moves (sx/zx = sign/zero extend). */ \
  V(X64Movsxbl)                        \
  V(X64Movzxbl)                        \
  V(X64Movb)                           \
  V(X64Movsxwl)                        \
  V(X64Movzxwl)                        \
  V(X64Movw)                           \
  V(X64Movl)                           \
  V(X64Movsxlq)                        \
  V(X64Movq)                           \
  V(X64Movsd)                          \
  V(X64Movss)                          \
  /* Leas, inc/dec, stack, write barrier. */ \
  V(X64Lea32)                          \
  V(X64Lea)                            \
  V(X64Dec32)                          \
  V(X64Inc32)                          \
  V(X64Push)                           \
  V(X64StoreWriteBarrier)
| 86 | |
| 87 | |
// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// M = memory operand
// R = base register
// N = index register * N for N in {1, 2, 4, 8}
// I = immediate displacement (32-bit signed integer)
// (In the comments below, K stands for the immediate displacement.)

// Fix: the MR4I/MR8I comments previously read "%r2*3" and "%r2*4"; the mode
// names (and the {1, 2, 4, 8} scale set above) require scales 4 and 8.
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MR)   /* [%r1            ] */      \
  V(MRI)  /* [%r1         + K] */      \
  V(MR1)  /* [%r1 + %r2*1    ] */      \
  V(MR2)  /* [%r1 + %r2*2    ] */      \
  V(MR4)  /* [%r1 + %r2*4    ] */      \
  V(MR8)  /* [%r1 + %r2*8    ] */      \
  V(MR1I) /* [%r1 + %r2*1 + K] */      \
  V(MR2I) /* [%r1 + %r2*2 + K] */      \
  V(MR4I) /* [%r1 + %r2*4 + K] */      \
  V(MR8I) /* [%r1 + %r2*8 + K] */      \
  V(M1)   /* [      %r2*1    ] */      \
  V(M2)   /* [      %r2*2    ] */      \
  V(M4)   /* [      %r2*4    ] */      \
  V(M8)   /* [      %r2*8    ] */      \
  V(M1I)  /* [      %r2*1 + K] */      \
  V(M2I)  /* [      %r2*2 + K] */      \
  V(M4I)  /* [      %r2*4 + K] */      \
  V(M8I)  /* [      %r2*8 + K] */
Ben Murdoch | b8a8cc1 | 2014-11-26 15:28:44 +0000 | [diff] [blame] | 119 | |
| 120 | } // namespace compiler |
| 121 | } // namespace internal |
| 122 | } // namespace v8 |
| 123 | |
| 124 | #endif // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_ |