// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_

namespace v8 {
namespace internal {
namespace compiler {

// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction. Consumers invoke this X-macro
// with a one-argument macro V to generate enums, switch cases, name tables,
// etc. Suffix "32" denotes the 32-bit (W-register) form of an operation.
#define TARGET_ARCH_OPCODE_LIST(V) \
  V(Arm64Add)                      \
  V(Arm64Add32)                    \
  V(Arm64And)                      \
  V(Arm64And32)                    \
  V(Arm64Cmp)                      \
  V(Arm64Cmp32)                    \
  V(Arm64Cmn)                      \
  V(Arm64Cmn32)                    \
  V(Arm64Tst)                      \
  V(Arm64Tst32)                    \
  V(Arm64Or)                       \
  V(Arm64Or32)                     \
  V(Arm64Xor)                      \
  V(Arm64Xor32)                    \
  V(Arm64Sub)                      \
  V(Arm64Sub32)                    \
  V(Arm64Mul)                      \
  V(Arm64Mul32)                    \
  V(Arm64Idiv)                     \
  V(Arm64Idiv32)                   \
  V(Arm64Udiv)                     \
  V(Arm64Udiv32)                   \
  V(Arm64Imod)                     \
  V(Arm64Imod32)                   \
  V(Arm64Umod)                     \
  V(Arm64Umod32)                   \
  V(Arm64Not)                      \
  V(Arm64Not32)                    \
  V(Arm64Neg)                      \
  V(Arm64Neg32)                    \
  V(Arm64Shl)                      \
  V(Arm64Shl32)                    \
  V(Arm64Shr)                      \
  V(Arm64Shr32)                    \
  V(Arm64Sar)                      \
  V(Arm64Sar32)                    \
  V(Arm64Ror)                      \
  V(Arm64Ror32)                    \
  V(Arm64Mov32)                    \
  V(Arm64Sxtw)                     \
  V(Arm64Claim)                    \
  V(Arm64Poke)                     \
  V(Arm64PokePairZero)             \
  V(Arm64PokePair)                 \
  V(Arm64Float64Cmp)               \
  V(Arm64Float64Add)               \
  V(Arm64Float64Sub)               \
  V(Arm64Float64Mul)               \
  V(Arm64Float64Div)               \
  V(Arm64Float64Mod)               \
  V(Arm64Float64Sqrt)              \
  V(Arm64Float64ToInt32)           \
  V(Arm64Float64ToUint32)          \
  V(Arm64Int32ToFloat64)           \
  V(Arm64Uint32ToFloat64)          \
  V(Arm64LdrS)                     \
  V(Arm64StrS)                     \
  V(Arm64LdrD)                     \
  V(Arm64StrD)                     \
  V(Arm64Ldrb)                     \
  V(Arm64Ldrsb)                    \
  V(Arm64Strb)                     \
  V(Arm64Ldrh)                     \
  V(Arm64Ldrsh)                    \
  V(Arm64Strh)                     \
  V(Arm64LdrW)                     \
  V(Arm64StrW)                     \
  V(Arm64Ldr)                      \
  V(Arm64Str)                      \
  V(Arm64StoreWriteBarrier)


// Addressing modes represent the "shape" of inputs to an instruction.
// Many instructions support multiple addressing modes. Addressing modes
// are encoded into the InstructionCode of the instruction and tell the
// code generator after register allocation which assembler method to call.
//
// We use the following local notation for addressing modes:
//
// R = register
// O = register or stack slot
// D = double register
// I = immediate (handle, external, int32)
// MRI = [register + immediate]
// MRR = [register + register]
#define TARGET_ADDRESSING_MODE_LIST(V) \
  V(MRI) /* [%r0 + K] */               \
  V(MRR) /* [%r0 + %r1] */

}  // namespace compiler
}  // namespace internal
}  // namespace v8

#endif  // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_