Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2012 The Android Open Source Project |
| 3 | * |
| 4 | * Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | * you may not use this file except in compliance with the License. |
| 6 | * You may obtain a copy of the License at |
| 7 | * |
| 8 | * http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | * |
| 10 | * Unless required by applicable law or agreed to in writing, software |
| 11 | * distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | * See the License for the specific language governing permissions and |
| 14 | * limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #include "codegen_x86.h" |
| 18 | #include "dex/quick/mir_to_lir-inl.h" |
| 19 | #include "x86_lir.h" |
| 20 | |
| 21 | namespace art { |
| 22 | |
| 23 | #define MAX_ASSEMBLER_RETRIES 50 |
| 24 | |
| 25 | const X86EncodingMap X86Mir2Lir::EncodingMap[kX86Last] = { |
| 26 | { kX8632BitData, kData, IS_UNARY_OP, { 0, 0, 0x00, 0, 0, 0, 0, 4 }, "data", "0x!0d" }, |
| 27 | { kX86Bkpt, kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xCC, 0, 0, 0, 0, 0 }, "int 3", "" }, |
| 28 | { kX86Nop, kNop, IS_UNARY_OP, { 0, 0, 0x90, 0, 0, 0, 0, 0 }, "nop", "" }, |
| 29 | |
| 30 | #define ENCODING_MAP(opname, mem_use, reg_def, uses_ccodes, \ |
| 31 | rm8_r8, rm32_r32, \ |
| 32 | r8_rm8, r32_rm32, \ |
| 33 | ax8_i8, ax32_i32, \ |
| 34 | rm8_i8, rm8_i8_modrm, \ |
| 35 | rm32_i32, rm32_i32_modrm, \ |
| 36 | rm32_i8, rm32_i8_modrm) \ |
| 37 | { kX86 ## opname ## 8MR, kMemReg, mem_use | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_r8, 0, 0, 0, 0, 0 }, #opname "8MR", "[!0r+!1d],!2r" }, \ |
| 38 | { kX86 ## opname ## 8AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_r8, 0, 0, 0, 0, 0 }, #opname "8AR", "[!0r+!1r<<!2d+!3d],!4r" }, \ |
| 39 | { kX86 ## opname ## 8TR, kThreadReg, mem_use | IS_BINARY_OP | REG_USE1 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_r8, 0, 0, 0, 0, 0 }, #opname "8TR", "fs:[!0d],!1r" }, \ |
| 40 | { kX86 ## opname ## 8RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0 }, #opname "8RR", "!0r,!1r" }, \ |
| 41 | { kX86 ## opname ## 8RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0 }, #opname "8RM", "!0r,[!1r+!2d]" }, \ |
| 42 | { kX86 ## opname ## 8RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0, 0, r8_rm8, 0, 0, 0, 0, 0 }, #opname "8RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \ |
| 43 | { kX86 ## opname ## 8RT, kRegThread, IS_LOAD | IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r8_rm8, 0, 0, 0, 0, 0 }, #opname "8RT", "!0r,fs:[!1d]" }, \ |
| 44 | { kX86 ## opname ## 8RI, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, ax8_i8, 1 }, #opname "8RI", "!0r,!1d" }, \ |
| 45 | { kX86 ## opname ## 8MI, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1 }, #opname "8MI", "[!0r+!1d],!2d" }, \ |
| 46 | { kX86 ## opname ## 8AI, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1 }, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \ |
| 47 | { kX86 ## opname ## 8TI, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm8_i8, 0, 0, rm8_i8_modrm, 0, 1 }, #opname "8TI", "fs:[!0d],!1d" }, \ |
| 48 | \ |
| 49 | { kX86 ## opname ## 16MR, kMemReg, mem_use | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "16MR", "[!0r+!1d],!2r" }, \ |
| 50 | { kX86 ## opname ## 16AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "16AR", "[!0r+!1r<<!2d+!3d],!4r" }, \ |
| 51 | { kX86 ## opname ## 16TR, kThreadReg, mem_use | IS_BINARY_OP | REG_USE1 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_r32, 0, 0, 0, 0, 0 }, #opname "16TR", "fs:[!0d],!1r" }, \ |
| 52 | { kX86 ## opname ## 16RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RR", "!0r,!1r" }, \ |
| 53 | { kX86 ## opname ## 16RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RM", "!0r,[!1r+!2d]" }, \ |
| 54 | { kX86 ## opname ## 16RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0x66, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \ |
| 55 | { kX86 ## opname ## 16RT, kRegThread, IS_LOAD | IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, r32_rm32, 0, 0, 0, 0, 0 }, #opname "16RT", "!0r,fs:[!1d]" }, \ |
| 56 | { kX86 ## opname ## 16RI, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 2 }, #opname "16RI", "!0r,!1d" }, \ |
| 57 | { kX86 ## opname ## 16MI, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 2 }, #opname "16MI", "[!0r+!1d],!2d" }, \ |
| 58 | { kX86 ## opname ## 16AI, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 2 }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \ |
| 59 | { kX86 ## opname ## 16TI, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i32, 0, 0, rm32_i32_modrm, 0, 2 }, #opname "16TI", "fs:[!0d],!1d" }, \ |
| 60 | { kX86 ## opname ## 16RI8, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16RI8", "!0r,!1d" }, \ |
| 61 | { kX86 ## opname ## 16MI8, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16MI8", "[!0r+!1d],!2d" }, \ |
| 62 | { kX86 ## opname ## 16AI8, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0x66, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \ |
| 63 | { kX86 ## opname ## 16TI8, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0x66, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "16TI8", "fs:[!0d],!1d" }, \ |
| 64 | \ |
| 65 | { kX86 ## opname ## 32MR, kMemReg, mem_use | IS_TERTIARY_OP | REG_USE02 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "32MR", "[!0r+!1d],!2r" }, \ |
| 66 | { kX86 ## opname ## 32AR, kArrayReg, mem_use | IS_QUIN_OP | REG_USE014 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "32AR", "[!0r+!1r<<!2d+!3d],!4r" }, \ |
| 67 | { kX86 ## opname ## 32TR, kThreadReg, mem_use | IS_BINARY_OP | REG_USE1 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_r32, 0, 0, 0, 0, 0 }, #opname "32TR", "fs:[!0d],!1r" }, \ |
| 68 | { kX86 ## opname ## 32RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RR", "!0r,!1r" }, \ |
| 69 | { kX86 ## opname ## 32RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RM", "!0r,[!1r+!2d]" }, \ |
| 70 | { kX86 ## opname ## 32RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012 | SETS_CCODES | uses_ccodes, { 0, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, \ |
| 71 | { kX86 ## opname ## 32RT, kRegThread, IS_LOAD | IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, r32_rm32, 0, 0, 0, 0, 0 }, #opname "32RT", "!0r,fs:[!1d]" }, \ |
| 72 | { kX86 ## opname ## 32RI, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i32, 0, 0, rm32_i32_modrm, ax32_i32, 4 }, #opname "32RI", "!0r,!1d" }, \ |
| 73 | { kX86 ## opname ## 32MI, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 4 }, #opname "32MI", "[!0r+!1d],!2d" }, \ |
| 74 | { kX86 ## opname ## 32AI, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 4 }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \ |
| 75 | { kX86 ## opname ## 32TI, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i32, 0, 0, rm32_i32_modrm, 0, 4 }, #opname "32TI", "fs:[!0d],!1d" }, \ |
| 76 | { kX86 ## opname ## 32RI8, kRegImm, IS_BINARY_OP | reg_def | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32RI8", "!0r,!1d" }, \ |
| 77 | { kX86 ## opname ## 32MI8, kMemImm, mem_use | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32MI8", "[!0r+!1d],!2d" }, \ |
| 78 | { kX86 ## opname ## 32AI8, kArrayImm, mem_use | IS_QUIN_OP | REG_USE01 | SETS_CCODES | uses_ccodes, { 0, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32AI8", "[!0r+!1r<<!2d+!3d],!4d" }, \ |
| 79 | { kX86 ## opname ## 32TI8, kThreadImm, mem_use | IS_BINARY_OP | SETS_CCODES | uses_ccodes, { THREAD_PREFIX, 0, rm32_i8, 0, 0, rm32_i8_modrm, 0, 1 }, #opname "32TI8", "fs:[!0d],!1d" } |
| 80 | |
| 81 | ENCODING_MAP(Add, IS_LOAD | IS_STORE, REG_DEF0, 0, |
| 82 | 0x00 /* RegMem8/Reg8 */, 0x01 /* RegMem32/Reg32 */, |
| 83 | 0x02 /* Reg8/RegMem8 */, 0x03 /* Reg32/RegMem32 */, |
| 84 | 0x04 /* Rax8/imm8 opcode */, 0x05 /* Rax32/imm32 */, |
| 85 | 0x80, 0x0 /* RegMem8/imm8 */, |
| 86 | 0x81, 0x0 /* RegMem32/imm32 */, 0x83, 0x0 /* RegMem32/imm8 */), |
| 87 | ENCODING_MAP(Or, IS_LOAD | IS_STORE, REG_DEF0, 0, |
| 88 | 0x08 /* RegMem8/Reg8 */, 0x09 /* RegMem32/Reg32 */, |
| 89 | 0x0A /* Reg8/RegMem8 */, 0x0B /* Reg32/RegMem32 */, |
| 90 | 0x0C /* Rax8/imm8 opcode */, 0x0D /* Rax32/imm32 */, |
| 91 | 0x80, 0x1 /* RegMem8/imm8 */, |
| 92 | 0x81, 0x1 /* RegMem32/imm32 */, 0x83, 0x1 /* RegMem32/imm8 */), |
| 93 | ENCODING_MAP(Adc, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES, |
| 94 | 0x10 /* RegMem8/Reg8 */, 0x11 /* RegMem32/Reg32 */, |
| 95 | 0x12 /* Reg8/RegMem8 */, 0x13 /* Reg32/RegMem32 */, |
| 96 | 0x14 /* Rax8/imm8 opcode */, 0x15 /* Rax32/imm32 */, |
| 97 | 0x80, 0x2 /* RegMem8/imm8 */, |
| 98 | 0x81, 0x2 /* RegMem32/imm32 */, 0x83, 0x2 /* RegMem32/imm8 */), |
| 99 | ENCODING_MAP(Sbb, IS_LOAD | IS_STORE, REG_DEF0, USES_CCODES, |
| 100 | 0x18 /* RegMem8/Reg8 */, 0x19 /* RegMem32/Reg32 */, |
| 101 | 0x1A /* Reg8/RegMem8 */, 0x1B /* Reg32/RegMem32 */, |
| 102 | 0x1C /* Rax8/imm8 opcode */, 0x1D /* Rax32/imm32 */, |
| 103 | 0x80, 0x3 /* RegMem8/imm8 */, |
| 104 | 0x81, 0x3 /* RegMem32/imm32 */, 0x83, 0x3 /* RegMem32/imm8 */), |
| 105 | ENCODING_MAP(And, IS_LOAD | IS_STORE, REG_DEF0, 0, |
| 106 | 0x20 /* RegMem8/Reg8 */, 0x21 /* RegMem32/Reg32 */, |
| 107 | 0x22 /* Reg8/RegMem8 */, 0x23 /* Reg32/RegMem32 */, |
| 108 | 0x24 /* Rax8/imm8 opcode */, 0x25 /* Rax32/imm32 */, |
| 109 | 0x80, 0x4 /* RegMem8/imm8 */, |
| 110 | 0x81, 0x4 /* RegMem32/imm32 */, 0x83, 0x4 /* RegMem32/imm8 */), |
| 111 | ENCODING_MAP(Sub, IS_LOAD | IS_STORE, REG_DEF0, 0, |
| 112 | 0x28 /* RegMem8/Reg8 */, 0x29 /* RegMem32/Reg32 */, |
| 113 | 0x2A /* Reg8/RegMem8 */, 0x2B /* Reg32/RegMem32 */, |
| 114 | 0x2C /* Rax8/imm8 opcode */, 0x2D /* Rax32/imm32 */, |
| 115 | 0x80, 0x5 /* RegMem8/imm8 */, |
| 116 | 0x81, 0x5 /* RegMem32/imm32 */, 0x83, 0x5 /* RegMem32/imm8 */), |
| 117 | ENCODING_MAP(Xor, IS_LOAD | IS_STORE, REG_DEF0, 0, |
| 118 | 0x30 /* RegMem8/Reg8 */, 0x31 /* RegMem32/Reg32 */, |
| 119 | 0x32 /* Reg8/RegMem8 */, 0x33 /* Reg32/RegMem32 */, |
| 120 | 0x34 /* Rax8/imm8 opcode */, 0x35 /* Rax32/imm32 */, |
| 121 | 0x80, 0x6 /* RegMem8/imm8 */, |
| 122 | 0x81, 0x6 /* RegMem32/imm32 */, 0x83, 0x6 /* RegMem32/imm8 */), |
| 123 | ENCODING_MAP(Cmp, IS_LOAD, 0, 0, |
| 124 | 0x38 /* RegMem8/Reg8 */, 0x39 /* RegMem32/Reg32 */, |
| 125 | 0x3A /* Reg8/RegMem8 */, 0x3B /* Reg32/RegMem32 */, |
| 126 | 0x3C /* Rax8/imm8 opcode */, 0x3D /* Rax32/imm32 */, |
| 127 | 0x80, 0x7 /* RegMem8/imm8 */, |
| 128 | 0x81, 0x7 /* RegMem32/imm32 */, 0x83, 0x7 /* RegMem32/imm8 */), |
| 129 | #undef ENCODING_MAP |
| 130 | |
| 131 | { kX86Imul16RRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RRI", "!0r,!1r,!2d" }, |
| 132 | { kX86Imul16RMI, kRegMemImm, IS_LOAD | IS_QUAD_OP | REG_DEF0_USE1 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RMI", "!0r,[!1r+!2d],!3d" }, |
| 133 | { kX86Imul16RAI, kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0x66, 0, 0x69, 0, 0, 0, 0, 2 }, "Imul16RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" }, |
| 134 | |
| 135 | { kX86Imul32RRI, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RRI", "!0r,!1r,!2d" }, |
| 136 | { kX86Imul32RMI, kRegMemImm, IS_LOAD | IS_QUAD_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RMI", "!0r,[!1r+!2d],!3d" }, |
| 137 | { kX86Imul32RAI, kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x69, 0, 0, 0, 0, 4 }, "Imul32RAI", "!0r,[!1r+!2r<<!3d+!4d],!5d" }, |
| 138 | { kX86Imul32RRI8, kRegRegImm, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RRI8", "!0r,!1r,!2d" }, |
| 139 | { kX86Imul32RMI8, kRegMemImm, IS_LOAD | IS_QUAD_OP | REG_DEF0_USE1 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RMI8", "!0r,[!1r+!2d],!3d" }, |
| 140 | { kX86Imul32RAI8, kRegArrayImm, IS_LOAD | IS_SEXTUPLE_OP | REG_DEF0_USE12 | SETS_CCODES, { 0, 0, 0x6B, 0, 0, 0, 0, 1 }, "Imul32RAI8", "!0r,[!1r+!2r<<!3d+!4d],!5d" }, |
| 141 | |
| 142 | { kX86Mov8MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8MR", "[!0r+!1d],!2r" }, |
| 143 | { kX86Mov8AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8AR", "[!0r+!1r<<!2d+!3d],!4r" }, |
| 144 | { kX86Mov8TR, kThreadReg, IS_STORE | IS_BINARY_OP | REG_USE1, { THREAD_PREFIX, 0, 0x88, 0, 0, 0, 0, 0 }, "Mov8TR", "fs:[!0d],!1r" }, |
| 145 | { kX86Mov8RR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RR", "!0r,!1r" }, |
| 146 | { kX86Mov8RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE1, { 0, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RM", "!0r,[!1r+!2d]" }, |
| 147 | { kX86Mov8RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RA", "!0r,[!1r+!2r<<!3d+!4d]" }, |
| 148 | { kX86Mov8RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, 0, 0x8A, 0, 0, 0, 0, 0 }, "Mov8RT", "!0r,fs:[!1d]" }, |
| 149 | { kX86Mov8RI, kMovRegImm, IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB0, 0, 0, 0, 0, 1 }, "Mov8RI", "!0r,!1d" }, |
| 150 | { kX86Mov8MI, kMemImm, IS_STORE | IS_TERTIARY_OP | REG_USE0, { 0, 0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8MI", "[!0r+!1d],!2d" }, |
| 151 | { kX86Mov8AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0, 0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8AI", "[!0r+!1r<<!2d+!3d],!4d" }, |
| 152 | { kX86Mov8TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0, 0xC6, 0, 0, 0, 0, 1 }, "Mov8TI", "fs:[!0d],!1d" }, |
| 153 | |
| 154 | { kX86Mov16MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0x66, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov16MR", "[!0r+!1d],!2r" }, |
| 155 | { kX86Mov16AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0x66, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov16AR", "[!0r+!1r<<!2d+!3d],!4r" }, |
| 156 | { kX86Mov16TR, kThreadReg, IS_STORE | IS_BINARY_OP | REG_USE1, { THREAD_PREFIX, 0x66, 0x89, 0, 0, 0, 0, 0 }, "Mov16TR", "fs:[!0d],!1r" }, |
| 157 | { kX86Mov16RR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0x66, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RR", "!0r,!1r" }, |
| 158 | { kX86Mov16RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE1, { 0x66, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RM", "!0r,[!1r+!2d]" }, |
| 159 | { kX86Mov16RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0x66, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RA", "!0r,[!1r+!2r<<!3d+!4d]" }, |
| 160 | { kX86Mov16RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, 0x66, 0x8B, 0, 0, 0, 0, 0 }, "Mov16RT", "!0r,fs:[!1d]" }, |
| 161 | { kX86Mov16RI, kMovRegImm, IS_BINARY_OP | REG_DEF0, { 0x66, 0, 0xB8, 0, 0, 0, 0, 2 }, "Mov16RI", "!0r,!1d" }, |
| 162 | { kX86Mov16MI, kMemImm, IS_STORE | IS_TERTIARY_OP | REG_USE0, { 0x66, 0, 0xC7, 0, 0, 0, 0, 2 }, "Mov16MI", "[!0r+!1d],!2d" }, |
| 163 | { kX86Mov16AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0x66, 0, 0xC7, 0, 0, 0, 0, 2 }, "Mov16AI", "[!0r+!1r<<!2d+!3d],!4d" }, |
| 164 | { kX86Mov16TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0x66, 0xC7, 0, 0, 0, 0, 2 }, "Mov16TI", "fs:[!0d],!1d" }, |
| 165 | |
| 166 | { kX86Mov32MR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32MR", "[!0r+!1d],!2r" }, |
| 167 | { kX86Mov32AR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32AR", "[!0r+!1r<<!2d+!3d],!4r" }, |
| 168 | { kX86Mov32TR, kThreadReg, IS_STORE | IS_BINARY_OP | REG_USE1, { THREAD_PREFIX, 0, 0x89, 0, 0, 0, 0, 0 }, "Mov32TR", "fs:[!0d],!1r" }, |
| 169 | { kX86Mov32RR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RR", "!0r,!1r" }, |
| 170 | { kX86Mov32RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | REG_DEF0_USE1, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RM", "!0r,[!1r+!2d]" }, |
| 171 | { kX86Mov32RA, kRegArray, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, |
| 172 | { kX86Mov32RT, kRegThread, IS_LOAD | IS_BINARY_OP | REG_DEF0, { THREAD_PREFIX, 0, 0x8B, 0, 0, 0, 0, 0 }, "Mov32RT", "!0r,fs:[!1d]" }, |
| 173 | { kX86Mov32RI, kMovRegImm, IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB8, 0, 0, 0, 0, 4 }, "Mov32RI", "!0r,!1d" }, |
| 174 | { kX86Mov32MI, kMemImm, IS_STORE | IS_TERTIARY_OP | REG_USE0, { 0, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32MI", "[!0r+!1d],!2d" }, |
| 175 | { kX86Mov32AI, kArrayImm, IS_STORE | IS_QUIN_OP | REG_USE01, { 0, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32AI", "[!0r+!1r<<!2d+!3d],!4d" }, |
| 176 | { kX86Mov32TI, kThreadImm, IS_STORE | IS_BINARY_OP, { THREAD_PREFIX, 0, 0xC7, 0, 0, 0, 0, 4 }, "Mov32TI", "fs:[!0d],!1d" }, |
| 177 | |
| 178 | { kX86Lea32RA, kRegArray, IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8D, 0, 0, 0, 0, 0 }, "Lea32RA", "!0r,[!1r+!2r<<!3d+!4d]" }, |
| 179 | |
Razvan A Lupusoru | bd288c2 | 2013-12-20 17:27:23 -0800 | [diff] [blame^] | 180 | { kX86Cmov32RRC, kRegRegCond, IS_TERTIARY_OP | REG_DEF0_USE01 | USES_CCODES, {0, 0, 0x0F, 0x40, 0, 0, 0, 0}, "Cmovcc32RR", "!2c !0r,!1r" }, |
| 181 | |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 182 | #define SHIFT_ENCODING_MAP(opname, modrm_opcode) \ |
| 183 | { kX86 ## opname ## 8RI, kShiftRegImm, IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES, { 0, 0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8RI", "!0r,!1d" }, \ |
| 184 | { kX86 ## opname ## 8MI, kShiftMemImm, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8MI", "[!0r+!1d],!2d" }, \ |
| 185 | { kX86 ## opname ## 8AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xC0, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "8AI", "[!0r+!1r<<!2d+!3d],!4d" }, \ |
| 186 | { kX86 ## opname ## 8RC, kShiftRegCl, IS_BINARY_OP | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD2, 0, 0, modrm_opcode, 0, 1 }, #opname "8RC", "!0r,cl" }, \ |
| 187 | { kX86 ## opname ## 8MC, kShiftMemCl, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD2, 0, 0, modrm_opcode, 0, 1 }, #opname "8MC", "[!0r+!1d],cl" }, \ |
| 188 | { kX86 ## opname ## 8AC, kShiftArrayCl, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | REG_USEC | SETS_CCODES, { 0, 0, 0xD2, 0, 0, modrm_opcode, 0, 1 }, #opname "8AC", "[!0r+!1r<<!2d+!3d],cl" }, \ |
| 189 | \ |
| 190 | { kX86 ## opname ## 16RI, kShiftRegImm, IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16RI", "!0r,!1d" }, \ |
| 191 | { kX86 ## opname ## 16MI, kShiftMemImm, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16MI", "[!0r+!1d],!2d" }, \ |
| 192 | { kX86 ## opname ## 16AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "16AI", "[!0r+!1r<<!2d+!3d],!4d" }, \ |
| 193 | { kX86 ## opname ## 16RC, kShiftRegCl, IS_BINARY_OP | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0, 1 }, #opname "16RC", "!0r,cl" }, \ |
| 194 | { kX86 ## opname ## 16MC, kShiftMemCl, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0, 1 }, #opname "16MC", "[!0r+!1d],cl" }, \ |
| 195 | { kX86 ## opname ## 16AC, kShiftArrayCl, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | REG_USEC | SETS_CCODES, { 0x66, 0, 0xD3, 0, 0, modrm_opcode, 0, 1 }, #opname "16AC", "[!0r+!1r<<!2d+!3d],cl" }, \ |
| 196 | \ |
| 197 | { kX86 ## opname ## 32RI, kShiftRegImm, IS_BINARY_OP | REG_DEF0_USE0 | SETS_CCODES, { 0, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32RI", "!0r,!1d" }, \ |
| 198 | { kX86 ## opname ## 32MI, kShiftMemImm, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32MI", "[!0r+!1d],!2d" }, \ |
| 199 | { kX86 ## opname ## 32AI, kShiftArrayImm, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xC1, 0, 0, modrm_opcode, 0xD1, 1 }, #opname "32AI", "[!0r+!1r<<!2d+!3d],!4d" }, \ |
| 200 | { kX86 ## opname ## 32RC, kShiftRegCl, IS_BINARY_OP | REG_DEF0_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD3, 0, 0, modrm_opcode, 0, 0 }, #opname "32RC", "!0r,cl" }, \ |
| 201 | { kX86 ## opname ## 32MC, kShiftMemCl, IS_LOAD | IS_STORE | IS_TERTIARY_OP | REG_USE0 | REG_USEC | SETS_CCODES, { 0, 0, 0xD3, 0, 0, modrm_opcode, 0, 0 }, #opname "32MC", "[!0r+!1d],cl" }, \ |
| 202 | { kX86 ## opname ## 32AC, kShiftArrayCl, IS_LOAD | IS_STORE | IS_QUIN_OP | REG_USE01 | REG_USEC | SETS_CCODES, { 0, 0, 0xD3, 0, 0, modrm_opcode, 0, 0 }, #opname "32AC", "[!0r+!1r<<!2d+!3d],cl" } |
| 203 | |
| 204 | SHIFT_ENCODING_MAP(Rol, 0x0), |
| 205 | SHIFT_ENCODING_MAP(Ror, 0x1), |
| 206 | SHIFT_ENCODING_MAP(Rcl, 0x2), |
| 207 | SHIFT_ENCODING_MAP(Rcr, 0x3), |
| 208 | SHIFT_ENCODING_MAP(Sal, 0x4), |
| 209 | SHIFT_ENCODING_MAP(Shr, 0x5), |
| 210 | SHIFT_ENCODING_MAP(Sar, 0x7), |
| 211 | #undef SHIFT_ENCODING_MAP |
| 212 | |
| 213 | { kX86Cmc, kNullary, NO_OPERAND, { 0, 0, 0xF5, 0, 0, 0, 0, 0}, "Cmc", "" }, |
| 214 | |
| 215 | { kX86Test8RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1}, "Test8RI", "!0r,!1d" }, |
| 216 | { kX86Test8MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1}, "Test8MI", "[!0r+!1d],!2d" }, |
| 217 | { kX86Test8AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF6, 0, 0, 0, 0, 1}, "Test8AI", "[!0r+!1r<<!2d+!3d],!4d" }, |
| 218 | { kX86Test16RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16RI", "!0r,!1d" }, |
| 219 | { kX86Test16MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16MI", "[!0r+!1d],!2d" }, |
| 220 | { kX86Test16AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0x66, 0, 0xF7, 0, 0, 0, 0, 2}, "Test16AI", "[!0r+!1r<<!2d+!3d],!4d" }, |
| 221 | { kX86Test32RI, kRegImm, IS_BINARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4}, "Test32RI", "!0r,!1d" }, |
| 222 | { kX86Test32MI, kMemImm, IS_LOAD | IS_TERTIARY_OP | REG_USE0 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4}, "Test32MI", "[!0r+!1d],!2d" }, |
| 223 | { kX86Test32AI, kArrayImm, IS_LOAD | IS_QUIN_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0xF7, 0, 0, 0, 0, 4}, "Test32AI", "[!0r+!1r<<!2d+!3d],!4d" }, |
| 224 | { kX86Test32RR, kRegReg, IS_BINARY_OP | REG_USE01 | SETS_CCODES, { 0, 0, 0x85, 0, 0, 0, 0, 0}, "Test32RR", "!0r,!1r" }, |
| 225 | |
| 226 | #define UNARY_ENCODING_MAP(opname, modrm, is_store, sets_ccodes, \ |
| 227 | reg, reg_kind, reg_flags, \ |
| 228 | mem, mem_kind, mem_flags, \ |
| 229 | arr, arr_kind, arr_flags, imm, \ |
| 230 | b_flags, hw_flags, w_flags, \ |
| 231 | b_format, hw_format, w_format) \ |
| 232 | { kX86 ## opname ## 8 ## reg, reg_kind, reg_flags | b_flags | sets_ccodes, { 0, 0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #reg, #b_format "!0r" }, \ |
| 233 | { kX86 ## opname ## 8 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | b_flags | sets_ccodes, { 0, 0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #mem, #b_format "[!0r+!1d]" }, \ |
| 234 | { kX86 ## opname ## 8 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | b_flags | sets_ccodes, { 0, 0, 0xF6, 0, 0, modrm, 0, imm << 0}, #opname "8" #arr, #b_format "[!0r+!1r<<!2d+!3d]" }, \ |
| 235 | { kX86 ## opname ## 16 ## reg, reg_kind, reg_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #reg, #hw_format "!0r" }, \ |
| 236 | { kX86 ## opname ## 16 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #mem, #hw_format "[!0r+!1d]" }, \ |
| 237 | { kX86 ## opname ## 16 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | hw_flags | sets_ccodes, { 0x66, 0, 0xF7, 0, 0, modrm, 0, imm << 1}, #opname "16" #arr, #hw_format "[!0r+!1r<<!2d+!3d]" }, \ |
| 238 | { kX86 ## opname ## 32 ## reg, reg_kind, reg_flags | w_flags | sets_ccodes, { 0, 0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #reg, #w_format "!0r" }, \ |
| 239 | { kX86 ## opname ## 32 ## mem, mem_kind, IS_LOAD | is_store | mem_flags | w_flags | sets_ccodes, { 0, 0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #mem, #w_format "[!0r+!1d]" }, \ |
| 240 | { kX86 ## opname ## 32 ## arr, arr_kind, IS_LOAD | is_store | arr_flags | w_flags | sets_ccodes, { 0, 0, 0xF7, 0, 0, modrm, 0, imm << 2}, #opname "32" #arr, #w_format "[!0r+!1r<<!2d+!3d]" } |
| 241 | |
| 242 | UNARY_ENCODING_MAP(Not, 0x2, IS_STORE, 0, R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""), |
| 243 | UNARY_ENCODING_MAP(Neg, 0x3, IS_STORE, SETS_CCODES, R, kReg, IS_UNARY_OP | REG_DEF0_USE0, M, kMem, IS_BINARY_OP | REG_USE0, A, kArray, IS_QUAD_OP | REG_USE01, 0, 0, 0, 0, "", "", ""), |
| 244 | |
| 245 | UNARY_ENCODING_MAP(Mul, 0x4, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA, REG_DEFAD_USEA, "ax,al,", "dx:ax,ax,", "edx:eax,eax,"), |
| 246 | UNARY_ENCODING_MAP(Imul, 0x5, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEA, REG_DEFAD_USEA, "ax,al,", "dx:ax,ax,", "edx:eax,eax,"), |
| 247 | UNARY_ENCODING_MAP(Divmod, 0x6, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"), |
| 248 | UNARY_ENCODING_MAP(Idivmod, 0x7, 0, SETS_CCODES, DaR, kRegRegReg, IS_UNARY_OP | REG_USE0, DaM, kRegRegMem, IS_BINARY_OP | REG_USE0, DaA, kRegRegArray, IS_QUAD_OP | REG_USE01, 0, REG_DEFA_USEA, REG_DEFAD_USEAD, REG_DEFAD_USEAD, "ah:al,ax,", "dx:ax,dx:ax,", "edx:eax,edx:eax,"), |
| 249 | #undef UNARY_ENCODING_MAP |
| 250 | |
Vladimir Marko | a6fd8ba | 2013-12-13 10:53:49 +0000 | [diff] [blame] | 251 | { kX86Bswap32R, kRegOpcode, IS_UNARY_OP | REG_DEF0_USE0, { 0, 0, 0x0F, 0xC8, 0, 0, 0, 0 }, "Bswap32R", "!0r" }, |
| 252 | { kX86Push32R, kRegOpcode, IS_UNARY_OP | REG_USE0 | REG_USE_SP | REG_DEF_SP | IS_STORE, { 0, 0, 0x50, 0, 0, 0, 0, 0 }, "Push32R", "!0r" }, |
| 253 | { kX86Pop32R, kRegOpcode, IS_UNARY_OP | REG_DEF0 | REG_USE_SP | REG_DEF_SP | IS_LOAD, { 0, 0, 0x58, 0, 0, 0, 0, 0 }, "Pop32R", "!0r" }, |
Vladimir Marko | a8b4caf | 2013-10-24 15:08:57 +0100 | [diff] [blame] | 254 | |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 255 | #define EXT_0F_ENCODING_MAP(opname, prefix, opcode, reg_def) \ |
| 256 | { kX86 ## opname ## RR, kRegReg, IS_BINARY_OP | reg_def | REG_USE01, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RR", "!0r,!1r" }, \ |
| 257 | { kX86 ## opname ## RM, kRegMem, IS_LOAD | IS_TERTIARY_OP | reg_def | REG_USE01, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RM", "!0r,[!1r+!2d]" }, \ |
| 258 | { kX86 ## opname ## RA, kRegArray, IS_LOAD | IS_QUIN_OP | reg_def | REG_USE012, { prefix, 0, 0x0F, opcode, 0, 0, 0, 0 }, #opname "RA", "!0r,[!1r+!2r<<!3d+!4d]" } |
| 259 | |
| 260 | EXT_0F_ENCODING_MAP(Movsd, 0xF2, 0x10, REG_DEF0), |
| 261 | { kX86MovsdMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdMR", "[!0r+!1d],!2r" }, |
| 262 | { kX86MovsdAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0xF2, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovsdAR", "[!0r+!1r<<!2d+!3d],!4r" }, |
| 263 | |
| 264 | EXT_0F_ENCODING_MAP(Movss, 0xF3, 0x10, REG_DEF0), |
| 265 | { kX86MovssMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovssMR", "[!0r+!1d],!2r" }, |
| 266 | { kX86MovssAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0xF3, 0, 0x0F, 0x11, 0, 0, 0, 0 }, "MovssAR", "[!0r+!1r<<!2d+!3d],!4r" }, |
| 267 | |
| 268 | EXT_0F_ENCODING_MAP(Cvtsi2sd, 0xF2, 0x2A, REG_DEF0), |
| 269 | EXT_0F_ENCODING_MAP(Cvtsi2ss, 0xF3, 0x2A, REG_DEF0), |
| 270 | EXT_0F_ENCODING_MAP(Cvttsd2si, 0xF2, 0x2C, REG_DEF0), |
| 271 | EXT_0F_ENCODING_MAP(Cvttss2si, 0xF3, 0x2C, REG_DEF0), |
| 272 | EXT_0F_ENCODING_MAP(Cvtsd2si, 0xF2, 0x2D, REG_DEF0), |
| 273 | EXT_0F_ENCODING_MAP(Cvtss2si, 0xF3, 0x2D, REG_DEF0), |
| 274 | EXT_0F_ENCODING_MAP(Ucomisd, 0x66, 0x2E, SETS_CCODES), |
| 275 | EXT_0F_ENCODING_MAP(Ucomiss, 0x00, 0x2E, SETS_CCODES), |
| 276 | EXT_0F_ENCODING_MAP(Comisd, 0x66, 0x2F, SETS_CCODES), |
| 277 | EXT_0F_ENCODING_MAP(Comiss, 0x00, 0x2F, SETS_CCODES), |
| 278 | EXT_0F_ENCODING_MAP(Orps, 0x00, 0x56, REG_DEF0), |
| 279 | EXT_0F_ENCODING_MAP(Xorps, 0x00, 0x57, REG_DEF0), |
| 280 | EXT_0F_ENCODING_MAP(Addsd, 0xF2, 0x58, REG_DEF0), |
| 281 | EXT_0F_ENCODING_MAP(Addss, 0xF3, 0x58, REG_DEF0), |
| 282 | EXT_0F_ENCODING_MAP(Mulsd, 0xF2, 0x59, REG_DEF0), |
| 283 | EXT_0F_ENCODING_MAP(Mulss, 0xF3, 0x59, REG_DEF0), |
| 284 | EXT_0F_ENCODING_MAP(Cvtsd2ss, 0xF2, 0x5A, REG_DEF0), |
| 285 | EXT_0F_ENCODING_MAP(Cvtss2sd, 0xF3, 0x5A, REG_DEF0), |
| 286 | EXT_0F_ENCODING_MAP(Subsd, 0xF2, 0x5C, REG_DEF0), |
| 287 | EXT_0F_ENCODING_MAP(Subss, 0xF3, 0x5C, REG_DEF0), |
| 288 | EXT_0F_ENCODING_MAP(Divsd, 0xF2, 0x5E, REG_DEF0), |
| 289 | EXT_0F_ENCODING_MAP(Divss, 0xF3, 0x5E, REG_DEF0), |
| 290 | |
| 291 | { kX86PsrlqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 2, 0, 1 }, "PsrlqRI", "!0r,!1d" }, |
| 292 | { kX86PsllqRI, kRegImm, IS_BINARY_OP | REG_DEF0_USE0, { 0x66, 0, 0x0F, 0x73, 0, 6, 0, 1 }, "PsllqRI", "!0r,!1d" }, |
Mark Mendell | bff1ef0 | 2013-12-13 13:47:34 -0800 | [diff] [blame] | 293 | { kX86SqrtsdRR, kRegReg, IS_BINARY_OP | REG_DEF0_USE1, { 0xF2, 0, 0x0F, 0x51, 0, 0, 0, 0 }, "SqrtsdRR", "!0r,!1r" }, |
Vladimir Marko | 12f9628 | 2013-12-16 14:44:03 +0000 | [diff] [blame] | 294 | { kX86FstpdM, kMem, IS_STORE | IS_BINARY_OP | REG_USE0, { 0x0, 0, 0xDD, 0x00, 0, 3, 0, 0 }, "FstpdM", "[!0r,!1d]" }, |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 295 | |
| 296 | EXT_0F_ENCODING_MAP(Movdxr, 0x66, 0x6E, REG_DEF0), |
| 297 | { kX86MovdrxRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxRR", "!0r,!1r" }, |
| 298 | { kX86MovdrxMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxMR", "[!0r+!1d],!2r" }, |
| 299 | { kX86MovdrxAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014, { 0x66, 0, 0x0F, 0x7E, 0, 0, 0, 0 }, "MovdrxAR", "[!0r+!1r<<!2d+!3d],!4r" }, |
| 300 | |
| 301 | { kX86Set8R, kRegCond, IS_BINARY_OP | REG_DEF0 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8R", "!1c !0r" }, |
| 302 | { kX86Set8M, kMemCond, IS_STORE | IS_TERTIARY_OP | REG_USE0 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8M", "!2c [!0r+!1d]" }, |
| 303 | { kX86Set8A, kArrayCond, IS_STORE | IS_QUIN_OP | REG_USE01 | USES_CCODES, { 0, 0, 0x0F, 0x90, 0, 0, 0, 0 }, "Set8A", "!4c [!0r+!1r<<!2d+!3d]" }, |
| 304 | |
| 305 | // TODO: load/store? |
| 306 | // Encode the modrm opcode as an extra opcode byte to avoid computation during assembly. |
| 307 | { kX86Mfence, kReg, NO_OPERAND, { 0, 0, 0x0F, 0xAE, 0, 6, 0, 0 }, "Mfence", "" }, |
| 308 | |
| 309 | EXT_0F_ENCODING_MAP(Imul16, 0x66, 0xAF, REG_DEF0 | SETS_CCODES), |
| 310 | EXT_0F_ENCODING_MAP(Imul32, 0x00, 0xAF, REG_DEF0 | SETS_CCODES), |
| 311 | |
| 312 | { kX86CmpxchgRR, kRegRegStore, IS_BINARY_OP | REG_DEF0 | REG_USE01 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "!0r,!1r" }, |
| 313 | { kX86CmpxchgMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1d],!2r" }, |
| 314 | { kX86CmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" }, |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 315 | { kX86LockCmpxchgMR, kMemReg, IS_STORE | IS_TERTIARY_OP | REG_USE02 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1d],!2r" }, |
| 316 | { kX86LockCmpxchgAR, kArrayReg, IS_STORE | IS_QUIN_OP | REG_USE014 | REG_DEFA_USEA | SETS_CCODES, { 0xF0, 0, 0x0F, 0xB1, 0, 0, 0, 0 }, "Lock Cmpxchg", "[!0r+!1r<<!2d+!3d],!4r" }, |
Vladimir Marko | 70b797d | 2013-12-03 15:25:24 +0000 | [diff] [blame] | 317 | { kX86LockCmpxchg8bM, kMem, IS_STORE | IS_BINARY_OP | REG_USE0 | REG_DEFAD_USEAD | REG_USEC | REG_USEB | SETS_CCODES, { 0xF0, 0, 0x0F, 0xC7, 0, 1, 0, 0 }, "Lock Cmpxchg8b", "[!0r+!1d]" }, |
| 318 | { kX86LockCmpxchg8bA, kArray, IS_STORE | IS_QUAD_OP | REG_USE01 | REG_DEFAD_USEAD | REG_USEC | REG_USEB | SETS_CCODES, { 0xF0, 0, 0x0F, 0xC7, 0, 1, 0, 0 }, "Lock Cmpxchg8b", "[!0r+!1r<<!2d+!3d]" }, |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 319 | |
| 320 | EXT_0F_ENCODING_MAP(Movzx8, 0x00, 0xB6, REG_DEF0), |
| 321 | EXT_0F_ENCODING_MAP(Movzx16, 0x00, 0xB7, REG_DEF0), |
| 322 | EXT_0F_ENCODING_MAP(Movsx8, 0x00, 0xBE, REG_DEF0), |
| 323 | EXT_0F_ENCODING_MAP(Movsx16, 0x00, 0xBF, REG_DEF0), |
| 324 | #undef EXT_0F_ENCODING_MAP |
| 325 | |
| 326 | { kX86Jcc8, kJcc, IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0, 0, 0x70, 0, 0, 0, 0, 0 }, "Jcc8", "!1c !0t" }, |
| 327 | { kX86Jcc32, kJcc, IS_BINARY_OP | IS_BRANCH | NEEDS_FIXUP | USES_CCODES, { 0, 0, 0x0F, 0x80, 0, 0, 0, 0 }, "Jcc32", "!1c !0t" }, |
| 328 | { kX86Jmp8, kJmp, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP, { 0, 0, 0xEB, 0, 0, 0, 0, 0 }, "Jmp8", "!0t" }, |
| 329 | { kX86Jmp32, kJmp, IS_UNARY_OP | IS_BRANCH | NEEDS_FIXUP, { 0, 0, 0xE9, 0, 0, 0, 0, 0 }, "Jmp32", "!0t" }, |
| 330 | { kX86JmpR, kJmp, IS_UNARY_OP | IS_BRANCH | REG_USE0, { 0, 0, 0xFF, 0, 0, 4, 0, 0 }, "JmpR", "!0r" }, |
| 331 | { kX86CallR, kCall, IS_UNARY_OP | IS_BRANCH | REG_USE0, { 0, 0, 0xE8, 0, 0, 0, 0, 0 }, "CallR", "!0r" }, |
| 332 | { kX86CallM, kCall, IS_BINARY_OP | IS_BRANCH | IS_LOAD | REG_USE0, { 0, 0, 0xFF, 0, 0, 2, 0, 0 }, "CallM", "[!0r+!1d]" }, |
| 333 | { kX86CallA, kCall, IS_QUAD_OP | IS_BRANCH | IS_LOAD | REG_USE01, { 0, 0, 0xFF, 0, 0, 2, 0, 0 }, "CallA", "[!0r+!1r<<!2d+!3d]" }, |
| 334 | { kX86CallT, kCall, IS_UNARY_OP | IS_BRANCH | IS_LOAD, { THREAD_PREFIX, 0, 0xFF, 0, 0, 2, 0, 0 }, "CallT", "fs:[!0d]" }, |
Brian Carlstrom | b1eba21 | 2013-07-17 18:07:19 -0700 | [diff] [blame] | 335 | { kX86Ret, kNullary, NO_OPERAND | IS_BRANCH, { 0, 0, 0xC3, 0, 0, 0, 0, 0 }, "Ret", "" }, |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 336 | |
| 337 | { kX86StartOfMethod, kMacro, IS_UNARY_OP | SETS_CCODES, { 0, 0, 0, 0, 0, 0, 0, 0 }, "StartOfMethod", "!0r" }, |
| 338 | { kX86PcRelLoadRA, kPcRel, IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0 }, "PcRelLoadRA", "!0r,[!1r+!2r<<!3d+!4p]" }, |
| 339 | { kX86PcRelAdr, kPcRel, IS_LOAD | IS_BINARY_OP | REG_DEF0, { 0, 0, 0xB8, 0, 0, 0, 0, 4 }, "PcRelAdr", "!0r,!1d" }, |
| 340 | }; |
| 341 | |
| 342 | static size_t ComputeSize(const X86EncodingMap* entry, int base, int displacement, bool has_sib) { |
| 343 | size_t size = 0; |
| 344 | if (entry->skeleton.prefix1 > 0) { |
| 345 | ++size; |
| 346 | if (entry->skeleton.prefix2 > 0) { |
| 347 | ++size; |
| 348 | } |
| 349 | } |
| 350 | ++size; // opcode |
| 351 | if (entry->skeleton.opcode == 0x0F) { |
| 352 | ++size; |
| 353 | if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) { |
| 354 | ++size; |
| 355 | } |
| 356 | } |
| 357 | ++size; // modrm |
| 358 | if (has_sib || base == rX86_SP) { |
| 359 | // SP requires a SIB byte. |
| 360 | ++size; |
| 361 | } |
| 362 | if (displacement != 0 || base == rBP) { |
| 363 | // BP requires an explicit displacement, even when it's 0. |
| 364 | if (entry->opcode != kX86Lea32RA) { |
| 365 | DCHECK_NE(entry->flags & (IS_LOAD | IS_STORE), 0ULL) << entry->name; |
| 366 | } |
| 367 | size += IS_SIMM8(displacement) ? 1 : 4; |
| 368 | } |
| 369 | size += entry->skeleton.immediate_bytes; |
| 370 | return size; |
| 371 | } |
| 372 | |
// Returns the size in bytes that |lir| will occupy once assembled.  The
// result must agree with what the corresponding Emit* routine emits; most
// cases delegate to ComputeSize with the base register and displacement
// pulled from the LIR operands (a dummy 0x12345678 displacement forces the
// 32-bit form where the encoding always uses disp32).
int X86Mir2Lir::GetInsnSize(LIR* lir) {
  DCHECK(!IsPseudoLirOp(lir->opcode));
  const X86EncodingMap* entry = &X86Mir2Lir::EncodingMap[lir->opcode];
  switch (entry->kind) {
    case kData:
      return 4;  // 4 bytes of data
    case kNop:
      return lir->operands[0];  // length of nop is sole operand
    case kNullary:
      return 1;  // 1 byte of opcode
    case kRegOpcode:  // lir operands - 0: reg
      return ComputeSize(entry, 0, 0, false) - 1;  // subtract 1 for modrm
    case kReg:  // lir operands - 0: reg
      return ComputeSize(entry, 0, 0, false);
    case kMem:  // lir operands - 0: base, 1: disp
      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
    case kArray:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
    case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
    case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
    case kThreadReg:  // lir operands - 0: disp, 1: reg
      return ComputeSize(entry, 0, lir->operands[0], false);
    case kRegReg:
      return ComputeSize(entry, 0, 0, false);
    case kRegRegStore:
      return ComputeSize(entry, 0, 0, false);
    case kRegMem:  // lir operands - 0: reg, 1: base, 2: disp
      return ComputeSize(entry, lir->operands[1], lir->operands[2], false);
    case kRegArray:   // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
      return ComputeSize(entry, lir->operands[1], lir->operands[4], true);
    case kRegThread:  // lir operands - 0: reg, 1: disp
      return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32bit
    case kRegImm: {  // lir operands - 0: reg, 1: immediate
      size_t size = ComputeSize(entry, 0, 0, false);
      if (entry->skeleton.ax_opcode == 0) {
        return size;
      } else {
        // AX opcodes don't require the modrm byte.
        int reg = lir->operands[0];
        return size - (reg == rAX ? 1 : 0);
      }
    }
    case kMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
    case kArrayImm:  // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
    case kThreadImm:  // lir operands - 0: disp, 1: imm
      return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32bit
    case kRegRegImm:  // lir operands - 0: reg, 1: reg, 2: imm
      return ComputeSize(entry, 0, 0, false);
    case kRegMemImm:  // lir operands - 0: reg, 1: base, 2: disp, 3: imm
      return ComputeSize(entry, lir->operands[1], lir->operands[2], false);
    case kRegArrayImm:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp, 5: imm
      return ComputeSize(entry, lir->operands[1], lir->operands[4], true);
    case kMovRegImm:  // lir operands - 0: reg, 1: immediate
      return 1 + entry->skeleton.immediate_bytes;
    case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
      // Shift by immediate one has a shorter opcode.
      return ComputeSize(entry, 0, 0, false) - (lir->operands[1] == 1 ? 1 : 0);
    case kShiftMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
      // Shift by immediate one has a shorter opcode.
      return ComputeSize(entry, lir->operands[0], lir->operands[1], false) -
             (lir->operands[2] == 1 ? 1 : 0);
    case kShiftArrayImm:  // lir operands - 0: base, 1: index, 2: scale, 3: disp 4: immediate
      // Shift by immediate one has a shorter opcode.
      return ComputeSize(entry, lir->operands[0], lir->operands[3], true) -
             (lir->operands[4] == 1 ? 1 : 0);
    case kShiftRegCl:
      return ComputeSize(entry, 0, 0, false);
    case kShiftMemCl:  // lir operands - 0: base, 1: disp, 2: cl
      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
    case kShiftArrayCl:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
    case kRegCond:  // lir operands - 0: reg, 1: cond
      return ComputeSize(entry, 0, 0, false);
    case kMemCond:  // lir operands - 0: base, 1: disp, 2: cond
      return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
    case kArrayCond:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: cond
      return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
    case kRegRegCond:  // lir operands - 0: reg, 1: reg, 2: cond
      return ComputeSize(entry, 0, 0, false);
    case kJcc:
      if (lir->opcode == kX86Jcc8) {
        return 2;  // opcode + rel8
      } else {
        DCHECK(lir->opcode == kX86Jcc32);
        return 6;  // 2 byte opcode + rel32
      }
    case kJmp:
      if (lir->opcode == kX86Jmp8) {
        return 2;  // opcode + rel8
      } else if (lir->opcode == kX86Jmp32) {
        return 5;  // opcode + rel32
      } else {
        DCHECK(lir->opcode == kX86JmpR);
        return 2;  // opcode + modrm
      }
    case kCall:
      switch (lir->opcode) {
        case kX86CallR: return 2;  // opcode modrm
        case kX86CallM:  // lir operands - 0: base, 1: disp
          return ComputeSize(entry, lir->operands[0], lir->operands[1], false);
        case kX86CallA:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
          return ComputeSize(entry, lir->operands[0], lir->operands[3], true);
        case kX86CallT:  // lir operands - 0: disp
          return ComputeSize(entry, 0, 0x12345678, false);  // displacement size is always 32bit
        default:
          break;
      }
      break;
    case kPcRel:
      if (entry->opcode == kX86PcRelLoadRA) {
        // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
        return ComputeSize(entry, lir->operands[1], 0x12345678, true);
      } else {
        DCHECK(entry->opcode == kX86PcRelAdr);
        return 5;  // opcode with reg + 4 byte immediate
      }
    case kMacro:
      // StartOfMethod expands to call/pop/sub; see the kMacro emission path.
      DCHECK_EQ(lir->opcode, static_cast<int>(kX86StartOfMethod));
      return 5 /* call opcode + 4 byte displacement */ + 1 /* pop reg */ +
          ComputeSize(&X86Mir2Lir::EncodingMap[kX86Sub32RI], 0, 0, false) -
          (lir->operands[0] == rAX ? 1 : 0);  // shorter ax encoding
    default:
      break;
  }
  UNIMPLEMENTED(FATAL) << "Unimplemented size encoding for: " << entry->name;
  return 0;
}
| 504 | |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 505 | void X86Mir2Lir::EmitPrefix(const X86EncodingMap* entry) { |
| 506 | if (entry->skeleton.prefix1 != 0) { |
| 507 | code_buffer_.push_back(entry->skeleton.prefix1); |
| 508 | if (entry->skeleton.prefix2 != 0) { |
| 509 | code_buffer_.push_back(entry->skeleton.prefix2); |
| 510 | } |
| 511 | } else { |
| 512 | DCHECK_EQ(0, entry->skeleton.prefix2); |
| 513 | } |
| 514 | } |
| 515 | |
| 516 | void X86Mir2Lir::EmitOpcode(const X86EncodingMap* entry) { |
| 517 | code_buffer_.push_back(entry->skeleton.opcode); |
| 518 | if (entry->skeleton.opcode == 0x0F) { |
| 519 | code_buffer_.push_back(entry->skeleton.extra_opcode1); |
| 520 | if (entry->skeleton.extra_opcode1 == 0x38 || entry->skeleton.extra_opcode1 == 0x3A) { |
| 521 | code_buffer_.push_back(entry->skeleton.extra_opcode2); |
| 522 | } else { |
| 523 | DCHECK_EQ(0, entry->skeleton.extra_opcode2); |
| 524 | } |
| 525 | } else { |
| 526 | DCHECK_EQ(0, entry->skeleton.extra_opcode1); |
| 527 | DCHECK_EQ(0, entry->skeleton.extra_opcode2); |
| 528 | } |
| 529 | } |
| 530 | |
// Convenience wrapper: emits the prefix byte(s) then the opcode byte(s).
void X86Mir2Lir::EmitPrefixAndOpcode(const X86EncodingMap* entry) {
  EmitPrefix(entry);
  EmitOpcode(entry);
}
| 535 | |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 536 | static uint8_t ModrmForDisp(int base, int disp) { |
| 537 | // BP requires an explicit disp, so do not omit it in the 0 case |
| 538 | if (disp == 0 && base != rBP) { |
| 539 | return 0; |
| 540 | } else if (IS_SIMM8(disp)) { |
| 541 | return 1; |
| 542 | } else { |
| 543 | return 2; |
| 544 | } |
| 545 | } |
| 546 | |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 547 | void X86Mir2Lir::EmitDisp(uint8_t base, int disp) { |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 548 | // BP requires an explicit disp, so do not omit it in the 0 case |
| 549 | if (disp == 0 && base != rBP) { |
| 550 | return; |
| 551 | } else if (IS_SIMM8(disp)) { |
| 552 | code_buffer_.push_back(disp & 0xFF); |
| 553 | } else { |
| 554 | code_buffer_.push_back(disp & 0xFF); |
| 555 | code_buffer_.push_back((disp >> 8) & 0xFF); |
| 556 | code_buffer_.push_back((disp >> 16) & 0xFF); |
| 557 | code_buffer_.push_back((disp >> 24) & 0xFF); |
| 558 | } |
| 559 | } |
| 560 | |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 561 | void X86Mir2Lir::EmitModrmDisp(uint8_t reg_or_opcode, uint8_t base, int disp) { |
| 562 | DCHECK_LT(reg_or_opcode, 8); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 563 | DCHECK_LT(base, 8); |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 564 | uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | base; |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 565 | code_buffer_.push_back(modrm); |
| 566 | if (base == rX86_SP) { |
| 567 | // Special SIB for SP base |
| 568 | code_buffer_.push_back(0 << 6 | (rX86_SP << 3) | rX86_SP); |
| 569 | } |
| 570 | EmitDisp(base, disp); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 571 | } |
| 572 | |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 573 | void X86Mir2Lir::EmitModrmSibDisp(uint8_t reg_or_opcode, uint8_t base, uint8_t index, |
| 574 | int scale, int disp) { |
| 575 | DCHECK_LT(reg_or_opcode, 8); |
| 576 | uint8_t modrm = (ModrmForDisp(base, disp) << 6) | (reg_or_opcode << 3) | rX86_SP; |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 577 | code_buffer_.push_back(modrm); |
| 578 | DCHECK_LT(scale, 4); |
| 579 | DCHECK_LT(index, 8); |
| 580 | DCHECK_LT(base, 8); |
| 581 | uint8_t sib = (scale << 6) | (index << 3) | base; |
| 582 | code_buffer_.push_back(sib); |
| 583 | EmitDisp(base, disp); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 584 | } |
| 585 | |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 586 | void X86Mir2Lir::EmitImm(const X86EncodingMap* entry, int imm) { |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 587 | switch (entry->skeleton.immediate_bytes) { |
| 588 | case 1: |
| 589 | DCHECK(IS_SIMM8(imm)); |
| 590 | code_buffer_.push_back(imm & 0xFF); |
| 591 | break; |
| 592 | case 2: |
| 593 | DCHECK(IS_SIMM16(imm)); |
| 594 | code_buffer_.push_back(imm & 0xFF); |
| 595 | code_buffer_.push_back((imm >> 8) & 0xFF); |
| 596 | break; |
| 597 | case 4: |
| 598 | code_buffer_.push_back(imm & 0xFF); |
| 599 | code_buffer_.push_back((imm >> 8) & 0xFF); |
| 600 | code_buffer_.push_back((imm >> 16) & 0xFF); |
| 601 | code_buffer_.push_back((imm >> 24) & 0xFF); |
| 602 | break; |
| 603 | default: |
| 604 | LOG(FATAL) << "Unexpected immediate bytes (" << entry->skeleton.immediate_bytes |
| 605 | << ") for instruction: " << entry->name; |
| 606 | break; |
| 607 | } |
| 608 | } |
| 609 | |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 610 | void X86Mir2Lir::EmitOpRegOpcode(const X86EncodingMap* entry, uint8_t reg) { |
| 611 | EmitPrefixAndOpcode(entry); |
| 612 | // There's no 3-byte instruction with +rd |
| 613 | DCHECK(entry->skeleton.opcode != 0x0F || |
| 614 | (entry->skeleton.extra_opcode1 != 0x38 && entry->skeleton.extra_opcode1 != 0x3A)); |
| 615 | DCHECK(!X86_FPREG(reg)); |
| 616 | DCHECK_LT(reg, 8); |
| 617 | code_buffer_.back() += reg; |
| 618 | DCHECK_EQ(0, entry->skeleton.ax_opcode); |
| 619 | DCHECK_EQ(0, entry->skeleton.immediate_bytes); |
| 620 | } |
| 621 | |
// Emits a single-register instruction: the register goes in the modrm rm
// field, the entry's modrm_opcode extension in the reg field (mod = 3).
void X86Mir2Lir::EmitOpReg(const X86EncodingMap* entry, uint8_t reg) {
  EmitPrefixAndOpcode(entry);
  if (X86_FPREG(reg)) {
    // FP registers are encoded by their low bits only.
    reg = reg & X86_FP_REG_MASK;
  }
  if (reg >= 4) {
    // Guard against 8-bit opcodes (name contains '8') with registers 4-7,
    // which would encode as the high byte registers instead.
    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  }
  DCHECK_LT(reg, 8);
  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
  code_buffer_.push_back(modrm);
  DCHECK_EQ(0, entry->skeleton.ax_opcode);
  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
| 637 | |
// Emits a memory-operand instruction ([base+disp]) with a single-byte opcode
// (the DCHECKs below rule out two/three-byte opcodes on this path).
void X86Mir2Lir::EmitOpMem(const X86EncodingMap* entry, uint8_t base, int disp) {
  EmitPrefix(entry);
  code_buffer_.push_back(entry->skeleton.opcode);
  DCHECK_NE(0x0F, entry->skeleton.opcode);
  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
  // rSP as base would need SIB handling that this path does not support.
  DCHECK_NE(rX86_SP, base);
  EmitModrmDisp(entry->skeleton.modrm_opcode, base, disp);
  DCHECK_EQ(0, entry->skeleton.ax_opcode);
  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
| 649 | |
// Emits an instruction with an array operand: [base + index<<scale + disp],
// using the entry's modrm_opcode extension as the modrm reg field.
void X86Mir2Lir::EmitOpArray(const X86EncodingMap* entry, uint8_t base, uint8_t index,
                             int scale, int disp) {
  EmitPrefixAndOpcode(entry);
  EmitModrmSibDisp(entry->skeleton.modrm_opcode, base, index, scale, disp);
  DCHECK_EQ(0, entry->skeleton.ax_opcode);
  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
| 657 | |
// Emits a memory/register instruction: [base+disp] paired with |reg| in the
// modrm reg field (direction is determined by the opcode).
void X86Mir2Lir::EmitMemReg(const X86EncodingMap* entry,
                       uint8_t base, int disp, uint8_t reg) {
  EmitPrefixAndOpcode(entry);
  if (X86_FPREG(reg)) {
    // FP registers are encoded by their low bits only.
    reg = reg & X86_FP_REG_MASK;
  }
  if (reg >= 4) {
    // Guard against 8-bit opcodes with registers 4-7; Movzx8RM/Movsx8RM are
    // exempt since their '8' refers to the memory operand, not the register.
    DCHECK(strchr(entry->name, '8') == NULL ||
           entry->opcode == kX86Movzx8RM || entry->opcode == kX86Movsx8RM)
        << entry->name << " " << static_cast<int>(reg)
        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  }
  EmitModrmDisp(reg, base, disp);
  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
  DCHECK_EQ(0, entry->skeleton.ax_opcode);
  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
| 675 | |
// Emits a register/memory instruction by delegating to EmitMemReg; the
// encoding is identical and the opcode itself selects the direction.
void X86Mir2Lir::EmitRegMem(const X86EncodingMap* entry,
                       uint8_t reg, uint8_t base, int disp) {
  // Opcode will flip operands.
  EmitMemReg(entry, base, disp, reg);
}
| 681 | |
| 682 | void X86Mir2Lir::EmitRegArray(const X86EncodingMap* entry, uint8_t reg, uint8_t base, uint8_t index, |
| 683 | int scale, int disp) { |
| 684 | EmitPrefixAndOpcode(entry); |
| 685 | if (X86_FPREG(reg)) { |
| 686 | reg = reg & X86_FP_REG_MASK; |
| 687 | } |
| 688 | EmitModrmSibDisp(reg, base, index, scale, disp); |
| 689 | DCHECK_EQ(0, entry->skeleton.modrm_opcode); |
| 690 | DCHECK_EQ(0, entry->skeleton.ax_opcode); |
| 691 | DCHECK_EQ(0, entry->skeleton.immediate_bytes); |
| 692 | } |
| 693 | |
// Emits an array/register instruction by delegating to EmitRegArray; the
// encoding is identical and the opcode itself selects the direction.
void X86Mir2Lir::EmitArrayReg(const X86EncodingMap* entry, uint8_t base, uint8_t index, int scale, int disp,
                              uint8_t reg) {
  // Opcode will flip operands.
  EmitRegArray(entry, reg, base, index, scale, disp);
}
| 699 | |
| 700 | void X86Mir2Lir::EmitRegThread(const X86EncodingMap* entry, uint8_t reg, int disp) { |
| 701 | DCHECK_NE(entry->skeleton.prefix1, 0); |
| 702 | EmitPrefixAndOpcode(entry); |
| 703 | if (X86_FPREG(reg)) { |
| 704 | reg = reg & X86_FP_REG_MASK; |
| 705 | } |
| 706 | if (reg >= 4) { |
| 707 | DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg) |
| 708 | << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file); |
| 709 | } |
| 710 | DCHECK_LT(reg, 8); |
| 711 | uint8_t modrm = (0 << 6) | (reg << 3) | rBP; |
| 712 | code_buffer_.push_back(modrm); |
| 713 | code_buffer_.push_back(disp & 0xFF); |
| 714 | code_buffer_.push_back((disp >> 8) & 0xFF); |
| 715 | code_buffer_.push_back((disp >> 16) & 0xFF); |
| 716 | code_buffer_.push_back((disp >> 24) & 0xFF); |
| 717 | DCHECK_EQ(0, entry->skeleton.modrm_opcode); |
| 718 | DCHECK_EQ(0, entry->skeleton.ax_opcode); |
| 719 | DCHECK_EQ(0, entry->skeleton.immediate_bytes); |
| 720 | } |
| 721 | |
| 722 | void X86Mir2Lir::EmitRegReg(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2) { |
| 723 | EmitPrefixAndOpcode(entry); |
| 724 | if (X86_FPREG(reg1)) { |
| 725 | reg1 = reg1 & X86_FP_REG_MASK; |
| 726 | } |
| 727 | if (X86_FPREG(reg2)) { |
| 728 | reg2 = reg2 & X86_FP_REG_MASK; |
| 729 | } |
| 730 | DCHECK_LT(reg1, 8); |
| 731 | DCHECK_LT(reg2, 8); |
| 732 | uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2; |
| 733 | code_buffer_.push_back(modrm); |
| 734 | DCHECK_EQ(0, entry->skeleton.modrm_opcode); |
| 735 | DCHECK_EQ(0, entry->skeleton.ax_opcode); |
| 736 | DCHECK_EQ(0, entry->skeleton.immediate_bytes); |
| 737 | } |
| 738 | |
| 739 | void X86Mir2Lir::EmitRegRegImm(const X86EncodingMap* entry, |
| 740 | uint8_t reg1, uint8_t reg2, int32_t imm) { |
| 741 | EmitPrefixAndOpcode(entry); |
| 742 | if (X86_FPREG(reg1)) { |
| 743 | reg1 = reg1 & X86_FP_REG_MASK; |
| 744 | } |
| 745 | if (X86_FPREG(reg2)) { |
| 746 | reg2 = reg2 & X86_FP_REG_MASK; |
| 747 | } |
| 748 | DCHECK_LT(reg1, 8); |
| 749 | DCHECK_LT(reg2, 8); |
| 750 | uint8_t modrm = (3 << 6) | (reg1 << 3) | reg2; |
| 751 | code_buffer_.push_back(modrm); |
| 752 | DCHECK_EQ(0, entry->skeleton.modrm_opcode); |
| 753 | DCHECK_EQ(0, entry->skeleton.ax_opcode); |
| 754 | EmitImm(entry, imm); |
| 755 | } |
| 756 | |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 757 | void X86Mir2Lir::EmitRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) { |
| 758 | if (entry->skeleton.prefix1 != 0) { |
| 759 | code_buffer_.push_back(entry->skeleton.prefix1); |
| 760 | if (entry->skeleton.prefix2 != 0) { |
| 761 | code_buffer_.push_back(entry->skeleton.prefix2); |
| 762 | } |
| 763 | } else { |
| 764 | DCHECK_EQ(0, entry->skeleton.prefix2); |
| 765 | } |
| 766 | if (reg == rAX && entry->skeleton.ax_opcode != 0) { |
| 767 | code_buffer_.push_back(entry->skeleton.ax_opcode); |
| 768 | } else { |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 769 | EmitOpcode(entry); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 770 | if (X86_FPREG(reg)) { |
| 771 | reg = reg & X86_FP_REG_MASK; |
| 772 | } |
| 773 | uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg; |
| 774 | code_buffer_.push_back(modrm); |
| 775 | } |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 776 | EmitImm(entry, imm); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 777 | } |
| 778 | |
Mark Mendell | 343adb5 | 2013-12-18 06:02:17 -0800 | [diff] [blame] | 779 | void X86Mir2Lir::EmitMemImm(const X86EncodingMap* entry, uint8_t base, int disp, int32_t imm) { |
| 780 | EmitPrefixAndOpcode(entry); |
| 781 | EmitModrmDisp(entry->skeleton.modrm_opcode, base, disp); |
| 782 | DCHECK_EQ(0, entry->skeleton.ax_opcode); |
| 783 | EmitImm(entry, imm); |
| 784 | } |
| 785 | |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 786 | void X86Mir2Lir::EmitThreadImm(const X86EncodingMap* entry, int disp, int imm) { |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 787 | EmitPrefixAndOpcode(entry); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 788 | uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP; |
| 789 | code_buffer_.push_back(modrm); |
| 790 | code_buffer_.push_back(disp & 0xFF); |
| 791 | code_buffer_.push_back((disp >> 8) & 0xFF); |
| 792 | code_buffer_.push_back((disp >> 16) & 0xFF); |
| 793 | code_buffer_.push_back((disp >> 24) & 0xFF); |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 794 | EmitImm(entry, imm); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 795 | DCHECK_EQ(entry->skeleton.ax_opcode, 0); |
| 796 | } |
| 797 | |
| 798 | void X86Mir2Lir::EmitMovRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) { |
| 799 | DCHECK_LT(reg, 8); |
| 800 | code_buffer_.push_back(0xB8 + reg); |
| 801 | code_buffer_.push_back(imm & 0xFF); |
| 802 | code_buffer_.push_back((imm >> 8) & 0xFF); |
| 803 | code_buffer_.push_back((imm >> 16) & 0xFF); |
| 804 | code_buffer_.push_back((imm >> 24) & 0xFF); |
| 805 | } |
| 806 | |
// Emits shift reg, imm.  A shift count of 1 selects the shorter encoding
// stored in ax_opcode, which has no immediate byte.
void X86Mir2Lir::EmitShiftRegImm(const X86EncodingMap* entry, uint8_t reg, int imm) {
  EmitPrefix(entry);
  if (imm != 1) {
    code_buffer_.push_back(entry->skeleton.opcode);
  } else {
    // Shorter encoding for 1 bit shift
    code_buffer_.push_back(entry->skeleton.ax_opcode);
  }
  DCHECK_NE(0x0F, entry->skeleton.opcode);
  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
  if (reg >= 4) {
    // Guard against 8-bit opcodes (name contains '8') with registers 4-7.
    DCHECK(strchr(entry->name, '8') == NULL) << entry->name << " " << static_cast<int>(reg)
        << " in " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
  }
  DCHECK_LT(reg, 8);
  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
  code_buffer_.push_back(modrm);
  if (imm != 1) {
    // Only the general form carries an immediate byte.
    DCHECK_EQ(entry->skeleton.immediate_bytes, 1);
    DCHECK(IS_SIMM8(imm));
    code_buffer_.push_back(imm & 0xFF);
  }
}
| 831 | |
// Emits shift reg, cl.  The count register is fixed to CL by the encoding;
// the DCHECK enforces that the caller passed rCX.
void X86Mir2Lir::EmitShiftRegCl(const X86EncodingMap* entry, uint8_t reg, uint8_t cl) {
  DCHECK_EQ(cl, static_cast<uint8_t>(rCX));
  EmitPrefix(entry);
  code_buffer_.push_back(entry->skeleton.opcode);
  DCHECK_NE(0x0F, entry->skeleton.opcode);
  DCHECK_EQ(0, entry->skeleton.extra_opcode1);
  DCHECK_EQ(0, entry->skeleton.extra_opcode2);
  DCHECK_LT(reg, 8);
  uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg;
  code_buffer_.push_back(modrm);
  DCHECK_EQ(0, entry->skeleton.ax_opcode);
  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
}
| 845 | |
| 846 | void X86Mir2Lir::EmitRegCond(const X86EncodingMap* entry, uint8_t reg, uint8_t condition) { |
| 847 | if (entry->skeleton.prefix1 != 0) { |
| 848 | code_buffer_.push_back(entry->skeleton.prefix1); |
| 849 | if (entry->skeleton.prefix2 != 0) { |
| 850 | code_buffer_.push_back(entry->skeleton.prefix2); |
| 851 | } |
| 852 | } else { |
| 853 | DCHECK_EQ(0, entry->skeleton.prefix2); |
| 854 | } |
| 855 | DCHECK_EQ(0, entry->skeleton.ax_opcode); |
| 856 | DCHECK_EQ(0x0F, entry->skeleton.opcode); |
| 857 | code_buffer_.push_back(0x0F); |
| 858 | DCHECK_EQ(0x90, entry->skeleton.extra_opcode1); |
| 859 | code_buffer_.push_back(0x90 | condition); |
| 860 | DCHECK_EQ(0, entry->skeleton.extra_opcode2); |
| 861 | DCHECK_LT(reg, 8); |
| 862 | uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg; |
| 863 | code_buffer_.push_back(modrm); |
| 864 | DCHECK_EQ(entry->skeleton.immediate_bytes, 0); |
| 865 | } |
| 866 | |
Razvan A Lupusoru | bd288c2 | 2013-12-20 17:27:23 -0800 | [diff] [blame^] | 867 | void X86Mir2Lir::EmitRegRegCond(const X86EncodingMap* entry, uint8_t reg1, uint8_t reg2, uint8_t condition) { |
| 868 | // Generate prefix and opcode without the condition |
| 869 | EmitPrefixAndOpcode(entry); |
| 870 | |
| 871 | // Now add the condition. The last byte of opcode is the one that receives it. |
| 872 | DCHECK_LE(condition, 0xF); |
| 873 | code_buffer_.back() += condition; |
| 874 | |
| 875 | // Not expecting to have to encode immediate or do anything special for ModR/M since there are two registers. |
| 876 | DCHECK_EQ(0, entry->skeleton.immediate_bytes); |
| 877 | DCHECK_EQ(0, entry->skeleton.modrm_opcode); |
| 878 | |
| 879 | // Check that registers requested for encoding are sane. |
| 880 | DCHECK_LT(reg1, 8); |
| 881 | DCHECK_LT(reg2, 8); |
| 882 | |
| 883 | // For register to register encoding, the mod is 3. |
| 884 | const uint8_t mod = (3 << 6); |
| 885 | |
| 886 | // Encode the ModR/M byte now. |
| 887 | const uint8_t modrm = mod | (reg1 << 3) | reg2; |
| 888 | code_buffer_.push_back(modrm); |
| 889 | } |
| 890 | |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 891 | void X86Mir2Lir::EmitJmp(const X86EncodingMap* entry, int rel) { |
| 892 | if (entry->opcode == kX86Jmp8) { |
| 893 | DCHECK(IS_SIMM8(rel)); |
| 894 | code_buffer_.push_back(0xEB); |
| 895 | code_buffer_.push_back(rel & 0xFF); |
| 896 | } else if (entry->opcode == kX86Jmp32) { |
| 897 | code_buffer_.push_back(0xE9); |
| 898 | code_buffer_.push_back(rel & 0xFF); |
| 899 | code_buffer_.push_back((rel >> 8) & 0xFF); |
| 900 | code_buffer_.push_back((rel >> 16) & 0xFF); |
| 901 | code_buffer_.push_back((rel >> 24) & 0xFF); |
| 902 | } else { |
| 903 | DCHECK(entry->opcode == kX86JmpR); |
| 904 | code_buffer_.push_back(entry->skeleton.opcode); |
| 905 | uint8_t reg = static_cast<uint8_t>(rel); |
| 906 | DCHECK_LT(reg, 8); |
| 907 | uint8_t modrm = (3 << 6) | (entry->skeleton.modrm_opcode << 3) | reg; |
| 908 | code_buffer_.push_back(modrm); |
| 909 | } |
| 910 | } |
| 911 | |
| 912 | void X86Mir2Lir::EmitJcc(const X86EncodingMap* entry, int rel, uint8_t cc) { |
| 913 | DCHECK_LT(cc, 16); |
| 914 | if (entry->opcode == kX86Jcc8) { |
| 915 | DCHECK(IS_SIMM8(rel)); |
| 916 | code_buffer_.push_back(0x70 | cc); |
| 917 | code_buffer_.push_back(rel & 0xFF); |
| 918 | } else { |
| 919 | DCHECK(entry->opcode == kX86Jcc32); |
| 920 | code_buffer_.push_back(0x0F); |
| 921 | code_buffer_.push_back(0x80 | cc); |
| 922 | code_buffer_.push_back(rel & 0xFF); |
| 923 | code_buffer_.push_back((rel >> 8) & 0xFF); |
| 924 | code_buffer_.push_back((rel >> 16) & 0xFF); |
| 925 | code_buffer_.push_back((rel >> 24) & 0xFF); |
| 926 | } |
| 927 | } |
| 928 | |
| 929 | void X86Mir2Lir::EmitCallMem(const X86EncodingMap* entry, uint8_t base, int disp) { |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 930 | EmitPrefixAndOpcode(entry); |
| 931 | EmitModrmDisp(entry->skeleton.modrm_opcode, base, disp); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 932 | DCHECK_EQ(0, entry->skeleton.ax_opcode); |
| 933 | DCHECK_EQ(0, entry->skeleton.immediate_bytes); |
| 934 | } |
| 935 | |
| 936 | void X86Mir2Lir::EmitCallThread(const X86EncodingMap* entry, int disp) { |
| 937 | DCHECK_NE(entry->skeleton.prefix1, 0); |
Vladimir Marko | 057c74a | 2013-12-03 15:20:45 +0000 | [diff] [blame] | 938 | EmitPrefixAndOpcode(entry); |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 939 | uint8_t modrm = (0 << 6) | (entry->skeleton.modrm_opcode << 3) | rBP; |
| 940 | code_buffer_.push_back(modrm); |
| 941 | code_buffer_.push_back(disp & 0xFF); |
| 942 | code_buffer_.push_back((disp >> 8) & 0xFF); |
| 943 | code_buffer_.push_back((disp >> 16) & 0xFF); |
| 944 | code_buffer_.push_back((disp >> 24) & 0xFF); |
| 945 | DCHECK_EQ(0, entry->skeleton.ax_opcode); |
| 946 | DCHECK_EQ(0, entry->skeleton.immediate_bytes); |
| 947 | } |
| 948 | |
void X86Mir2Lir::EmitPcRel(const X86EncodingMap* entry, uint8_t reg,
                           int base_or_table, uint8_t index, int scale, int table_or_disp) {
  // Pick the displacement from the wrapped EmbeddedData record: for a
  // PC-relative load it is wrapped in 'table_or_disp'; for an address
  // materialization (kX86PcRelAdr) it is wrapped in 'base_or_table'.
  int disp;
  if (entry->opcode == kX86PcRelLoadRA) {
    Mir2Lir::EmbeddedData *tab_rec =
        reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(table_or_disp));
    disp = tab_rec->offset;
  } else {
    DCHECK(entry->opcode == kX86PcRelAdr);
    Mir2Lir::EmbeddedData *tab_rec =
        reinterpret_cast<Mir2Lir::EmbeddedData*>(UnwrapPointer(base_or_table));
    disp = tab_rec->offset;
  }
  EmitPrefix(entry);
  // Strip the FP-register flag bit so only the encodable register number remains.
  if (X86_FPREG(reg)) {
    reg = reg & X86_FP_REG_MASK;
  }
  DCHECK_LT(reg, 8);
  if (entry->opcode == kX86PcRelLoadRA) {
    // Load form: single-byte opcode, then ModRM with mod == 2 (disp32) and
    // r/m == SP, which signals that a SIB byte follows.
    code_buffer_.push_back(entry->skeleton.opcode);
    DCHECK_NE(0x0F, entry->skeleton.opcode);
    DCHECK_EQ(0, entry->skeleton.extra_opcode1);
    DCHECK_EQ(0, entry->skeleton.extra_opcode2);
    uint8_t modrm = (2 << 6) | (reg << 3) | rX86_SP;
    code_buffer_.push_back(modrm);
    DCHECK_LT(scale, 4);
    DCHECK_LT(index, 8);
    DCHECK_LT(base_or_table, 8);
    uint8_t base = static_cast<uint8_t>(base_or_table);
    // SIB byte: scale, index and base of the table address computation.
    uint8_t sib = (scale << 6) | (index << 3) | base;
    code_buffer_.push_back(sib);
    DCHECK_EQ(0, entry->skeleton.immediate_bytes);
  } else {
    // Address form: destination register is folded into the opcode byte itself.
    code_buffer_.push_back(entry->skeleton.opcode + reg);
  }
  // Little-endian 32-bit displacement.
  code_buffer_.push_back(disp & 0xFF);
  code_buffer_.push_back((disp >> 8) & 0xFF);
  code_buffer_.push_back((disp >> 16) & 0xFF);
  code_buffer_.push_back((disp >> 24) & 0xFF);
  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
  DCHECK_EQ(0, entry->skeleton.ax_opcode);
}
| 991 | |
| 992 | void X86Mir2Lir::EmitMacro(const X86EncodingMap* entry, uint8_t reg, int offset) { |
| 993 | DCHECK(entry->opcode == kX86StartOfMethod) << entry->name; |
| 994 | code_buffer_.push_back(0xE8); // call +0 |
| 995 | code_buffer_.push_back(0); |
| 996 | code_buffer_.push_back(0); |
| 997 | code_buffer_.push_back(0); |
| 998 | code_buffer_.push_back(0); |
| 999 | |
| 1000 | DCHECK_LT(reg, 8); |
| 1001 | code_buffer_.push_back(0x58 + reg); // pop reg |
| 1002 | |
| 1003 | EmitRegImm(&X86Mir2Lir::EncodingMap[kX86Sub32RI], reg, offset + 5 /* size of call +0 */); |
| 1004 | } |
| 1005 | |
| 1006 | void X86Mir2Lir::EmitUnimplemented(const X86EncodingMap* entry, LIR* lir) { |
| 1007 | UNIMPLEMENTED(WARNING) << "encoding kind for " << entry->name << " " |
| 1008 | << BuildInsnString(entry->fmt, lir, 0); |
| 1009 | for (int i = 0; i < GetInsnSize(lir); ++i) { |
| 1010 | code_buffer_.push_back(0xCC); // push breakpoint instruction - int 3 |
| 1011 | } |
| 1012 | } |
| 1013 | |
| 1014 | /* |
| 1015 | * Assemble the LIR into binary instruction format. Note that we may |
| 1016 | * discover that pc-relative displacements may not fit the selected |
| 1017 | * instruction. In those cases we will try to substitute a new code |
| 1018 | * sequence or request that the trace be shortened and retried. |
| 1019 | */ |
// Two-phase per-instruction assembly: first resolve pc-relative fixups (which
// may grow short branches into long ones and force a retry), then emit bytes
// via the per-kind Emit* helpers. Returns kRetryAll if any layout changed.
AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
  LIR *lir;
  AssemblerStatus res = kSuccess;  // Assume success

  const bool kVerbosePcFixup = false;
  for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) {
    // Pseudo ops and nop'ed instructions produce no bytes.
    if (IsPseudoLirOp(lir->opcode)) {
      continue;
    }

    if (lir->flags.is_nop) {
      continue;
    }

    if (lir->flags.fixup != kFixupNone) {
      switch (lir->opcode) {
        case kX86Jcc8: {
          LIR *target_lir = lir->target;
          DCHECK(target_lir != NULL);
          int delta = 0;
          CodeOffset pc;
          // The branch displacement is relative to the end of this insn, whose
          // size depends on whether the current delta still fits in 8 bits.
          if (IS_SIMM8(lir->operands[0])) {
            pc = lir->offset + 2 /* opcode + rel8 */;
          } else {
            pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
          }
          CodeOffset target = target_lir->offset;
          delta = target - pc;
          // If the short/long classification changed, widen to Jcc32 and retry.
          if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
            if (kVerbosePcFixup) {
              LOG(INFO) << "Retry for JCC growth at " << lir->offset
                  << " delta: " << delta << " old delta: " << lir->operands[0];
            }
            lir->opcode = kX86Jcc32;
            SetupResourceMasks(lir);
            res = kRetryAll;
          }
          if (kVerbosePcFixup) {
            LOG(INFO) << "Source:";
            DumpLIRInsn(lir, 0);
            LOG(INFO) << "Target:";
            DumpLIRInsn(target_lir, 0);
            LOG(INFO) << "Delta " << delta;
          }
          lir->operands[0] = delta;
          break;
        }
        case kX86Jcc32: {
          // Long form never needs to grow; just recompute the displacement.
          LIR *target_lir = lir->target;
          DCHECK(target_lir != NULL);
          CodeOffset pc = lir->offset + 6 /* 2 byte opcode + rel32 */;
          CodeOffset target = target_lir->offset;
          int delta = target - pc;
          if (kVerbosePcFixup) {
            LOG(INFO) << "Source:";
            DumpLIRInsn(lir, 0);
            LOG(INFO) << "Target:";
            DumpLIRInsn(target_lir, 0);
            LOG(INFO) << "Delta " << delta;
          }
          lir->operands[0] = delta;
          break;
        }
        case kX86Jmp8: {
          LIR *target_lir = lir->target;
          DCHECK(target_lir != NULL);
          int delta = 0;
          CodeOffset pc;
          if (IS_SIMM8(lir->operands[0])) {
            pc = lir->offset + 2 /* opcode + rel8 */;
          } else {
            pc = lir->offset + 5 /* opcode + rel32 */;
          }
          CodeOffset target = target_lir->offset;
          delta = target - pc;
          if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && delta == 0) {
            // Useless branch
            NopLIR(lir);
            if (kVerbosePcFixup) {
              LOG(INFO) << "Retry for useless branch at " << lir->offset;
            }
            res = kRetryAll;
          } else if (IS_SIMM8(delta) != IS_SIMM8(lir->operands[0])) {
            // Delta no longer fits the short form; widen to Jmp32 and retry.
            if (kVerbosePcFixup) {
              LOG(INFO) << "Retry for JMP growth at " << lir->offset;
            }
            lir->opcode = kX86Jmp32;
            SetupResourceMasks(lir);
            res = kRetryAll;
          }
          lir->operands[0] = delta;
          break;
        }
        case kX86Jmp32: {
          LIR *target_lir = lir->target;
          DCHECK(target_lir != NULL);
          CodeOffset pc = lir->offset + 5 /* opcode + rel32 */;
          CodeOffset target = target_lir->offset;
          int delta = target - pc;
          lir->operands[0] = delta;
          break;
        }
        default:
          break;
      }
    }

    /*
     * If one of the pc-relative instructions expanded we'll have
     * to make another pass.  Don't bother to fully assemble the
     * instruction.
     */
    if (res != kSuccess) {
      continue;
    }
    // Offsets assigned earlier must line up exactly with the bytes emitted so far.
    CHECK_EQ(static_cast<size_t>(lir->offset), code_buffer_.size());
    const X86EncodingMap *entry = &X86Mir2Lir::EncodingMap[lir->opcode];
    size_t starting_cbuf_size = code_buffer_.size();
    // Dispatch on the encoding kind; each Emit* helper appends the full
    // instruction to code_buffer_.
    switch (entry->kind) {
      case kData:  // 4 bytes of data
        code_buffer_.push_back(lir->operands[0]);
        break;
      case kNullary:  // 1 byte of opcode
        DCHECK_EQ(0, entry->skeleton.prefix1);
        DCHECK_EQ(0, entry->skeleton.prefix2);
        EmitOpcode(entry);
        DCHECK_EQ(0, entry->skeleton.modrm_opcode);
        DCHECK_EQ(0, entry->skeleton.ax_opcode);
        DCHECK_EQ(0, entry->skeleton.immediate_bytes);
        break;
      case kRegOpcode:  // lir operands - 0: reg
        EmitOpRegOpcode(entry, lir->operands[0]);
        break;
      case kReg:  // lir operands - 0: reg
        EmitOpReg(entry, lir->operands[0]);
        break;
      case kMem:  // lir operands - 0: base, 1: disp
        EmitOpMem(entry, lir->operands[0], lir->operands[1]);
        break;
      case kArray:  // lir operands - 0: base, 1: index, 2: scale, 3: disp
        EmitOpArray(entry, lir->operands[0], lir->operands[1], lir->operands[2], lir->operands[3]);
        break;
      case kMemReg:  // lir operands - 0: base, 1: disp, 2: reg
        EmitMemReg(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
        break;
      case kMemImm:  // lir operands - 0: base, 1: disp, 2: immediate
        EmitMemImm(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
        break;
      case kArrayReg:  // lir operands - 0: base, 1: index, 2: scale, 3: disp, 4: reg
        EmitArrayReg(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                     lir->operands[3], lir->operands[4]);
        break;
      case kRegMem:  // lir operands - 0: reg, 1: base, 2: disp
        EmitRegMem(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
        break;
      case kRegArray:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: disp
        EmitRegArray(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                     lir->operands[3], lir->operands[4]);
        break;
      case kRegThread:  // lir operands - 0: reg, 1: disp
        EmitRegThread(entry, lir->operands[0], lir->operands[1]);
        break;
      case kRegReg:  // lir operands - 0: reg1, 1: reg2
        EmitRegReg(entry, lir->operands[0], lir->operands[1]);
        break;
      case kRegRegStore:  // lir operands - 0: reg2, 1: reg1
        // Store form swaps the operand order before encoding.
        EmitRegReg(entry, lir->operands[1], lir->operands[0]);
        break;
      case kRegRegImm:
        EmitRegRegImm(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
        break;
      case kRegImm:  // lir operands - 0: reg, 1: immediate
        EmitRegImm(entry, lir->operands[0], lir->operands[1]);
        break;
      case kThreadImm:  // lir operands - 0: disp, 1: immediate
        EmitThreadImm(entry, lir->operands[0], lir->operands[1]);
        break;
      case kMovRegImm:  // lir operands - 0: reg, 1: immediate
        EmitMovRegImm(entry, lir->operands[0], lir->operands[1]);
        break;
      case kShiftRegImm:  // lir operands - 0: reg, 1: immediate
        EmitShiftRegImm(entry, lir->operands[0], lir->operands[1]);
        break;
      case kShiftRegCl:  // lir operands - 0: reg, 1: cl
        EmitShiftRegCl(entry, lir->operands[0], lir->operands[1]);
        break;
      case kRegCond:  // lir operands - 0: reg, 1: condition
        EmitRegCond(entry, lir->operands[0], lir->operands[1]);
        break;
      case kRegRegCond:  // lir operands - 0: reg, 1: reg, 2: condition
        EmitRegRegCond(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
        break;
      case kJmp:  // lir operands - 0: rel
        EmitJmp(entry, lir->operands[0]);
        break;
      case kJcc:  // lir operands - 0: rel, 1: CC, target assigned
        EmitJcc(entry, lir->operands[0], lir->operands[1]);
        break;
      case kCall:
        switch (entry->opcode) {
          case kX86CallM:  // lir operands - 0: base, 1: disp
            EmitCallMem(entry, lir->operands[0], lir->operands[1]);
            break;
          case kX86CallT:  // lir operands - 0: disp
            EmitCallThread(entry, lir->operands[0]);
            break;
          default:
            EmitUnimplemented(entry, lir);
            break;
        }
        break;
      case kPcRel:  // lir operands - 0: reg, 1: base, 2: index, 3: scale, 4: table
        EmitPcRel(entry, lir->operands[0], lir->operands[1], lir->operands[2],
                  lir->operands[3], lir->operands[4]);
        break;
      case kMacro:
        EmitMacro(entry, lir->operands[0], lir->offset);
        break;
      default:
        EmitUnimplemented(entry, lir);
        break;
    }
    // Verify the emitted byte count matches the size the layout pass assumed.
    CHECK_EQ(static_cast<size_t>(GetInsnSize(lir)),
             code_buffer_.size() - starting_cbuf_size)
        << "Instruction size mismatch for entry: " << X86Mir2Lir::EncodingMap[lir->opcode].name;
  }
  return res;
}
| 1248 | |
buzbee | b48819d | 2013-09-14 16:15:25 -0700 | [diff] [blame] | 1249 | // LIR offset assignment. |
| 1250 | // TODO: consolidate w/ Arm assembly mechanism. |
| 1251 | int X86Mir2Lir::AssignInsnOffsets() { |
| 1252 | LIR* lir; |
| 1253 | int offset = 0; |
| 1254 | |
| 1255 | for (lir = first_lir_insn_; lir != NULL; lir = NEXT_LIR(lir)) { |
| 1256 | lir->offset = offset; |
buzbee | 409fe94 | 2013-10-11 10:49:56 -0700 | [diff] [blame] | 1257 | if (LIKELY(!IsPseudoLirOp(lir->opcode))) { |
buzbee | b48819d | 2013-09-14 16:15:25 -0700 | [diff] [blame] | 1258 | if (!lir->flags.is_nop) { |
| 1259 | offset += lir->flags.size; |
| 1260 | } |
| 1261 | } else if (UNLIKELY(lir->opcode == kPseudoPseudoAlign4)) { |
| 1262 | if (offset & 0x2) { |
| 1263 | offset += 2; |
| 1264 | lir->operands[0] = 1; |
| 1265 | } else { |
| 1266 | lir->operands[0] = 0; |
| 1267 | } |
| 1268 | } |
| 1269 | /* Pseudo opcodes don't consume space */ |
| 1270 | } |
| 1271 | return offset; |
| 1272 | } |
| 1273 | |
| 1274 | /* |
| 1275 | * Walk the compilation unit and assign offsets to instructions |
| 1276 | * and literals and compute the total size of the compiled unit. |
| 1277 | * TODO: consolidate w/ Arm assembly mechanism. |
| 1278 | */ |
| 1279 | void X86Mir2Lir::AssignOffsets() { |
| 1280 | int offset = AssignInsnOffsets(); |
| 1281 | |
| 1282 | /* Const values have to be word aligned */ |
| 1283 | offset = (offset + 3) & ~3; |
| 1284 | |
| 1285 | /* Set up offsets for literals */ |
| 1286 | data_offset_ = offset; |
| 1287 | |
| 1288 | offset = AssignLiteralOffset(offset); |
| 1289 | |
| 1290 | offset = AssignSwitchTablesOffset(offset); |
| 1291 | |
| 1292 | offset = AssignFillArrayDataOffset(offset); |
| 1293 | |
| 1294 | total_size_ = offset; |
| 1295 | } |
| 1296 | |
| 1297 | /* |
| 1298 | * Go over each instruction in the list and calculate the offset from the top |
| 1299 | * before sending them off to the assembler. If out-of-range branch distance is |
| 1300 | * seen rearrange the instructions a bit to correct it. |
| 1301 | * TODO: consolidate w/ Arm assembly mechanism. |
| 1302 | */ |
| 1303 | void X86Mir2Lir::AssembleLIR() { |
buzbee | a61f495 | 2013-08-23 14:27:06 -0700 | [diff] [blame] | 1304 | cu_->NewTimingSplit("Assemble"); |
buzbee | b48819d | 2013-09-14 16:15:25 -0700 | [diff] [blame] | 1305 | AssignOffsets(); |
| 1306 | int assembler_retries = 0; |
| 1307 | /* |
| 1308 | * Assemble here. Note that we generate code with optimistic assumptions |
| 1309 | * and if found now to work, we'll have to redo the sequence and retry. |
| 1310 | */ |
| 1311 | |
| 1312 | while (true) { |
| 1313 | AssemblerStatus res = AssembleInstructions(0); |
| 1314 | if (res == kSuccess) { |
| 1315 | break; |
| 1316 | } else { |
| 1317 | assembler_retries++; |
| 1318 | if (assembler_retries > MAX_ASSEMBLER_RETRIES) { |
| 1319 | CodegenDump(); |
| 1320 | LOG(FATAL) << "Assembler error - too many retries"; |
| 1321 | } |
| 1322 | // Redo offsets and try again |
| 1323 | AssignOffsets(); |
| 1324 | code_buffer_.clear(); |
| 1325 | } |
| 1326 | } |
| 1327 | |
| 1328 | // Install literals |
| 1329 | InstallLiteralPools(); |
| 1330 | |
| 1331 | // Install switch tables |
| 1332 | InstallSwitchTables(); |
| 1333 | |
| 1334 | // Install fill array data |
| 1335 | InstallFillArrayData(); |
| 1336 | |
| 1337 | // Create the mapping table and native offset to reference map. |
buzbee | a61f495 | 2013-08-23 14:27:06 -0700 | [diff] [blame] | 1338 | cu_->NewTimingSplit("PcMappingTable"); |
buzbee | b48819d | 2013-09-14 16:15:25 -0700 | [diff] [blame] | 1339 | CreateMappingTables(); |
| 1340 | |
buzbee | a61f495 | 2013-08-23 14:27:06 -0700 | [diff] [blame] | 1341 | cu_->NewTimingSplit("GcMap"); |
buzbee | b48819d | 2013-09-14 16:15:25 -0700 | [diff] [blame] | 1342 | CreateNativeGcMap(); |
| 1343 | } |
| 1344 | |
Brian Carlstrom | 7940e44 | 2013-07-12 13:46:57 -0700 | [diff] [blame] | 1345 | } // namespace art |