Daniel Dunbar | 40eb7f0 | 2010-02-21 21:54:14 +0000 | [diff] [blame] | 1 | //===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===// |
| 2 | // |
| 3 | // The LLVM Compiler Infrastructure |
| 4 | // |
| 5 | // This file is distributed under the University of Illinois Open Source |
| 6 | // License. See LICENSE.TXT for details. |
| 7 | // |
| 8 | //===----------------------------------------------------------------------===// |
| 9 | |
Evan Cheng | b253100 | 2011-07-25 19:33:48 +0000 | [diff] [blame] | 10 | #include "MCTargetDesc/X86BaseInfo.h" |
Evan Cheng | 7e763d8 | 2011-07-25 18:43:53 +0000 | [diff] [blame] | 11 | #include "MCTargetDesc/X86FixupKinds.h" |
Craig Topper | b25fda9 | 2012-03-17 18:46:09 +0000 | [diff] [blame] | 12 | #include "llvm/MC/MCAsmBackend.h" |
Daniel Dunbar | f0517ef | 2010-03-19 09:28:12 +0000 | [diff] [blame] | 13 | #include "llvm/MC/MCAssembler.h" |
Rafael Espindola | f0e24d4 | 2010-12-17 16:59:53 +0000 | [diff] [blame] | 14 | #include "llvm/MC/MCELFObjectWriter.h" |
Daniel Dunbar | 358b29c | 2010-05-06 20:34:01 +0000 | [diff] [blame] | 15 | #include "llvm/MC/MCExpr.h" |
Daniel Dunbar | 0c9d9fd | 2010-12-16 03:20:06 +0000 | [diff] [blame] | 16 | #include "llvm/MC/MCFixupKindInfo.h" |
Daniel Dunbar | 73b8713 | 2010-12-16 16:08:33 +0000 | [diff] [blame] | 17 | #include "llvm/MC/MCMachObjectWriter.h" |
Daniel Dunbar | 86face8 | 2010-03-23 03:13:05 +0000 | [diff] [blame] | 18 | #include "llvm/MC/MCObjectWriter.h" |
Michael J. Spencer | f8270bd | 2010-07-27 06:46:15 +0000 | [diff] [blame] | 19 | #include "llvm/MC/MCSectionCOFF.h" |
Daniel Dunbar | c5084cc | 2010-03-19 09:29:03 +0000 | [diff] [blame] | 20 | #include "llvm/MC/MCSectionELF.h" |
Daniel Dunbar | fe8d866 | 2010-03-15 21:56:50 +0000 | [diff] [blame] | 21 | #include "llvm/MC/MCSectionMachO.h" |
Daniel Dunbar | a86188b | 2011-04-28 21:23:31 +0000 | [diff] [blame] | 22 | #include "llvm/Support/CommandLine.h" |
Wesley Peck | 1851090 | 2010-10-22 15:52:49 +0000 | [diff] [blame] | 23 | #include "llvm/Support/ELF.h" |
Daniel Dunbar | e0c4357 | 2010-03-23 01:39:09 +0000 | [diff] [blame] | 24 | #include "llvm/Support/ErrorHandling.h" |
Charles Davis | 8bdfafd | 2013-09-01 04:28:48 +0000 | [diff] [blame] | 25 | #include "llvm/Support/MachO.h" |
Evan Cheng | 2bb4035 | 2011-08-24 18:08:43 +0000 | [diff] [blame] | 26 | #include "llvm/Support/TargetRegistry.h" |
Daniel Dunbar | e0c4357 | 2010-03-23 01:39:09 +0000 | [diff] [blame] | 27 | #include "llvm/Support/raw_ostream.h" |
Daniel Dunbar | 40eb7f0 | 2010-02-21 21:54:14 +0000 | [diff] [blame] | 28 | using namespace llvm; |
| 29 | |
// Option to allow disabling arithmetic relaxation to workaround PR9807, which
// is useful when running bitwise comparison experiments on Darwin. We should be
// able to remove this once PR9807 is resolved.
//
// Checked in mayNeedRelaxation() below; it only suppresses relaxation of
// arithmetic instructions, never of branches.
static cl::opt<bool>
MCDisableArithRelaxation("mc-x86-disable-arith-relaxation",
         cl::desc("Disable relaxation of arithmetic instruction for X86"));
| 36 | |
Daniel Dunbar | f0517ef | 2010-03-19 09:28:12 +0000 | [diff] [blame] | 37 | static unsigned getFixupKindLog2Size(unsigned Kind) { |
| 38 | switch (Kind) { |
Craig Topper | 4ed7278 | 2012-02-05 05:38:58 +0000 | [diff] [blame] | 39 | default: llvm_unreachable("invalid fixup kind!"); |
Rafael Espindola | 8a3a792 | 2010-11-28 14:17:56 +0000 | [diff] [blame] | 40 | case FK_PCRel_1: |
Rafael Espindola | a56ab0ed | 2011-12-24 14:47:52 +0000 | [diff] [blame] | 41 | case FK_SecRel_1: |
Daniel Dunbar | f0517ef | 2010-03-19 09:28:12 +0000 | [diff] [blame] | 42 | case FK_Data_1: return 0; |
Rafael Espindola | 8a3a792 | 2010-11-28 14:17:56 +0000 | [diff] [blame] | 43 | case FK_PCRel_2: |
Rafael Espindola | a56ab0ed | 2011-12-24 14:47:52 +0000 | [diff] [blame] | 44 | case FK_SecRel_2: |
Daniel Dunbar | f0517ef | 2010-03-19 09:28:12 +0000 | [diff] [blame] | 45 | case FK_Data_2: return 1; |
Rafael Espindola | 8a3a792 | 2010-11-28 14:17:56 +0000 | [diff] [blame] | 46 | case FK_PCRel_4: |
Daniel Dunbar | f0517ef | 2010-03-19 09:28:12 +0000 | [diff] [blame] | 47 | case X86::reloc_riprel_4byte: |
| 48 | case X86::reloc_riprel_4byte_movq_load: |
Rafael Espindola | 70d6e0e | 2010-09-30 03:11:42 +0000 | [diff] [blame] | 49 | case X86::reloc_signed_4byte: |
Rafael Espindola | 800fd35 | 2010-10-24 17:35:42 +0000 | [diff] [blame] | 50 | case X86::reloc_global_offset_table: |
Rafael Espindola | a56ab0ed | 2011-12-24 14:47:52 +0000 | [diff] [blame] | 51 | case FK_SecRel_4: |
Daniel Dunbar | f0517ef | 2010-03-19 09:28:12 +0000 | [diff] [blame] | 52 | case FK_Data_4: return 2; |
Rafael Espindola | 2ac8355 | 2010-12-27 00:36:05 +0000 | [diff] [blame] | 53 | case FK_PCRel_8: |
Rafael Espindola | a56ab0ed | 2011-12-24 14:47:52 +0000 | [diff] [blame] | 54 | case FK_SecRel_8: |
Daniel Dunbar | f0517ef | 2010-03-19 09:28:12 +0000 | [diff] [blame] | 55 | case FK_Data_8: return 3; |
| 56 | } |
| 57 | } |
| 58 | |
Chris Lattner | ac58812 | 2010-07-07 22:27:31 +0000 | [diff] [blame] | 59 | namespace { |
Daniel Dunbar | 8888a96 | 2010-12-16 16:09:19 +0000 | [diff] [blame] | 60 | |
/// Thin target-writer wrapper: all X86 ELF specifics live in the base class;
/// this only exists to forward the target parameters.
class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  // NOTE(review): the trailing 'foobar' parameter is never used or forwarded;
  // it is kept only because callers pass five arguments. Candidate for removal
  // once call sites are updated.
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend, bool foobar)
    : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};
| 67 | |
/// Common X86 assembler backend: fixup application, instruction relaxation,
/// and nop padding. Object-format specifics are added by subclasses.
class X86AsmBackend : public MCAsmBackend {
  StringRef CPU; ///< Target CPU name; consulted by writeNopData for long-nop support.
public:
  X86AsmBackend(const Target &T, StringRef _CPU)
    : MCAsmBackend(), CPU(_CPU) {}

  unsigned getNumFixupKinds() const {
    return X86::NumTargetFixupKinds;
  }

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
    // Rows are indexed by (Kind - FirstTargetFixupKind) below, so they must
    // stay in the same order as the target fixup enum in X86FixupKinds.h.
    const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
      { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
      { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
      { "reloc_signed_4byte", 0, 4 * 8, 0},
      { "reloc_global_offset_table", 0, 4 * 8, 0}
    };

    // Generic (non-target) fixup kinds are described by the base class.
    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  /// Patch \p Value into the fragment bytes at the fixup's offset,
  /// little-endian, using the width implied by the fixup kind.
  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value) const {
    unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());

    assert(Fixup.getOffset() + Size <= DataSize &&
           "Invalid fixup offset!");

    // Check that upper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // other assemblers.
    assert(isIntN(Size * 8 + 1, Value) &&
           "Value does not fit in the Fixup field");

    for (unsigned i = 0; i != Size; ++i)
      Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
  }

  bool mayNeedRelaxation(const MCInst &Inst) const;

  bool fixupNeedsRelaxation(const MCFixup &Fixup,
                            uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const;

  void relaxInstruction(const MCInst &Inst, MCInst &Res) const;

  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
};
Michael J. Spencer | bee1f7f | 2010-10-10 22:04:20 +0000 | [diff] [blame] | 123 | } // end anonymous namespace |
Daniel Dunbar | 40eb7f0 | 2010-02-21 21:54:14 +0000 | [diff] [blame] | 124 | |
Rafael Espindola | e8ae9881 | 2010-10-26 14:09:12 +0000 | [diff] [blame] | 125 | static unsigned getRelaxedOpcodeBranch(unsigned Op) { |
Daniel Dunbar | e0c4357 | 2010-03-23 01:39:09 +0000 | [diff] [blame] | 126 | switch (Op) { |
| 127 | default: |
| 128 | return Op; |
| 129 | |
| 130 | case X86::JAE_1: return X86::JAE_4; |
| 131 | case X86::JA_1: return X86::JA_4; |
| 132 | case X86::JBE_1: return X86::JBE_4; |
| 133 | case X86::JB_1: return X86::JB_4; |
| 134 | case X86::JE_1: return X86::JE_4; |
| 135 | case X86::JGE_1: return X86::JGE_4; |
| 136 | case X86::JG_1: return X86::JG_4; |
| 137 | case X86::JLE_1: return X86::JLE_4; |
| 138 | case X86::JL_1: return X86::JL_4; |
| 139 | case X86::JMP_1: return X86::JMP_4; |
| 140 | case X86::JNE_1: return X86::JNE_4; |
| 141 | case X86::JNO_1: return X86::JNO_4; |
| 142 | case X86::JNP_1: return X86::JNP_4; |
| 143 | case X86::JNS_1: return X86::JNS_4; |
| 144 | case X86::JO_1: return X86::JO_4; |
| 145 | case X86::JP_1: return X86::JP_4; |
| 146 | case X86::JS_1: return X86::JS_4; |
| 147 | } |
| 148 | } |
| 149 | |
Rafael Espindola | e8ae9881 | 2010-10-26 14:09:12 +0000 | [diff] [blame] | 150 | static unsigned getRelaxedOpcodeArith(unsigned Op) { |
| 151 | switch (Op) { |
| 152 | default: |
| 153 | return Op; |
| 154 | |
| 155 | // IMUL |
| 156 | case X86::IMUL16rri8: return X86::IMUL16rri; |
| 157 | case X86::IMUL16rmi8: return X86::IMUL16rmi; |
| 158 | case X86::IMUL32rri8: return X86::IMUL32rri; |
| 159 | case X86::IMUL32rmi8: return X86::IMUL32rmi; |
| 160 | case X86::IMUL64rri8: return X86::IMUL64rri32; |
| 161 | case X86::IMUL64rmi8: return X86::IMUL64rmi32; |
| 162 | |
| 163 | // AND |
| 164 | case X86::AND16ri8: return X86::AND16ri; |
| 165 | case X86::AND16mi8: return X86::AND16mi; |
| 166 | case X86::AND32ri8: return X86::AND32ri; |
| 167 | case X86::AND32mi8: return X86::AND32mi; |
| 168 | case X86::AND64ri8: return X86::AND64ri32; |
| 169 | case X86::AND64mi8: return X86::AND64mi32; |
| 170 | |
| 171 | // OR |
| 172 | case X86::OR16ri8: return X86::OR16ri; |
| 173 | case X86::OR16mi8: return X86::OR16mi; |
| 174 | case X86::OR32ri8: return X86::OR32ri; |
| 175 | case X86::OR32mi8: return X86::OR32mi; |
| 176 | case X86::OR64ri8: return X86::OR64ri32; |
| 177 | case X86::OR64mi8: return X86::OR64mi32; |
| 178 | |
| 179 | // XOR |
| 180 | case X86::XOR16ri8: return X86::XOR16ri; |
| 181 | case X86::XOR16mi8: return X86::XOR16mi; |
| 182 | case X86::XOR32ri8: return X86::XOR32ri; |
| 183 | case X86::XOR32mi8: return X86::XOR32mi; |
| 184 | case X86::XOR64ri8: return X86::XOR64ri32; |
| 185 | case X86::XOR64mi8: return X86::XOR64mi32; |
| 186 | |
| 187 | // ADD |
| 188 | case X86::ADD16ri8: return X86::ADD16ri; |
| 189 | case X86::ADD16mi8: return X86::ADD16mi; |
| 190 | case X86::ADD32ri8: return X86::ADD32ri; |
| 191 | case X86::ADD32mi8: return X86::ADD32mi; |
| 192 | case X86::ADD64ri8: return X86::ADD64ri32; |
| 193 | case X86::ADD64mi8: return X86::ADD64mi32; |
| 194 | |
| 195 | // SUB |
| 196 | case X86::SUB16ri8: return X86::SUB16ri; |
| 197 | case X86::SUB16mi8: return X86::SUB16mi; |
| 198 | case X86::SUB32ri8: return X86::SUB32ri; |
| 199 | case X86::SUB32mi8: return X86::SUB32mi; |
| 200 | case X86::SUB64ri8: return X86::SUB64ri32; |
| 201 | case X86::SUB64mi8: return X86::SUB64mi32; |
| 202 | |
| 203 | // CMP |
| 204 | case X86::CMP16ri8: return X86::CMP16ri; |
| 205 | case X86::CMP16mi8: return X86::CMP16mi; |
| 206 | case X86::CMP32ri8: return X86::CMP32ri; |
| 207 | case X86::CMP32mi8: return X86::CMP32mi; |
| 208 | case X86::CMP64ri8: return X86::CMP64ri32; |
| 209 | case X86::CMP64mi8: return X86::CMP64mi32; |
Rafael Espindola | 625ccf8 | 2010-12-18 01:01:34 +0000 | [diff] [blame] | 210 | |
| 211 | // PUSH |
| 212 | case X86::PUSHi8: return X86::PUSHi32; |
Eli Friedman | 3846acc | 2011-07-15 21:28:39 +0000 | [diff] [blame] | 213 | case X86::PUSHi16: return X86::PUSHi32; |
| 214 | case X86::PUSH64i8: return X86::PUSH64i32; |
| 215 | case X86::PUSH64i16: return X86::PUSH64i32; |
Rafael Espindola | e8ae9881 | 2010-10-26 14:09:12 +0000 | [diff] [blame] | 216 | } |
| 217 | } |
| 218 | |
| 219 | static unsigned getRelaxedOpcode(unsigned Op) { |
| 220 | unsigned R = getRelaxedOpcodeArith(Op); |
| 221 | if (R != Op) |
| 222 | return R; |
| 223 | return getRelaxedOpcodeBranch(Op); |
| 224 | } |
| 225 | |
Jim Grosbach | aba3de9 | 2012-01-18 18:52:16 +0000 | [diff] [blame] | 226 | bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const { |
Rafael Espindola | e8ae9881 | 2010-10-26 14:09:12 +0000 | [diff] [blame] | 227 | // Branches can always be relaxed. |
| 228 | if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode()) |
| 229 | return true; |
| 230 | |
Daniel Dunbar | a86188b | 2011-04-28 21:23:31 +0000 | [diff] [blame] | 231 | if (MCDisableArithRelaxation) |
| 232 | return false; |
| 233 | |
Daniel Dunbar | a19838e | 2010-05-26 17:45:29 +0000 | [diff] [blame] | 234 | // Check if this instruction is ever relaxable. |
Rafael Espindola | e8ae9881 | 2010-10-26 14:09:12 +0000 | [diff] [blame] | 235 | if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode()) |
Daniel Dunbar | a19838e | 2010-05-26 17:45:29 +0000 | [diff] [blame] | 236 | return false; |
Daniel Dunbar | 353a91ff | 2010-05-26 15:18:31 +0000 | [diff] [blame] | 237 | |
Rafael Espindola | e8ae9881 | 2010-10-26 14:09:12 +0000 | [diff] [blame] | 238 | |
| 239 | // Check if it has an expression and is not RIP relative. |
| 240 | bool hasExp = false; |
| 241 | bool hasRIP = false; |
| 242 | for (unsigned i = 0; i < Inst.getNumOperands(); ++i) { |
| 243 | const MCOperand &Op = Inst.getOperand(i); |
| 244 | if (Op.isExpr()) |
| 245 | hasExp = true; |
| 246 | |
| 247 | if (Op.isReg() && Op.getReg() == X86::RIP) |
| 248 | hasRIP = true; |
| 249 | } |
| 250 | |
| 251 | // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on |
| 252 | // how we do relaxations? |
| 253 | return hasExp && !hasRIP; |
Daniel Dunbar | 86face8 | 2010-03-23 03:13:05 +0000 | [diff] [blame] | 254 | } |
| 255 | |
Jim Grosbach | 25b63fa | 2011-12-06 00:47:03 +0000 | [diff] [blame] | 256 | bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup, |
| 257 | uint64_t Value, |
Eli Bendersky | 4d9ada0 | 2013-01-08 00:22:56 +0000 | [diff] [blame] | 258 | const MCRelaxableFragment *DF, |
Jim Grosbach | 25b63fa | 2011-12-06 00:47:03 +0000 | [diff] [blame] | 259 | const MCAsmLayout &Layout) const { |
| 260 | // Relax if the value is too big for a (signed) i8. |
| 261 | return int64_t(Value) != int64_t(int8_t(Value)); |
| 262 | } |
| 263 | |
Daniel Dunbar | e0c4357 | 2010-03-23 01:39:09 +0000 | [diff] [blame] | 264 | // FIXME: Can tblgen help at all here to verify there aren't other instructions |
| 265 | // we can relax? |
Jim Grosbach | aba3de9 | 2012-01-18 18:52:16 +0000 | [diff] [blame] | 266 | void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const { |
Daniel Dunbar | e0c4357 | 2010-03-23 01:39:09 +0000 | [diff] [blame] | 267 | // The only relaxations X86 does is from a 1byte pcrel to a 4byte pcrel. |
Daniel Dunbar | 7c8bd0f | 2010-05-26 18:15:06 +0000 | [diff] [blame] | 268 | unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode()); |
Daniel Dunbar | e0c4357 | 2010-03-23 01:39:09 +0000 | [diff] [blame] | 269 | |
Daniel Dunbar | 7c8bd0f | 2010-05-26 18:15:06 +0000 | [diff] [blame] | 270 | if (RelaxedOp == Inst.getOpcode()) { |
Daniel Dunbar | e0c4357 | 2010-03-23 01:39:09 +0000 | [diff] [blame] | 271 | SmallString<256> Tmp; |
| 272 | raw_svector_ostream OS(Tmp); |
Daniel Dunbar | 7c8bd0f | 2010-05-26 18:15:06 +0000 | [diff] [blame] | 273 | Inst.dump_pretty(OS); |
Daniel Dunbar | 3627af5 | 2010-05-26 15:18:13 +0000 | [diff] [blame] | 274 | OS << "\n"; |
Chris Lattner | 2104b8d | 2010-04-07 22:58:41 +0000 | [diff] [blame] | 275 | report_fatal_error("unexpected instruction to relax: " + OS.str()); |
Daniel Dunbar | e0c4357 | 2010-03-23 01:39:09 +0000 | [diff] [blame] | 276 | } |
| 277 | |
Daniel Dunbar | 7c8bd0f | 2010-05-26 18:15:06 +0000 | [diff] [blame] | 278 | Res = Inst; |
Daniel Dunbar | e0c4357 | 2010-03-23 01:39:09 +0000 | [diff] [blame] | 279 | Res.setOpcode(RelaxedOp); |
| 280 | } |
| 281 | |
/// \brief Write a sequence of optimal nops to the output, covering \p Count
/// bytes.
/// \return - true on success, false on failure
bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
  // Canonical multi-byte nop encodings, indexed by (length - 1).
  static const uint8_t Nops[10][10] = {
    // nop
    {0x90},
    // xchg %ax,%ax
    {0x66, 0x90},
    // nopl (%[re]ax)
    {0x0f, 0x1f, 0x00},
    // nopl 0(%[re]ax)
    {0x0f, 0x1f, 0x40, 0x00},
    // nopl 0(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopw 0(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopl 0L(%[re]ax)
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    // nopl 0L(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw 0L(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw %cs:0L(%[re]ax,%[re]ax,1)
    {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
  };

  // This CPU doesn't support long nops. If needed add more.
  // FIXME: Can we get this from the subtarget somehow?
  if (CPU == "generic" || CPU == "i386" || CPU == "i486" || CPU == "i586" ||
      CPU == "pentium" || CPU == "pentium-mmx" || CPU == "geode") {
    for (uint64_t i = 0; i < Count; ++i)
      OW->Write8(0x90);
    return true;
  }

  // 15 is the longest single nop instruction.  Emit as many 15-byte nops as
  // needed, then emit a nop of the remaining length.
  do {
    const uint8_t ThisNopLength = (uint8_t) std::min(Count, (uint64_t) 15);
    // Lengths 11-15 are formed by prepending 0x66 operand-size prefixes to
    // the longest (10-byte) table entry.
    const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
    for (uint8_t i = 0; i < Prefixes; i++)
      OW->Write8(0x66);
    const uint8_t Rest = ThisNopLength - Prefixes;
    for (uint8_t i = 0; i < Rest; i++)
      OW->Write8(Nops[Rest - 1][i]);
    Count -= ThisNopLength;
  } while (Count != 0);

  return true;
}
| 333 | |
Daniel Dunbar | e0c4357 | 2010-03-23 01:39:09 +0000 | [diff] [blame] | 334 | /* *** */ |
| 335 | |
Chris Lattner | ac58812 | 2010-07-07 22:27:31 +0000 | [diff] [blame] | 336 | namespace { |
Bill Wendling | 184d5d3 | 2013-09-11 20:38:09 +0000 | [diff] [blame^] | 337 | |
Daniel Dunbar | c5084cc | 2010-03-19 09:29:03 +0000 | [diff] [blame] | 338 | class ELFX86AsmBackend : public X86AsmBackend { |
| 339 | public: |
Rafael Espindola | 1ad4095 | 2011-12-21 17:00:36 +0000 | [diff] [blame] | 340 | uint8_t OSABI; |
Roman Divacky | 5dd4ccb | 2012-09-18 16:08:49 +0000 | [diff] [blame] | 341 | ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU) |
| 342 | : X86AsmBackend(T, CPU), OSABI(_OSABI) { |
Rafael Espindola | 75d65b9 | 2010-09-25 05:42:19 +0000 | [diff] [blame] | 343 | HasReliableSymbolDifference = true; |
| 344 | } |
| 345 | |
| 346 | virtual bool doesSectionRequireSymbols(const MCSection &Section) const { |
| 347 | const MCSectionELF &ES = static_cast<const MCSectionELF&>(Section); |
Rafael Espindola | 0e7e34e | 2011-01-23 04:43:11 +0000 | [diff] [blame] | 348 | return ES.getFlags() & ELF::SHF_MERGE; |
Daniel Dunbar | c5084cc | 2010-03-19 09:29:03 +0000 | [diff] [blame] | 349 | } |
Daniel Dunbar | c5084cc | 2010-03-19 09:29:03 +0000 | [diff] [blame] | 350 | }; |
| 351 | |
Matt Fleming | 5abb6dd | 2010-05-21 11:39:07 +0000 | [diff] [blame] | 352 | class ELFX86_32AsmBackend : public ELFX86AsmBackend { |
| 353 | public: |
Roman Divacky | 5dd4ccb | 2012-09-18 16:08:49 +0000 | [diff] [blame] | 354 | ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU) |
| 355 | : ELFX86AsmBackend(T, OSABI, CPU) {} |
Matt Fleming | f751d85 | 2010-08-16 18:36:14 +0000 | [diff] [blame] | 356 | |
| 357 | MCObjectWriter *createObjectWriter(raw_ostream &OS) const { |
Michael Liao | 83a77c3 | 2012-10-30 17:33:39 +0000 | [diff] [blame] | 358 | return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386); |
Jan Sjödin | 6348dc0 | 2011-03-09 18:44:41 +0000 | [diff] [blame] | 359 | } |
Matt Fleming | 5abb6dd | 2010-05-21 11:39:07 +0000 | [diff] [blame] | 360 | }; |
| 361 | |
| 362 | class ELFX86_64AsmBackend : public ELFX86AsmBackend { |
| 363 | public: |
Roman Divacky | 5dd4ccb | 2012-09-18 16:08:49 +0000 | [diff] [blame] | 364 | ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU) |
| 365 | : ELFX86AsmBackend(T, OSABI, CPU) {} |
Matt Fleming | f751d85 | 2010-08-16 18:36:14 +0000 | [diff] [blame] | 366 | |
| 367 | MCObjectWriter *createObjectWriter(raw_ostream &OS) const { |
Michael Liao | 83a77c3 | 2012-10-30 17:33:39 +0000 | [diff] [blame] | 368 | return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64); |
Jan Sjödin | 6348dc0 | 2011-03-09 18:44:41 +0000 | [diff] [blame] | 369 | } |
Matt Fleming | 5abb6dd | 2010-05-21 11:39:07 +0000 | [diff] [blame] | 370 | }; |
| 371 | |
Michael J. Spencer | f8270bd | 2010-07-27 06:46:15 +0000 | [diff] [blame] | 372 | class WindowsX86AsmBackend : public X86AsmBackend { |
Michael J. Spencer | 377aa20 | 2010-08-21 05:58:13 +0000 | [diff] [blame] | 373 | bool Is64Bit; |
Rafael Espindola | 4262a22 | 2010-10-16 18:23:53 +0000 | [diff] [blame] | 374 | |
Michael J. Spencer | f8270bd | 2010-07-27 06:46:15 +0000 | [diff] [blame] | 375 | public: |
Roman Divacky | 5dd4ccb | 2012-09-18 16:08:49 +0000 | [diff] [blame] | 376 | WindowsX86AsmBackend(const Target &T, bool is64Bit, StringRef CPU) |
| 377 | : X86AsmBackend(T, CPU) |
Michael J. Spencer | 377aa20 | 2010-08-21 05:58:13 +0000 | [diff] [blame] | 378 | , Is64Bit(is64Bit) { |
Michael J. Spencer | f8270bd | 2010-07-27 06:46:15 +0000 | [diff] [blame] | 379 | } |
| 380 | |
| 381 | MCObjectWriter *createObjectWriter(raw_ostream &OS) const { |
Rafael Espindola | 908d2ed | 2011-12-24 02:14:02 +0000 | [diff] [blame] | 382 | return createX86WinCOFFObjectWriter(OS, Is64Bit); |
Michael J. Spencer | f8270bd | 2010-07-27 06:46:15 +0000 | [diff] [blame] | 383 | } |
Michael J. Spencer | f8270bd | 2010-07-27 06:46:15 +0000 | [diff] [blame] | 384 | }; |
| 385 | |
namespace CU {

/// Compact unwind encoding values. The high byte selects the unwind mode;
/// the low bits carry mode-specific register/stack information.
enum CompactUnwindEncodings {
  /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
  /// the return address, then [RE]SP is moved to [RE]BP.
  UNWIND_MODE_BP_FRAME = 0x01000000,

  /// A frameless function with a small constant stack size.
  UNWIND_MODE_STACK_IMMD = 0x02000000,

  /// A frameless function with a large constant stack size.
  UNWIND_MODE_STACK_IND = 0x03000000,

  /// No compact unwind encoding is available.
  UNWIND_MODE_DWARF = 0x04000000,

  /// Mask for encoding the frame registers.
  UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,

  /// Mask for encoding the frameless registers.
  UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
};

} // end CU namespace
Daniel Dunbar | 77c4141 | 2010-03-11 01:34:21 +0000 | [diff] [blame] | 412 | class DarwinX86AsmBackend : public X86AsmBackend { |
  const MCRegisterInfo &MRI; ///< For DWARF-number -> LLVM-register translation.

  /// \brief Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };

  // Scratch buffer reused by the (const) encoding routine, hence 'mutable'.
  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  bool Is64Bit; ///< True when targeting x86-64.

  unsigned OffsetSize;    ///< Offset of a "push" instruction.
  unsigned PushInstrSize; ///< Size of a "push" instruction.
  unsigned MoveInstrSize; ///< Size of a "move" instruction.
  unsigned StackDivide;   ///< Amount to adjust stack size by.
| 425 | protected: |
| 426 | /// \brief Implementation of algorithm to generate the compact unwind encoding |
| 427 | /// for the CFI instructions. |
| 428 | uint32_t |
| 429 | generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const { |
| 430 | if (Instrs.empty()) return 0; |
| 431 | |
| 432 | // Reset the saved registers. |
| 433 | unsigned SavedRegIdx = 0; |
| 434 | memset(SavedRegs, 0, sizeof(SavedRegs)); |
| 435 | |
| 436 | bool HasFP = false; |
| 437 | |
| 438 | // Encode that we are using EBP/RBP as the frame pointer. |
| 439 | uint32_t CompactUnwindEncoding = 0; |
| 440 | |
| 441 | unsigned SubtractInstrIdx = Is64Bit ? 3 : 2; |
| 442 | unsigned InstrOffset = 0; |
| 443 | unsigned StackAdjust = 0; |
| 444 | unsigned StackSize = 0; |
| 445 | unsigned PrevStackSize = 0; |
| 446 | unsigned NumDefCFAOffsets = 0; |
| 447 | |
| 448 | for (unsigned i = 0, e = Instrs.size(); i != e; ++i) { |
| 449 | const MCCFIInstruction &Inst = Instrs[i]; |
| 450 | |
| 451 | switch (Inst.getOperation()) { |
| 452 | default: |
| 453 | llvm_unreachable("cannot handle CFI directive for compact unwind!"); |
| 454 | case MCCFIInstruction::OpDefCfaRegister: { |
| 455 | // Defines a frame pointer. E.g. |
| 456 | // |
| 457 | // movq %rsp, %rbp |
| 458 | // L0: |
| 459 | // .cfi_def_cfa_register %rbp |
| 460 | // |
| 461 | HasFP = true; |
| 462 | assert(MRI.getLLVMRegNum(Inst.getRegister(), true) == |
| 463 | (Is64Bit ? X86::RBP : X86::EBP) && "Invalid frame pointer!"); |
| 464 | |
| 465 | // Reset the counts. |
| 466 | memset(SavedRegs, 0, sizeof(SavedRegs)); |
| 467 | StackAdjust = 0; |
| 468 | SavedRegIdx = 0; |
| 469 | InstrOffset += MoveInstrSize; |
| 470 | break; |
| 471 | } |
| 472 | case MCCFIInstruction::OpDefCfaOffset: { |
| 473 | // Defines a new offset for the CFA. E.g. |
| 474 | // |
| 475 | // With frame: |
| 476 | // |
| 477 | // pushq %rbp |
| 478 | // L0: |
| 479 | // .cfi_def_cfa_offset 16 |
| 480 | // |
| 481 | // Without frame: |
| 482 | // |
| 483 | // subq $72, %rsp |
| 484 | // L0: |
| 485 | // .cfi_def_cfa_offset 80 |
| 486 | // |
| 487 | PrevStackSize = StackSize; |
| 488 | StackSize = std::abs(Inst.getOffset()) / StackDivide; |
| 489 | ++NumDefCFAOffsets; |
| 490 | break; |
| 491 | } |
| 492 | case MCCFIInstruction::OpOffset: { |
| 493 | // Defines a "push" of a callee-saved register. E.g. |
| 494 | // |
| 495 | // pushq %r15 |
| 496 | // pushq %r14 |
| 497 | // pushq %rbx |
| 498 | // L0: |
| 499 | // subq $120, %rsp |
| 500 | // L1: |
| 501 | // .cfi_offset %rbx, -40 |
| 502 | // .cfi_offset %r14, -32 |
| 503 | // .cfi_offset %r15, -24 |
| 504 | // |
| 505 | if (SavedRegIdx == CU_NUM_SAVED_REGS) |
| 506 | // If there are too many saved registers, we cannot use a compact |
| 507 | // unwind encoding. |
| 508 | return CU::UNWIND_MODE_DWARF; |
| 509 | |
| 510 | unsigned Reg = MRI.getLLVMRegNum(Inst.getRegister(), true); |
| 511 | SavedRegs[SavedRegIdx++] = Reg; |
| 512 | StackAdjust += OffsetSize; |
| 513 | InstrOffset += PushInstrSize; |
| 514 | break; |
| 515 | } |
| 516 | } |
| 517 | } |
| 518 | |
| 519 | StackAdjust /= StackDivide; |
| 520 | |
| 521 | if (HasFP) { |
| 522 | if ((StackAdjust & 0xFF) != StackAdjust) |
| 523 | // Offset was too big for a compact unwind encoding. |
| 524 | return CU::UNWIND_MODE_DWARF; |
| 525 | |
| 526 | // Get the encoding of the saved registers when we have a frame pointer. |
| 527 | uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(); |
| 528 | if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF; |
| 529 | |
| 530 | CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME; |
| 531 | CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16; |
| 532 | CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS; |
| 533 | } else { |
| 534 | // If the amount of the stack allocation is the size of a register, then |
| 535 | // we "push" the RAX/EAX register onto the stack instead of adjusting the |
| 536 | // stack pointer with a SUB instruction. We don't support the push of the |
| 537 | // RAX/EAX register with compact unwind. So we check for that situation |
| 538 | // here. |
| 539 | if ((NumDefCFAOffsets == SavedRegIdx + 1 && |
| 540 | StackSize - PrevStackSize == 1) || |
| 541 | (Instrs.size() == 1 && NumDefCFAOffsets == 1 && StackSize == 2)) |
| 542 | return CU::UNWIND_MODE_DWARF; |
| 543 | |
| 544 | SubtractInstrIdx += InstrOffset; |
| 545 | ++StackAdjust; |
| 546 | |
| 547 | if ((StackSize & 0xFF) == StackSize) { |
| 548 | // Frameless stack with a small stack size. |
| 549 | CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD; |
| 550 | |
| 551 | // Encode the stack size. |
| 552 | CompactUnwindEncoding |= (StackSize & 0xFF) << 16; |
| 553 | } else { |
| 554 | if ((StackAdjust & 0x7) != StackAdjust) |
| 555 | // The extra stack adjustments are too big for us to handle. |
| 556 | return CU::UNWIND_MODE_DWARF; |
| 557 | |
| 558 | // Frameless stack with an offset too large for us to encode compactly. |
| 559 | CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND; |
| 560 | |
| 561 | // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP' |
| 562 | // instruction. |
| 563 | CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16; |
| 564 | |
      // Encode any extra stack adjustments (done via push
| 566 | // instructions). |
| 567 | CompactUnwindEncoding |= (StackAdjust & 0x7) << 13; |
| 568 | } |
| 569 | |
| 570 | // Encode the number of registers saved. (Reverse the list first.) |
| 571 | std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]); |
| 572 | CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10; |
| 573 | |
| 574 | // Get the encoding of the saved registers when we don't have a frame |
| 575 | // pointer. |
| 576 | uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx); |
| 577 | if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF; |
| 578 | |
| 579 | // Encode the register encoding. |
| 580 | CompactUnwindEncoding |= |
| 581 | RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION; |
| 582 | } |
| 583 | |
| 584 | return CompactUnwindEncoding; |
| 585 | } |
| 586 | |
| 587 | private: |
| 588 | /// \brief Get the compact unwind number for a given register. The number |
| 589 | /// corresponds to the enum lists in compact_unwind_encoding.h. |
| 590 | int getCompactUnwindRegNum(unsigned Reg) const { |
| 591 | static const uint16_t CU32BitRegs[7] = { |
| 592 | X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0 |
| 593 | }; |
| 594 | static const uint16_t CU64BitRegs[] = { |
| 595 | X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0 |
| 596 | }; |
| 597 | const uint16_t *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs; |
| 598 | for (int Idx = 1; *CURegs; ++CURegs, ++Idx) |
| 599 | if (*CURegs == Reg) |
| 600 | return Idx; |
| 601 | |
| 602 | return -1; |
| 603 | } |
| 604 | |
| 605 | /// \brief Return the registers encoded for a compact encoding with a frame |
| 606 | /// pointer. |
| 607 | uint32_t encodeCompactUnwindRegistersWithFrame() const { |
| 608 | // Encode the registers in the order they were saved --- 3-bits per |
| 609 | // register. The list of saved registers is assumed to be in reverse |
| 610 | // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS. |
| 611 | uint32_t RegEnc = 0; |
| 612 | for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) { |
| 613 | unsigned Reg = SavedRegs[i]; |
| 614 | if (Reg == 0) break; |
| 615 | |
| 616 | int CURegNum = getCompactUnwindRegNum(Reg); |
| 617 | if (CURegNum == -1) return ~0U; |
| 618 | |
| 619 | // Encode the 3-bit register number in order, skipping over 3-bits for |
| 620 | // each register. |
| 621 | RegEnc |= (CURegNum & 0x7) << (Idx++ * 3); |
| 622 | } |
| 623 | |
| 624 | assert((RegEnc & 0x3FFFF) == RegEnc && |
| 625 | "Invalid compact register encoding!"); |
| 626 | return RegEnc; |
| 627 | } |
| 628 | |
| 629 | /// \brief Create the permutation encoding used with frameless stacks. It is |
| 630 | /// passed the number of registers to be saved and an array of the registers |
| 631 | /// saved. |
| 632 | uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const { |
| 633 | // The saved registers are numbered from 1 to 6. In order to encode the |
| 634 | // order in which they were saved, we re-number them according to their |
| 635 | // place in the register order. The re-numbering is relative to the last |
| 636 | // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in |
| 637 | // that order: |
| 638 | // |
| 639 | // Orig Re-Num |
| 640 | // ---- ------ |
| 641 | // 6 6 |
| 642 | // 2 2 |
| 643 | // 4 3 |
| 644 | // 5 3 |
| 645 | // |
| 646 | for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) { |
| 647 | int CUReg = getCompactUnwindRegNum(SavedRegs[i]); |
| 648 | if (CUReg == -1) return ~0U; |
| 649 | SavedRegs[i] = CUReg; |
| 650 | } |
| 651 | |
| 652 | // Reverse the list. |
| 653 | std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]); |
| 654 | |
| 655 | uint32_t RenumRegs[CU_NUM_SAVED_REGS]; |
| 656 | for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){ |
| 657 | unsigned Countless = 0; |
| 658 | for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j) |
| 659 | if (SavedRegs[j] < SavedRegs[i]) |
| 660 | ++Countless; |
| 661 | |
| 662 | RenumRegs[i] = SavedRegs[i] - Countless - 1; |
| 663 | } |
| 664 | |
| 665 | // Take the renumbered values and encode them into a 10-bit number. |
| 666 | uint32_t permutationEncoding = 0; |
| 667 | switch (RegCount) { |
| 668 | case 6: |
| 669 | permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1] |
| 670 | + 6 * RenumRegs[2] + 2 * RenumRegs[3] |
| 671 | + RenumRegs[4]; |
| 672 | break; |
| 673 | case 5: |
| 674 | permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2] |
| 675 | + 6 * RenumRegs[3] + 2 * RenumRegs[4] |
| 676 | + RenumRegs[5]; |
| 677 | break; |
| 678 | case 4: |
| 679 | permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3] |
| 680 | + 3 * RenumRegs[4] + RenumRegs[5]; |
| 681 | break; |
| 682 | case 3: |
| 683 | permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4] |
| 684 | + RenumRegs[5]; |
| 685 | break; |
| 686 | case 2: |
| 687 | permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5]; |
| 688 | break; |
| 689 | case 1: |
| 690 | permutationEncoding |= RenumRegs[5]; |
| 691 | break; |
| 692 | } |
| 693 | |
| 694 | assert((permutationEncoding & 0x3FF) == permutationEncoding && |
| 695 | "Invalid compact register encoding!"); |
| 696 | return permutationEncoding; |
| 697 | } |
| 698 | |
Daniel Dunbar | 77c4141 | 2010-03-11 01:34:21 +0000 | [diff] [blame] | 699 | public: |
Bill Wendling | 58e2d3d | 2013-09-09 02:37:14 +0000 | [diff] [blame] | 700 | DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI, StringRef CPU, |
| 701 | bool Is64Bit) |
| 702 | : X86AsmBackend(T, CPU), MRI(MRI), Is64Bit(Is64Bit) { |
| 703 | memset(SavedRegs, 0, sizeof(SavedRegs)); |
| 704 | OffsetSize = Is64Bit ? 8 : 4; |
| 705 | MoveInstrSize = Is64Bit ? 3 : 2; |
| 706 | StackDivide = Is64Bit ? 8 : 4; |
| 707 | PushInstrSize = 1; |
| 708 | } |
Daniel Dunbar | 77c4141 | 2010-03-11 01:34:21 +0000 | [diff] [blame] | 709 | }; |
| 710 | |
Daniel Dunbar | fe8d866 | 2010-03-15 21:56:50 +0000 | [diff] [blame] | 711 | class DarwinX86_32AsmBackend : public DarwinX86AsmBackend { |
Bill Wendling | 58e2d3d | 2013-09-09 02:37:14 +0000 | [diff] [blame] | 712 | bool SupportsCU; |
Daniel Dunbar | fe8d866 | 2010-03-15 21:56:50 +0000 | [diff] [blame] | 713 | public: |
Bill Wendling | 58e2d3d | 2013-09-09 02:37:14 +0000 | [diff] [blame] | 714 | DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI, |
| 715 | StringRef CPU, bool SupportsCU) |
| 716 | : DarwinX86AsmBackend(T, MRI, CPU, false), SupportsCU(SupportsCU) {} |
Daniel Dunbar | 4d7c864 | 2010-03-19 10:43:26 +0000 | [diff] [blame] | 717 | |
| 718 | MCObjectWriter *createObjectWriter(raw_ostream &OS) const { |
Daniel Dunbar | 7da045e | 2010-12-20 15:07:39 +0000 | [diff] [blame] | 719 | return createX86MachObjectWriter(OS, /*Is64Bit=*/false, |
Charles Davis | 8bdfafd | 2013-09-01 04:28:48 +0000 | [diff] [blame] | 720 | MachO::CPU_TYPE_I386, |
| 721 | MachO::CPU_SUBTYPE_I386_ALL); |
Daniel Dunbar | 4d7c864 | 2010-03-19 10:43:26 +0000 | [diff] [blame] | 722 | } |
Bill Wendling | 58e2d3d | 2013-09-09 02:37:14 +0000 | [diff] [blame] | 723 | |
| 724 | /// \brief Generate the compact unwind encoding for the CFI instructions. |
| 725 | virtual unsigned |
| 726 | generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const { |
| 727 | return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0; |
| 728 | } |
Daniel Dunbar | fe8d866 | 2010-03-15 21:56:50 +0000 | [diff] [blame] | 729 | }; |
| 730 | |
| 731 | class DarwinX86_64AsmBackend : public DarwinX86AsmBackend { |
Bill Wendling | 58e2d3d | 2013-09-09 02:37:14 +0000 | [diff] [blame] | 732 | bool SupportsCU; |
Daniel Dunbar | fe8d866 | 2010-03-15 21:56:50 +0000 | [diff] [blame] | 733 | public: |
Bill Wendling | 58e2d3d | 2013-09-09 02:37:14 +0000 | [diff] [blame] | 734 | DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI, |
| 735 | StringRef CPU, bool SupportsCU) |
| 736 | : DarwinX86AsmBackend(T, MRI, CPU, true), SupportsCU(SupportsCU) { |
Daniel Dunbar | 6544baf | 2010-03-18 00:58:53 +0000 | [diff] [blame] | 737 | HasReliableSymbolDifference = true; |
| 738 | } |
Daniel Dunbar | fe8d866 | 2010-03-15 21:56:50 +0000 | [diff] [blame] | 739 | |
Daniel Dunbar | 4d7c864 | 2010-03-19 10:43:26 +0000 | [diff] [blame] | 740 | MCObjectWriter *createObjectWriter(raw_ostream &OS) const { |
Daniel Dunbar | 7da045e | 2010-12-20 15:07:39 +0000 | [diff] [blame] | 741 | return createX86MachObjectWriter(OS, /*Is64Bit=*/true, |
Charles Davis | 8bdfafd | 2013-09-01 04:28:48 +0000 | [diff] [blame] | 742 | MachO::CPU_TYPE_X86_64, |
| 743 | MachO::CPU_SUBTYPE_X86_64_ALL); |
Daniel Dunbar | 4d7c864 | 2010-03-19 10:43:26 +0000 | [diff] [blame] | 744 | } |
| 745 | |
Daniel Dunbar | fe8d866 | 2010-03-15 21:56:50 +0000 | [diff] [blame] | 746 | virtual bool doesSectionRequireSymbols(const MCSection &Section) const { |
| 747 | // Temporary labels in the string literals sections require symbols. The |
| 748 | // issue is that the x86_64 relocation format does not allow symbol + |
| 749 | // offset, and so the linker does not have enough information to resolve the |
| 750 | // access to the appropriate atom unless an external relocation is used. For |
| 751 | // non-cstring sections, we expect the compiler to use a non-temporary label |
| 752 | // for anything that could have an addend pointing outside the symbol. |
| 753 | // |
| 754 | // See <rdar://problem/4765733>. |
| 755 | const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section); |
| 756 | return SMO.getType() == MCSectionMachO::S_CSTRING_LITERALS; |
| 757 | } |
Daniel Dunbar | ba2f4c3 | 2010-05-12 00:38:17 +0000 | [diff] [blame] | 758 | |
| 759 | virtual bool isSectionAtomizable(const MCSection &Section) const { |
| 760 | const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section); |
| 761 | // Fixed sized data sections are uniqued, they cannot be diced into atoms. |
| 762 | switch (SMO.getType()) { |
| 763 | default: |
| 764 | return true; |
| 765 | |
| 766 | case MCSectionMachO::S_4BYTE_LITERALS: |
| 767 | case MCSectionMachO::S_8BYTE_LITERALS: |
| 768 | case MCSectionMachO::S_16BYTE_LITERALS: |
| 769 | case MCSectionMachO::S_LITERAL_POINTERS: |
| 770 | case MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS: |
| 771 | case MCSectionMachO::S_LAZY_SYMBOL_POINTERS: |
| 772 | case MCSectionMachO::S_MOD_INIT_FUNC_POINTERS: |
| 773 | case MCSectionMachO::S_MOD_TERM_FUNC_POINTERS: |
| 774 | case MCSectionMachO::S_INTERPOSING: |
| 775 | return false; |
| 776 | } |
| 777 | } |
Bill Wendling | 58e2d3d | 2013-09-09 02:37:14 +0000 | [diff] [blame] | 778 | |
| 779 | /// \brief Generate the compact unwind encoding for the CFI instructions. |
| 780 | virtual unsigned |
| 781 | generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const { |
| 782 | return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0; |
| 783 | } |
Daniel Dunbar | fe8d866 | 2010-03-15 21:56:50 +0000 | [diff] [blame] | 784 | }; |
| 785 | |
Michael J. Spencer | bee1f7f | 2010-10-10 22:04:20 +0000 | [diff] [blame] | 786 | } // end anonymous namespace |
Daniel Dunbar | 40eb7f0 | 2010-02-21 21:54:14 +0000 | [diff] [blame] | 787 | |
Bill Wendling | 58e2d3d | 2013-09-09 02:37:14 +0000 | [diff] [blame] | 788 | MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T, |
| 789 | const MCRegisterInfo &MRI, |
| 790 | StringRef TT, |
| 791 | StringRef CPU) { |
Daniel Dunbar | 2b9b0e3 | 2011-04-19 21:14:45 +0000 | [diff] [blame] | 792 | Triple TheTriple(TT); |
| 793 | |
| 794 | if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO) |
Bill Wendling | 58e2d3d | 2013-09-09 02:37:14 +0000 | [diff] [blame] | 795 | return new DarwinX86_32AsmBackend(T, MRI, CPU, |
| 796 | TheTriple.isMacOSX() && |
| 797 | !TheTriple.isMacOSXVersionLT(10, 7)); |
Daniel Dunbar | 2b9b0e3 | 2011-04-19 21:14:45 +0000 | [diff] [blame] | 798 | |
Andrew Kaylor | feb805f | 2012-10-02 18:38:34 +0000 | [diff] [blame] | 799 | if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF) |
Roman Divacky | 5dd4ccb | 2012-09-18 16:08:49 +0000 | [diff] [blame] | 800 | return new WindowsX86AsmBackend(T, false, CPU); |
Daniel Dunbar | 2b9b0e3 | 2011-04-19 21:14:45 +0000 | [diff] [blame] | 801 | |
Rafael Espindola | 1ad4095 | 2011-12-21 17:00:36 +0000 | [diff] [blame] | 802 | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); |
Roman Divacky | 5dd4ccb | 2012-09-18 16:08:49 +0000 | [diff] [blame] | 803 | return new ELFX86_32AsmBackend(T, OSABI, CPU); |
Daniel Dunbar | 40eb7f0 | 2010-02-21 21:54:14 +0000 | [diff] [blame] | 804 | } |
| 805 | |
Bill Wendling | 58e2d3d | 2013-09-09 02:37:14 +0000 | [diff] [blame] | 806 | MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T, |
| 807 | const MCRegisterInfo &MRI, |
| 808 | StringRef TT, |
| 809 | StringRef CPU) { |
Daniel Dunbar | 2b9b0e3 | 2011-04-19 21:14:45 +0000 | [diff] [blame] | 810 | Triple TheTriple(TT); |
| 811 | |
| 812 | if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO) |
Bill Wendling | 58e2d3d | 2013-09-09 02:37:14 +0000 | [diff] [blame] | 813 | return new DarwinX86_64AsmBackend(T, MRI, CPU, |
| 814 | TheTriple.isMacOSX() && |
| 815 | !TheTriple.isMacOSXVersionLT(10, 7)); |
Daniel Dunbar | 2b9b0e3 | 2011-04-19 21:14:45 +0000 | [diff] [blame] | 816 | |
Andrew Kaylor | feb805f | 2012-10-02 18:38:34 +0000 | [diff] [blame] | 817 | if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF) |
Roman Divacky | 5dd4ccb | 2012-09-18 16:08:49 +0000 | [diff] [blame] | 818 | return new WindowsX86AsmBackend(T, true, CPU); |
Daniel Dunbar | 2b9b0e3 | 2011-04-19 21:14:45 +0000 | [diff] [blame] | 819 | |
Rafael Espindola | 1ad4095 | 2011-12-21 17:00:36 +0000 | [diff] [blame] | 820 | uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS()); |
Roman Divacky | 5dd4ccb | 2012-09-18 16:08:49 +0000 | [diff] [blame] | 821 | return new ELFX86_64AsmBackend(T, OSABI, CPU); |
Daniel Dunbar | 40eb7f0 | 2010-02-21 21:54:14 +0000 | [diff] [blame] | 822 | } |