//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Option to allow disabling arithmetic relaxation to work around PR9807, which
// is useful when running bitwise comparison experiments on Darwin. We should be
// able to remove this once PR9807 is resolved.
static cl::opt<bool>
MCDisableArithRelaxation("mc-x86-disable-arith-relaxation",
         cl::desc("Disable relaxation of arithmetic instructions for X86"));

static unsigned getFixupKindLog2Size(unsigned Kind) {
  switch (Kind) {
  default:
    llvm_unreachable("invalid fixup kind!");
  case FK_PCRel_1:
  case FK_SecRel_1:
  case FK_Data_1:
    return 0;
  case FK_PCRel_2:
  case FK_SecRel_2:
  case FK_Data_2:
    return 1;
  case FK_PCRel_4:
  case X86::reloc_riprel_4byte:
  case X86::reloc_riprel_4byte_movq_load:
  case X86::reloc_signed_4byte:
  case X86::reloc_global_offset_table:
  case FK_SecRel_4:
  case FK_Data_4:
    return 2;
  case FK_PCRel_8:
  case FK_SecRel_8:
  case FK_Data_8:
  case X86::reloc_global_offset_table8:
    return 3;
  }
}

namespace {

class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend, bool foobar)
    : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};

class X86AsmBackend : public MCAsmBackend {
  const StringRef CPU;
  bool HasNopl;
  const uint64_t MaxNopLength;
public:
  X86AsmBackend(const Target &T, StringRef _CPU)
      : MCAsmBackend(), CPU(_CPU), MaxNopLength(_CPU == "slm" ? 7 : 15) {
    HasNopl = CPU != "generic" && CPU != "i386" && CPU != "i486" &&
              CPU != "i586" && CPU != "pentium" && CPU != "pentium-mmx" &&
              CPU != "i686" && CPU != "k6" && CPU != "k6-2" && CPU != "k6-3" &&
              CPU != "geode" && CPU != "winchip-c6" && CPU != "winchip2" &&
              CPU != "c3" && CPU != "c3-2";
  }

  unsigned getNumFixupKinds() const override {
    return X86::NumTargetFixupKinds;
  }

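  // Each table entry below describes one target fixup: its name, the bit
  // offset and bit width of the fixed-up field, and flags such as
  // FKF_IsPCRel. All of the entries here are byte-aligned 32-bit fields.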
  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
    const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
      { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
      { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
      { "reloc_signed_4byte", 0, 4 * 8, 0},
      { "reloc_global_offset_table", 0, 4 * 8, 0}
    };

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value, bool IsPCRel) const override {
    unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());

    assert(Fixup.getOffset() + Size <= DataSize &&
           "Invalid fixup offset!");

    // Check that upper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // other assemblers.
    assert(isIntN(Size * 8 + 1, Value) &&
           "Value does not fit in the Fixup field");

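    // Write the fixup value out in little-endian byte order.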
    for (unsigned i = 0; i != Size; ++i)
      Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
  }

  bool mayNeedRelaxation(const MCInst &Inst) const override;

  bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const override;

  void relaxInstruction(const MCInst &Inst, MCInst &Res) const override;

  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const override;
};
} // end anonymous namespace

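// Map a branch opcode with a 1-byte displacement to its 4-byte-displacement
// form; opcodes with no wider form are returned unchanged.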
static unsigned getRelaxedOpcodeBranch(unsigned Op) {
  switch (Op) {
  default:
    return Op;

  case X86::JAE_1: return X86::JAE_4;
  case X86::JA_1:  return X86::JA_4;
  case X86::JBE_1: return X86::JBE_4;
  case X86::JB_1:  return X86::JB_4;
  case X86::JE_1:  return X86::JE_4;
  case X86::JGE_1: return X86::JGE_4;
  case X86::JG_1:  return X86::JG_4;
  case X86::JLE_1: return X86::JLE_4;
  case X86::JL_1:  return X86::JL_4;
  case X86::JMP_1: return X86::JMP_4;
  case X86::JNE_1: return X86::JNE_4;
  case X86::JNO_1: return X86::JNO_4;
  case X86::JNP_1: return X86::JNP_4;
  case X86::JNS_1: return X86::JNS_4;
  case X86::JO_1:  return X86::JO_4;
  case X86::JP_1:  return X86::JP_4;
  case X86::JS_1:  return X86::JS_4;
  }
}

static unsigned getRelaxedOpcodeArith(unsigned Op) {
  switch (Op) {
  default:
    return Op;

    // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

    // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

    // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

    // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

    // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

    // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

    // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

    // PUSH
  case X86::PUSH32i8:  return X86::PUSHi32;
  case X86::PUSH16i8:  return X86::PUSHi16;
  case X86::PUSH64i8:  return X86::PUSH64i32;
  case X86::PUSH64i16: return X86::PUSH64i32;
  }
}

static unsigned getRelaxedOpcode(unsigned Op) {
  unsigned R = getRelaxedOpcodeArith(Op);
  if (R != Op)
    return R;
  return getRelaxedOpcodeBranch(Op);
}

bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  // Branches can always be relaxed.
  if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
    return true;

  if (MCDisableArithRelaxation)
    return false;

  // Check if this instruction is ever relaxable.
  if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
    return false;

  // Check if it has an expression and is not RIP relative.
  bool hasExp = false;
  bool hasRIP = false;
  for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
    const MCOperand &Op = Inst.getOperand(i);
    if (Op.isExpr())
      hasExp = true;

    if (Op.isReg() && Op.getReg() == X86::RIP)
      hasRIP = true;
  }

  // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on
  // how we do relaxations?
  return hasExp && !hasRIP;
}

bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                         uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
  // The only relaxations X86 does are from a 1-byte pcrel to a 4-byte pcrel.
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

/// \brief Write a sequence of optimal nops to the output, covering \p Count
/// bytes.
/// \return - true on success, false on failure
bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
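  // Nops[i] is the canonical encoding of an (i + 1)-byte nop; the emission
  // loop below indexes the table as Nops[Rest - 1].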
  static const uint8_t Nops[10][10] = {
    // nop
    {0x90},
    // xchg %ax,%ax
    {0x66, 0x90},
    // nopl (%[re]ax)
    {0x0f, 0x1f, 0x00},
    // nopl 0(%[re]ax)
    {0x0f, 0x1f, 0x40, 0x00},
    // nopl 0(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopw 0(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopl 0L(%[re]ax)
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    // nopl 0L(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw 0L(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw %cs:0L(%[re]ax,%[re]ax,1)
    {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
  };

  // This CPU doesn't support long nops. If needed, add more.
  // FIXME: Can we get this from the subtarget somehow?
  // FIXME: We could generate something better than plain 0x90.
  if (!HasNopl) {
    for (uint64_t i = 0; i < Count; ++i)
      OW->Write8(0x90);
    return true;
  }

  // 15 is the longest single nop instruction. Emit as many 15-byte nops as
  // needed, then emit a nop of the remaining length.
  do {
    const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
    const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
    for (uint8_t i = 0; i < Prefixes; i++)
      OW->Write8(0x66);
    const uint8_t Rest = ThisNopLength - Prefixes;
    for (uint8_t i = 0; i < Rest; i++)
      OW->Write8(Nops[Rest - 1][i]);
    Count -= ThisNopLength;
  } while (Count != 0);

  return true;
}

/* *** */

namespace {

class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU)
      : X86AsmBackend(T, CPU), OSABI(_OSABI) {}
};

class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};

class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};

class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit;

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit, StringRef CPU)
    : X86AsmBackend(T, CPU)
    , Is64Bit(is64Bit) {
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86WinCOFFObjectWriter(OS, Is64Bit);
  }
};

namespace CU {

  /// Compact unwind encoding values.
  enum CompactUnwindEncodings {
    /// [RE]BP based frame where [RE]BP is pushed on the stack immediately
    /// after the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME                   = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD                 = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND                  = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF                      = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS              = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // end CU namespace

class DarwinX86AsmBackend : public X86AsmBackend {
  const MCRegisterInfo &MRI;

  /// \brief Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };

  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  bool Is64Bit;

  unsigned OffsetSize;                   ///< Offset of a "push" instruction.
  unsigned PushInstrSize;                ///< Size of a "push" instruction.
  unsigned MoveInstrSize;                ///< Size of a "move" instruction.
  unsigned StackDivide;                  ///< Amount to adjust stack size by.
protected:
  /// \brief Implementation of algorithm to generate the compact unwind encoding
  /// for the CFI instructions.
  uint32_t
  generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const {
    if (Instrs.empty()) return 0;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));

    bool HasFP = false;

    // Encode that we are using EBP/RBP as the frame pointer.
    uint32_t CompactUnwindEncoding = 0;

    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    unsigned PrevStackSize = 0;
    unsigned NumDefCFAOffsets = 0;

    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        // Any other CFI directives indicate a frame that we aren't prepared
        // to represent via compact unwind, so just bail out.
        return 0;
      case MCCFIInstruction::OpDefCfaRegister: {
        // Defines a frame pointer. E.g.
        //
        //     movq %rsp, %rbp
        //  L0:
        //     .cfi_def_cfa_register %rbp
        //
        HasFP = true;
        assert(MRI.getLLVMRegNum(Inst.getRegister(), true) ==
               (Is64Bit ? X86::RBP : X86::EBP) && "Invalid frame pointer!");

        // Reset the counts.
        memset(SavedRegs, 0, sizeof(SavedRegs));
        StackAdjust = 0;
        SavedRegIdx = 0;
        InstrOffset += MoveInstrSize;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        // Defines a new offset for the CFA. E.g.
        //
        //  With frame:
        //
        //     pushq %rbp
        //  L0:
        //     .cfi_def_cfa_offset 16
        //
        //  Without frame:
        //
        //     subq $72, %rsp
        //  L0:
        //     .cfi_def_cfa_offset 80
        //
        PrevStackSize = StackSize;
        StackSize = std::abs(Inst.getOffset()) / StackDivide;
        ++NumDefCFAOffsets;
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Defines a "push" of a callee-saved register. E.g.
        //
        //     pushq %r15
        //     pushq %r14
        //     pushq %rbx
        //  L0:
        //     subq $120, %rsp
        //  L1:
        //     .cfi_offset %rbx, -40
        //     .cfi_offset %r14, -32
        //     .cfi_offset %r15, -24
        //
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // If there are too many saved registers, we cannot use a compact
          // unwind encoding.
          return CU::UNWIND_MODE_DWARF;

        unsigned Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        InstrOffset += PushInstrSize;
        break;
      }
      }
    }

    StackAdjust /= StackDivide;

    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
    } else {
      // If the amount of the stack allocation is the size of a register, then
      // we "push" the RAX/EAX register onto the stack instead of adjusting the
      // stack pointer with a SUB instruction. We don't support the push of the
      // RAX/EAX register with compact unwind. So we check for that situation
      // here.
      if ((NumDefCFAOffsets == SavedRegIdx + 1 &&
           StackSize - PrevStackSize == 1) ||
          (Instrs.size() == 1 && NumDefCFAOffsets == 1 && StackSize == 2))
        return CU::UNWIND_MODE_DWARF;

      SubtractInstrIdx += InstrOffset;
      ++StackAdjust;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless stack with a small stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

        // Encode the stack size.
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
      } else {
        if ((StackAdjust & 0x7) != StackAdjust)
          // The extra stack adjustments are too big for us to handle.
          return CU::UNWIND_MODE_DWARF;

        // Frameless stack with an offset too large for us to encode compactly.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;

        // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
        // instruction.
        CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

        // Encode any extra stack adjustments (done via push instructions).
        CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
      }

      // Encode the number of registers saved. (Reverse the list first.)
      std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when we don't have a frame
      // pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      // Encode the register encoding.
      CompactUnwindEncoding |=
        RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }

private:
  /// \brief Get the compact unwind number for a given register. The number
  /// corresponds to the enum lists in compact_unwind_encoding.h.
  int getCompactUnwindRegNum(unsigned Reg) const {
    static const uint16_t CU32BitRegs[7] = {
      X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
    };
    static const uint16_t CU64BitRegs[] = {
      X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
    };
    const uint16_t *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
    for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
      if (*CURegs == Reg)
        return Idx;

    return -1;
  }

  /// \brief Return the registers encoded for a compact encoding with a frame
  /// pointer.
  uint32_t encodeCompactUnwindRegistersWithFrame() const {
    // Encode the registers in the order they were saved --- 3-bits per
    // register. The list of saved registers is assumed to be in reverse
    // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
    uint32_t RegEnc = 0;
    for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
      unsigned Reg = SavedRegs[i];
      if (Reg == 0) break;

      int CURegNum = getCompactUnwindRegNum(Reg);
      if (CURegNum == -1) return ~0U;

      // Encode the 3-bit register number in order, skipping over 3-bits for
      // each register.
      RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
    }

    assert((RegEnc & 0x3FFFF) == RegEnc &&
           "Invalid compact register encoding!");
    return RegEnc;
  }

  /// \brief Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
    //
    //    Orig  Re-Num
    //    ----  ------
    //     6       6
    //     2       2
    //     4       3
    //     5       3
    //
    for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);

    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i) {
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }

    // Take the renumbered values and encode them into a 10-bit number.
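    // The coefficients used below are factorials (120 = 5!, 24 = 4!, ...), so
    // the switch packs the permutation in factorial base: with at most six
    // saved registers there are 6! = 720 possible orderings, which fits in the
    // 10-bit permutation field.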
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                             +     RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 4:
      permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] +    RenumRegs[5];
      break;
    case 3:
      permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 2:
      permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
      break;
    case 1:
      permutationEncoding |= RenumRegs[5];
      break;
    }

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }

public:
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI, StringRef CPU,
                      bool Is64Bit)
    : X86AsmBackend(T, CPU), MRI(MRI), Is64Bit(Is64Bit) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    OffsetSize = Is64Bit ? 8 : 4;
    MoveInstrSize = Is64Bit ? 3 : 2;
    StackDivide = Is64Bit ? 8 : 4;
    PushInstrSize = 1;
  }
};

class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
public:
  DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU)
    : DarwinX86AsmBackend(T, MRI, CPU, false) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/false,
                                     MachO::CPU_TYPE_I386,
                                     MachO::CPU_SUBTYPE_I386_ALL);
  }

  /// \brief Generate the compact unwind encoding for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    return generateCompactUnwindEncodingImpl(Instrs);
  }
};

class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
  const MachO::CPUSubTypeX86 Subtype;
public:
  DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU, MachO::CPUSubTypeX86 st)
    : DarwinX86AsmBackend(T, MRI, CPU, true), Subtype(st) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const override {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/true,
                                     MachO::CPU_TYPE_X86_64, Subtype);
  }

  bool doesSectionRequireSymbols(const MCSection &Section) const override {
    // Temporary labels in the string literals sections require symbols. The
    // issue is that the x86_64 relocation format does not allow symbol +
    // offset, and so the linker does not have enough information to resolve the
    // access to the appropriate atom unless an external relocation is used. For
    // non-cstring sections, we expect the compiler to use a non-temporary label
    // for anything that could have an addend pointing outside the symbol.
    //
    // See <rdar://problem/4765733>.
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    return SMO.getType() == MachO::S_CSTRING_LITERALS;
  }

  bool isSectionAtomizable(const MCSection &Section) const override {
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    // Fixed sized data sections are uniqued, they cannot be diced into atoms.
    switch (SMO.getType()) {
    default:
      return true;

    case MachO::S_4BYTE_LITERALS:
    case MachO::S_8BYTE_LITERALS:
    case MachO::S_16BYTE_LITERALS:
    case MachO::S_LITERAL_POINTERS:
    case MachO::S_NON_LAZY_SYMBOL_POINTERS:
    case MachO::S_LAZY_SYMBOL_POINTERS:
    case MachO::S_MOD_INIT_FUNC_POINTERS:
    case MachO::S_MOD_TERM_FUNC_POINTERS:
    case MachO::S_INTERPOSING:
      return false;
    }
  }

  /// \brief Generate the compact unwind encoding for the CFI instructions.
  uint32_t generateCompactUnwindEncoding(
                             ArrayRef<MCCFIInstruction> Instrs) const override {
    return generateCompactUnwindEncodingImpl(Instrs);
  }
};

} // end anonymous namespace

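// Backend factory functions: the concrete backend is chosen from the target
// triple's object file format (Mach-O, Windows COFF, or ELF).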
MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSBinFormatMachO())
    return new DarwinX86_32AsmBackend(T, MRI, CPU);

  if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
    return new WindowsX86AsmBackend(T, false, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_32AsmBackend(T, OSABI, CPU);
}

MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSBinFormatMachO()) {
    MachO::CPUSubTypeX86 CS =
        StringSwitch<MachO::CPUSubTypeX86>(TheTriple.getArchName())
            .Case("x86_64h", MachO::CPU_SUBTYPE_X86_64_H)
            .Default(MachO::CPU_SUBTYPE_X86_64_ALL);
    return new DarwinX86_64AsmBackend(T, MRI, CPU, CS);
  }

  if (TheTriple.isOSWindows() && !TheTriple.isOSBinFormatELF())
    return new WindowsX86AsmBackend(T, true, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_64AsmBackend(T, OSABI, CPU);
}