//===-- X86AsmBackend.cpp - X86 Assembler Backend ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/X86BaseInfo.h"
#include "MCTargetDesc/X86FixupKinds.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAssembler.h"
#include "llvm/MC/MCELFObjectWriter.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCMachObjectWriter.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSectionCOFF.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSectionMachO.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ELF.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachO.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Option to allow disabling arithmetic relaxation to work around PR9807, which
// is useful when running bitwise comparison experiments on Darwin. We should
// be able to remove this once PR9807 is resolved.
static cl::opt<bool>
MCDisableArithRelaxation("mc-x86-disable-arith-relaxation",
         cl::desc("Disable relaxation of arithmetic instruction for X86"));

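// Map each fixup kind to the log2 of its size in bytes.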
static unsigned getFixupKindLog2Size(unsigned Kind) {
  switch (Kind) {
  default: llvm_unreachable("invalid fixup kind!");
  case FK_PCRel_1:
  case FK_SecRel_1:
  case FK_Data_1: return 0;
  case FK_PCRel_2:
  case FK_SecRel_2:
  case FK_Data_2: return 1;
  case FK_PCRel_4:
  case X86::reloc_riprel_4byte:
  case X86::reloc_riprel_4byte_movq_load:
  case X86::reloc_signed_4byte:
  case X86::reloc_global_offset_table:
  case FK_SecRel_4:
  case FK_Data_4: return 2;
  case FK_PCRel_8:
  case FK_SecRel_8:
  case FK_Data_8: return 3;
  }
}

namespace {

class X86ELFObjectWriter : public MCELFObjectTargetWriter {
public:
  X86ELFObjectWriter(bool is64Bit, uint8_t OSABI, uint16_t EMachine,
                     bool HasRelocationAddend, bool foobar)
    : MCELFObjectTargetWriter(is64Bit, OSABI, EMachine, HasRelocationAddend) {}
};

class X86AsmBackend : public MCAsmBackend {
  StringRef CPU;
public:
  X86AsmBackend(const Target &T, StringRef _CPU)
    : MCAsmBackend(), CPU(_CPU) {}

  unsigned getNumFixupKinds() const {
    return X86::NumTargetFixupKinds;
  }

  const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const {
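    // Each entry gives { name, bit offset, bit size, flags } for one
    // target-specific fixup, in the same order as the X86::Fixups enum.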
    const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
      { "reloc_riprel_4byte", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel },
      { "reloc_riprel_4byte_movq_load", 0, 4 * 8, MCFixupKindInfo::FKF_IsPCRel},
      { "reloc_signed_4byte", 0, 4 * 8, 0},
      { "reloc_global_offset_table", 0, 4 * 8, 0}
    };

    if (Kind < FirstTargetFixupKind)
      return MCAsmBackend::getFixupKindInfo(Kind);

    assert(unsigned(Kind - FirstTargetFixupKind) < getNumFixupKinds() &&
           "Invalid kind!");
    return Infos[Kind - FirstTargetFixupKind];
  }

  void applyFixup(const MCFixup &Fixup, char *Data, unsigned DataSize,
                  uint64_t Value) const {
    unsigned Size = 1 << getFixupKindLog2Size(Fixup.getKind());

    assert(Fixup.getOffset() + Size <= DataSize &&
           "Invalid fixup offset!");

    // Check that upper bits are either all zeros or all ones.
    // Specifically ignore overflow/underflow as long as the leakage is
    // limited to the lower bits. This is to remain compatible with
    // other assemblers.
    assert(isIntN(Size * 8 + 1, Value) &&
           "Value does not fit in the Fixup field");

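    // Write the value into the fragment's data in little-endian byte order.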
    for (unsigned i = 0; i != Size; ++i)
      Data[Fixup.getOffset() + i] = uint8_t(Value >> (i * 8));
  }

  bool mayNeedRelaxation(const MCInst &Inst) const;

  bool fixupNeedsRelaxation(const MCFixup &Fixup,
                            uint64_t Value,
                            const MCRelaxableFragment *DF,
                            const MCAsmLayout &Layout) const;

  void relaxInstruction(const MCInst &Inst, MCInst &Res) const;

  bool writeNopData(uint64_t Count, MCObjectWriter *OW) const;
};
} // end anonymous namespace

static unsigned getRelaxedOpcodeBranch(unsigned Op) {
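  // Map each 1-byte-displacement jump to its 4-byte-displacement form; any
  // opcode with no wider form is returned unchanged.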
  switch (Op) {
  default:
    return Op;

  case X86::JAE_1: return X86::JAE_4;
  case X86::JA_1: return X86::JA_4;
  case X86::JBE_1: return X86::JBE_4;
  case X86::JB_1: return X86::JB_4;
  case X86::JE_1: return X86::JE_4;
  case X86::JGE_1: return X86::JGE_4;
  case X86::JG_1: return X86::JG_4;
  case X86::JLE_1: return X86::JLE_4;
  case X86::JL_1: return X86::JL_4;
  case X86::JMP_1: return X86::JMP_4;
  case X86::JNE_1: return X86::JNE_4;
  case X86::JNO_1: return X86::JNO_4;
  case X86::JNP_1: return X86::JNP_4;
  case X86::JNS_1: return X86::JNS_4;
  case X86::JO_1: return X86::JO_4;
  case X86::JP_1: return X86::JP_4;
  case X86::JS_1: return X86::JS_4;
  }
}

static unsigned getRelaxedOpcodeArith(unsigned Op) {
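  // Map the sign-extended-imm8 form of each arithmetic instruction to its
  // full-width immediate form; anything else is returned unchanged.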
  switch (Op) {
  default:
    return Op;

  // IMUL
  case X86::IMUL16rri8: return X86::IMUL16rri;
  case X86::IMUL16rmi8: return X86::IMUL16rmi;
  case X86::IMUL32rri8: return X86::IMUL32rri;
  case X86::IMUL32rmi8: return X86::IMUL32rmi;
  case X86::IMUL64rri8: return X86::IMUL64rri32;
  case X86::IMUL64rmi8: return X86::IMUL64rmi32;

  // AND
  case X86::AND16ri8: return X86::AND16ri;
  case X86::AND16mi8: return X86::AND16mi;
  case X86::AND32ri8: return X86::AND32ri;
  case X86::AND32mi8: return X86::AND32mi;
  case X86::AND64ri8: return X86::AND64ri32;
  case X86::AND64mi8: return X86::AND64mi32;

  // OR
  case X86::OR16ri8: return X86::OR16ri;
  case X86::OR16mi8: return X86::OR16mi;
  case X86::OR32ri8: return X86::OR32ri;
  case X86::OR32mi8: return X86::OR32mi;
  case X86::OR64ri8: return X86::OR64ri32;
  case X86::OR64mi8: return X86::OR64mi32;

  // XOR
  case X86::XOR16ri8: return X86::XOR16ri;
  case X86::XOR16mi8: return X86::XOR16mi;
  case X86::XOR32ri8: return X86::XOR32ri;
  case X86::XOR32mi8: return X86::XOR32mi;
  case X86::XOR64ri8: return X86::XOR64ri32;
  case X86::XOR64mi8: return X86::XOR64mi32;

  // ADD
  case X86::ADD16ri8: return X86::ADD16ri;
  case X86::ADD16mi8: return X86::ADD16mi;
  case X86::ADD32ri8: return X86::ADD32ri;
  case X86::ADD32mi8: return X86::ADD32mi;
  case X86::ADD64ri8: return X86::ADD64ri32;
  case X86::ADD64mi8: return X86::ADD64mi32;

  // SUB
  case X86::SUB16ri8: return X86::SUB16ri;
  case X86::SUB16mi8: return X86::SUB16mi;
  case X86::SUB32ri8: return X86::SUB32ri;
  case X86::SUB32mi8: return X86::SUB32mi;
  case X86::SUB64ri8: return X86::SUB64ri32;
  case X86::SUB64mi8: return X86::SUB64mi32;

  // CMP
  case X86::CMP16ri8: return X86::CMP16ri;
  case X86::CMP16mi8: return X86::CMP16mi;
  case X86::CMP32ri8: return X86::CMP32ri;
  case X86::CMP32mi8: return X86::CMP32mi;
  case X86::CMP64ri8: return X86::CMP64ri32;
  case X86::CMP64mi8: return X86::CMP64mi32;

  // PUSH
  case X86::PUSHi8: return X86::PUSHi32;
  case X86::PUSHi16: return X86::PUSHi32;
  case X86::PUSH64i8: return X86::PUSH64i32;
  case X86::PUSH64i16: return X86::PUSH64i32;
  }
}

static unsigned getRelaxedOpcode(unsigned Op) {
  unsigned R = getRelaxedOpcodeArith(Op);
  if (R != Op)
    return R;
  return getRelaxedOpcodeBranch(Op);
}

bool X86AsmBackend::mayNeedRelaxation(const MCInst &Inst) const {
  // Branches can always be relaxed.
  if (getRelaxedOpcodeBranch(Inst.getOpcode()) != Inst.getOpcode())
    return true;

  if (MCDisableArithRelaxation)
    return false;

  // Check if this instruction is ever relaxable.
  if (getRelaxedOpcodeArith(Inst.getOpcode()) == Inst.getOpcode())
    return false;

  // Check if it has an expression and is not RIP relative.
  bool hasExp = false;
  bool hasRIP = false;
  for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
    const MCOperand &Op = Inst.getOperand(i);
    if (Op.isExpr())
      hasExp = true;

    if (Op.isReg() && Op.getReg() == X86::RIP)
      hasRIP = true;
  }

  // FIXME: Why exactly do we need the !hasRIP? Is it just a limitation on
  // how we do relaxations?
  return hasExp && !hasRIP;
}

bool X86AsmBackend::fixupNeedsRelaxation(const MCFixup &Fixup,
                                         uint64_t Value,
                                         const MCRelaxableFragment *DF,
                                         const MCAsmLayout &Layout) const {
  // Relax if the value is too big for a (signed) i8.
  return int64_t(Value) != int64_t(int8_t(Value));
}

// FIXME: Can tblgen help at all here to verify there aren't other instructions
// we can relax?
void X86AsmBackend::relaxInstruction(const MCInst &Inst, MCInst &Res) const {
  // The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel.
  unsigned RelaxedOp = getRelaxedOpcode(Inst.getOpcode());

  if (RelaxedOp == Inst.getOpcode()) {
    SmallString<256> Tmp;
    raw_svector_ostream OS(Tmp);
    Inst.dump_pretty(OS);
    OS << "\n";
    report_fatal_error("unexpected instruction to relax: " + OS.str());
  }

  Res = Inst;
  Res.setOpcode(RelaxedOp);
}

/// \brief Write a sequence of optimal nops to the output, covering \p Count
/// bytes.
/// \return - true on success, false on failure
bool X86AsmBackend::writeNopData(uint64_t Count, MCObjectWriter *OW) const {
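  // Nops[N - 1] holds the canonical N-byte nop sequence, for N = 1..10.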
  static const uint8_t Nops[10][10] = {
    // nop
    {0x90},
    // xchg %ax,%ax
    {0x66, 0x90},
    // nopl (%[re]ax)
    {0x0f, 0x1f, 0x00},
    // nopl 0(%[re]ax)
    {0x0f, 0x1f, 0x40, 0x00},
    // nopl 0(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopw 0(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00},
    // nopl 0L(%[re]ax)
    {0x0f, 0x1f, 0x80, 0x00, 0x00, 0x00, 0x00},
    // nopl 0L(%[re]ax,%[re]ax,1)
    {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw 0L(%[re]ax,%[re]ax,1)
    {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
    // nopw %cs:0L(%[re]ax,%[re]ax,1)
    {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00},
  };

  // This CPU doesn't support long nops. If needed, add more.
  // FIXME: Can we get this from the subtarget somehow?
  if (CPU == "generic" || CPU == "i386" || CPU == "i486" || CPU == "i586" ||
      CPU == "pentium" || CPU == "pentium-mmx" || CPU == "geode") {
    for (uint64_t i = 0; i < Count; ++i)
      OW->Write8(0x90);
    return true;
  }

  // 15 is the longest single nop instruction. Emit as many 15-byte nops as
  // needed, then emit a nop of the remaining length.
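  // Lengths beyond 10 bytes are built by prepending 0x66 operand-size
  // prefixes to the 10-byte nop, so a single nop can reach 15 bytes.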
  do {
    const uint8_t ThisNopLength = (uint8_t) std::min(Count, (uint64_t) 15);
    const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
    for (uint8_t i = 0; i < Prefixes; i++)
      OW->Write8(0x66);
    const uint8_t Rest = ThisNopLength - Prefixes;
    for (uint8_t i = 0; i < Rest; i++)
      OW->Write8(Nops[Rest - 1][i]);
    Count -= ThisNopLength;
  } while (Count != 0);

  return true;
}

/* *** */

namespace {

class ELFX86AsmBackend : public X86AsmBackend {
public:
  uint8_t OSABI;
  ELFX86AsmBackend(const Target &T, uint8_t _OSABI, StringRef CPU)
    : X86AsmBackend(T, CPU), OSABI(_OSABI) {
    HasReliableSymbolDifference = true;
  }

  virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
    const MCSectionELF &ES = static_cast<const MCSectionELF&>(Section);
    return ES.getFlags() & ELF::SHF_MERGE;
  }
};

class ELFX86_32AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_32AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ false, OSABI, ELF::EM_386);
  }
};

class ELFX86_64AsmBackend : public ELFX86AsmBackend {
public:
  ELFX86_64AsmBackend(const Target &T, uint8_t OSABI, StringRef CPU)
    : ELFX86AsmBackend(T, OSABI, CPU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86ELFObjectWriter(OS, /*IsELF64*/ true, OSABI, ELF::EM_X86_64);
  }
};

class WindowsX86AsmBackend : public X86AsmBackend {
  bool Is64Bit;

public:
  WindowsX86AsmBackend(const Target &T, bool is64Bit, StringRef CPU)
    : X86AsmBackend(T, CPU)
    , Is64Bit(is64Bit) {
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86WinCOFFObjectWriter(OS, Is64Bit);
  }
};

namespace CU {

  /// Compact unwind encoding values.
  enum CompactUnwindEncodings {
    /// [RE]BP based frame, where [RE]BP is pushed on the stack immediately
    /// after the return address, then [RE]SP is moved to [RE]BP.
    UNWIND_MODE_BP_FRAME                   = 0x01000000,

    /// A frameless function with a small constant stack size.
    UNWIND_MODE_STACK_IMMD                 = 0x02000000,

    /// A frameless function with a large constant stack size.
    UNWIND_MODE_STACK_IND                  = 0x03000000,

    /// No compact unwind encoding is available.
    UNWIND_MODE_DWARF                      = 0x04000000,

    /// Mask for encoding the frame registers.
    UNWIND_BP_FRAME_REGISTERS              = 0x00007FFF,

    /// Mask for encoding the frameless registers.
    UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
  };

} // end CU namespace

class DarwinX86AsmBackend : public X86AsmBackend {
  const MCRegisterInfo &MRI;

  /// \brief Number of registers that can be saved in a compact unwind encoding.
  enum { CU_NUM_SAVED_REGS = 6 };

  mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
  bool Is64Bit;

  unsigned OffsetSize;                   ///< Offset of a "push" instruction.
  unsigned PushInstrSize;                ///< Size of a "push" instruction.
  unsigned MoveInstrSize;                ///< Size of a "move" instruction.
  unsigned StackDivide;                  ///< Amount to adjust stack size by.
protected:
  /// \brief Implementation of algorithm to generate the compact unwind encoding
  /// for the CFI instructions.
  uint32_t
  generateCompactUnwindEncodingImpl(ArrayRef<MCCFIInstruction> Instrs) const {
    if (Instrs.empty()) return 0;

    // Reset the saved registers.
    unsigned SavedRegIdx = 0;
    memset(SavedRegs, 0, sizeof(SavedRegs));

    bool HasFP = false;

    // Encode that we are using EBP/RBP as the frame pointer.
    uint32_t CompactUnwindEncoding = 0;

    unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
    unsigned InstrOffset = 0;
    unsigned StackAdjust = 0;
    unsigned StackSize = 0;
    unsigned PrevStackSize = 0;
    unsigned NumDefCFAOffsets = 0;

    for (unsigned i = 0, e = Instrs.size(); i != e; ++i) {
      const MCCFIInstruction &Inst = Instrs[i];

      switch (Inst.getOperation()) {
      default:
        llvm_unreachable("cannot handle CFI directive for compact unwind!");
      case MCCFIInstruction::OpDefCfaRegister: {
        // Defines a frame pointer. E.g.
        //
        //     movq %rsp, %rbp
        //  L0:
        //     .cfi_def_cfa_register %rbp
        //
        HasFP = true;
        assert(MRI.getLLVMRegNum(Inst.getRegister(), true) ==
               (Is64Bit ? X86::RBP : X86::EBP) && "Invalid frame pointer!");

        // Reset the counts.
        memset(SavedRegs, 0, sizeof(SavedRegs));
        StackAdjust = 0;
        SavedRegIdx = 0;
        InstrOffset += MoveInstrSize;
        break;
      }
      case MCCFIInstruction::OpDefCfaOffset: {
        // Defines a new offset for the CFA. E.g.
        //
        //  With frame:
        //
        //     pushq %rbp
        //  L0:
        //     .cfi_def_cfa_offset 16
        //
        //  Without frame:
        //
        //     subq $72, %rsp
        //  L0:
        //     .cfi_def_cfa_offset 80
        //
        PrevStackSize = StackSize;
        StackSize = std::abs(Inst.getOffset()) / StackDivide;
        ++NumDefCFAOffsets;
        break;
      }
      case MCCFIInstruction::OpOffset: {
        // Defines a "push" of a callee-saved register. E.g.
        //
        //     pushq %r15
        //     pushq %r14
        //     pushq %rbx
        //  L0:
        //     subq $120, %rsp
        //  L1:
        //     .cfi_offset %rbx, -40
        //     .cfi_offset %r14, -32
        //     .cfi_offset %r15, -24
        //
        if (SavedRegIdx == CU_NUM_SAVED_REGS)
          // If there are too many saved registers, we cannot use a compact
          // unwind encoding.
          return CU::UNWIND_MODE_DWARF;

        unsigned Reg = MRI.getLLVMRegNum(Inst.getRegister(), true);
        SavedRegs[SavedRegIdx++] = Reg;
        StackAdjust += OffsetSize;
        InstrOffset += PushInstrSize;
        break;
      }
      }
    }

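    // StackAdjust counted bytes of pushed registers; convert it to a count of
    // register-sized slots so it fits the encoding's small bit fields.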
    StackAdjust /= StackDivide;

    if (HasFP) {
      if ((StackAdjust & 0xFF) != StackAdjust)
        // Offset was too big for a compact unwind encoding.
        return CU::UNWIND_MODE_DWARF;

      // Get the encoding of the saved registers when we have a frame pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
      CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
      CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
    } else {
      // If the amount of the stack allocation is the size of a register, then
      // we "push" the RAX/EAX register onto the stack instead of adjusting the
      // stack pointer with a SUB instruction. We don't support the push of the
      // RAX/EAX register with compact unwind. So we check for that situation
      // here.
      if ((NumDefCFAOffsets == SavedRegIdx + 1 &&
           StackSize - PrevStackSize == 1) ||
          (Instrs.size() == 1 && NumDefCFAOffsets == 1 && StackSize == 2))
        return CU::UNWIND_MODE_DWARF;

      SubtractInstrIdx += InstrOffset;
      ++StackAdjust;

      if ((StackSize & 0xFF) == StackSize) {
        // Frameless stack with a small stack size.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;

        // Encode the stack size.
        CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
      } else {
        if ((StackAdjust & 0x7) != StackAdjust)
          // The extra stack adjustments are too big for us to handle.
          return CU::UNWIND_MODE_DWARF;

        // Frameless stack with an offset too large for us to encode compactly.
        CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;

        // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
        // instruction.
        CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;

        // Encode any extra stack adjustments (done via push
        // instructions).
        CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
      }

      // Encode the number of registers saved. (Reverse the list first.)
      std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
      CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;

      // Get the encoding of the saved registers when we don't have a frame
      // pointer.
      uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
      if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;

      // Encode the register encoding.
      CompactUnwindEncoding |=
        RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
    }

    return CompactUnwindEncoding;
  }

private:
  /// \brief Get the compact unwind number for a given register. The number
  /// corresponds to the enum lists in compact_unwind_encoding.h.
  int getCompactUnwindRegNum(unsigned Reg) const {
    static const uint16_t CU32BitRegs[7] = {
      X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
    };
    static const uint16_t CU64BitRegs[] = {
      X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
    };
    const uint16_t *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
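    // Registers are numbered starting at 1; a 0 entry terminates the table.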
    for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
      if (*CURegs == Reg)
        return Idx;

    return -1;
  }

  /// \brief Return the registers encoded for a compact encoding with a frame
  /// pointer.
  uint32_t encodeCompactUnwindRegistersWithFrame() const {
    // Encode the registers in the order they were saved --- 3-bits per
    // register. The list of saved registers is assumed to be in reverse
    // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
    uint32_t RegEnc = 0;
    for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
      unsigned Reg = SavedRegs[i];
      if (Reg == 0) break;

      int CURegNum = getCompactUnwindRegNum(Reg);
      if (CURegNum == -1) return ~0U;

      // Encode the 3-bit register number in order, skipping over 3-bits for
      // each register.
      RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
    }

    assert((RegEnc & 0x3FFFF) == RegEnc &&
           "Invalid compact register encoding!");
    return RegEnc;
  }

  /// \brief Create the permutation encoding used with frameless stacks. It is
  /// passed the number of registers to be saved and an array of the registers
  /// saved.
  uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
    // The saved registers are numbered from 1 to 6. In order to encode the
    // order in which they were saved, we re-number them according to their
    // place in the register order. The re-numbering is relative to the last
    // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
    // that order:
    //
    //    Orig  Re-Num
    //    ----  ------
    //     6       6
    //     2       2
    //     4       3
    //     5       3
    //
    for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
      int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
      if (CUReg == -1) return ~0U;
      SavedRegs[i] = CUReg;
    }

    // Reverse the list.
    std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);

    uint32_t RenumRegs[CU_NUM_SAVED_REGS];
    for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
      unsigned Countless = 0;
      for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
        if (SavedRegs[j] < SavedRegs[i])
          ++Countless;

      RenumRegs[i] = SavedRegs[i] - Countless - 1;
    }

    // Take the renumbered values and encode them into a 10-bit number.
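    // The multipliers below form a mixed-radix (factorial-style) expansion, so
    // each distinct ordering of the saved registers maps to a unique value.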
    uint32_t permutationEncoding = 0;
    switch (RegCount) {
    case 6:
      permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
                             + 6 * RenumRegs[2] +  2 * RenumRegs[3]
                             +     RenumRegs[4];
      break;
    case 5:
      permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
                             + 6 * RenumRegs[3] +  2 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 4:
      permutationEncoding |=  60 * RenumRegs[2] + 12 * RenumRegs[3]
                             + 3 * RenumRegs[4] +      RenumRegs[5];
      break;
    case 3:
      permutationEncoding |=  20 * RenumRegs[3] +  4 * RenumRegs[4]
                             +     RenumRegs[5];
      break;
    case 2:
      permutationEncoding |=   5 * RenumRegs[4] +      RenumRegs[5];
      break;
    case 1:
      permutationEncoding |=       RenumRegs[5];
      break;
    }

    assert((permutationEncoding & 0x3FF) == permutationEncoding &&
           "Invalid compact register encoding!");
    return permutationEncoding;
  }

public:
  DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI, StringRef CPU,
                      bool Is64Bit)
    : X86AsmBackend(T, CPU), MRI(MRI), Is64Bit(Is64Bit) {
    memset(SavedRegs, 0, sizeof(SavedRegs));
    OffsetSize = Is64Bit ? 8 : 4;
    MoveInstrSize = Is64Bit ? 3 : 2;
    StackDivide = Is64Bit ? 8 : 4;
    PushInstrSize = 1;
  }
};

class DarwinX86_32AsmBackend : public DarwinX86AsmBackend {
  bool SupportsCU;
public:
  DarwinX86_32AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU, bool SupportsCU)
    : DarwinX86AsmBackend(T, MRI, CPU, false), SupportsCU(SupportsCU) {}

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/false,
                                     MachO::CPU_TYPE_I386,
                                     MachO::CPU_SUBTYPE_I386_ALL);
  }

  /// \brief Generate the compact unwind encoding for the CFI instructions.
  virtual unsigned
  generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const {
    return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
  }
};

class DarwinX86_64AsmBackend : public DarwinX86AsmBackend {
  bool SupportsCU;
public:
  DarwinX86_64AsmBackend(const Target &T, const MCRegisterInfo &MRI,
                         StringRef CPU, bool SupportsCU)
    : DarwinX86AsmBackend(T, MRI, CPU, true), SupportsCU(SupportsCU) {
    HasReliableSymbolDifference = true;
  }

  MCObjectWriter *createObjectWriter(raw_ostream &OS) const {
    return createX86MachObjectWriter(OS, /*Is64Bit=*/true,
                                     MachO::CPU_TYPE_X86_64,
                                     MachO::CPU_SUBTYPE_X86_64_ALL);
  }

  virtual bool doesSectionRequireSymbols(const MCSection &Section) const {
    // Temporary labels in the string literals sections require symbols. The
    // issue is that the x86_64 relocation format does not allow symbol +
    // offset, and so the linker does not have enough information to resolve the
    // access to the appropriate atom unless an external relocation is used. For
    // non-cstring sections, we expect the compiler to use a non-temporary label
    // for anything that could have an addend pointing outside the symbol.
    //
    // See <rdar://problem/4765733>.
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    return SMO.getType() == MCSectionMachO::S_CSTRING_LITERALS;
  }

  virtual bool isSectionAtomizable(const MCSection &Section) const {
    const MCSectionMachO &SMO = static_cast<const MCSectionMachO&>(Section);
    // Fixed-size data sections are uniqued; they cannot be diced into atoms.
    switch (SMO.getType()) {
    default:
      return true;

    case MCSectionMachO::S_4BYTE_LITERALS:
    case MCSectionMachO::S_8BYTE_LITERALS:
    case MCSectionMachO::S_16BYTE_LITERALS:
    case MCSectionMachO::S_LITERAL_POINTERS:
    case MCSectionMachO::S_NON_LAZY_SYMBOL_POINTERS:
    case MCSectionMachO::S_LAZY_SYMBOL_POINTERS:
    case MCSectionMachO::S_MOD_INIT_FUNC_POINTERS:
    case MCSectionMachO::S_MOD_TERM_FUNC_POINTERS:
    case MCSectionMachO::S_INTERPOSING:
      return false;
    }
  }

  /// \brief Generate the compact unwind encoding for the CFI instructions.
  virtual unsigned
  generateCompactUnwindEncoding(ArrayRef<MCCFIInstruction> Instrs) const {
    return SupportsCU ? generateCompactUnwindEncodingImpl(Instrs) : 0;
  }
};

} // end anonymous namespace

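// Backend factories: choose a Mach-O, COFF, or ELF-flavored backend based on
// the target triple's OS and environment.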
MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
    return new DarwinX86_32AsmBackend(T, MRI, CPU,
                                      TheTriple.isMacOSX() &&
                                      !TheTriple.isMacOSXVersionLT(10, 7));

  if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
    return new WindowsX86AsmBackend(T, false, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_32AsmBackend(T, OSABI, CPU);
}

MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
                                           const MCRegisterInfo &MRI,
                                           StringRef TT,
                                           StringRef CPU) {
  Triple TheTriple(TT);

  if (TheTriple.isOSDarwin() || TheTriple.getEnvironment() == Triple::MachO)
    return new DarwinX86_64AsmBackend(T, MRI, CPU,
                                      TheTriple.isMacOSX() &&
                                      !TheTriple.isMacOSXVersionLT(10, 7));

  if (TheTriple.isOSWindows() && TheTriple.getEnvironment() != Triple::ELF)
    return new WindowsX86AsmBackend(T, true, CPU);

  uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
  return new ELFX86_64AsmBackend(T, OSABI, CPU);
}
Daniel Dunbar40eb7f02010-02-21 21:54:14 +0000822}