//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/MC/MCAsmInfo.h"
#include <limits>

using namespace llvm;

static cl::opt<bool>
NoFusing("disable-spill-fusing",
         cl::desc("Disable fusing of spill code into instructions"));
static cl::opt<bool>
PrintFailedFusing("print-failed-fuse-candidates",
                  cl::desc("Print instructions that the allocator wants to"
                           " fuse, but the X86 backend currently can't"),
                  cl::Hidden);
static cl::opt<bool>
ReMatPICStubLoad("remat-pic-stub-load",
                 cl::desc("Re-materialize load from stub in PIC mode"),
                 cl::init(false), cl::Hidden);

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
    TM(tm), RI(tm, *this) {
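  // Entries in the fold tables below may OR TB_NOT_REVERSABLE into the memory
  // opcode to mark many->one mappings whose reverse must not be added to the
  // unfolding table (MemOp2RegOpTable).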
  enum {
    TB_NOT_REVERSABLE = 1U << 31,
    TB_FLAGS = TB_NOT_REVERSABLE
  };

  static const unsigned OpTbl2Addr[][2] = {
    { X86::ADC32ri, X86::ADC32mi },
    { X86::ADC32ri8, X86::ADC32mi8 },
    { X86::ADC32rr, X86::ADC32mr },
    { X86::ADC64ri32, X86::ADC64mi32 },
    { X86::ADC64ri8, X86::ADC64mi8 },
    { X86::ADC64rr, X86::ADC64mr },
    { X86::ADD16ri, X86::ADD16mi },
    { X86::ADD16ri8, X86::ADD16mi8 },
    { X86::ADD16ri_DB, X86::ADD16mi | TB_NOT_REVERSABLE },
    { X86::ADD16ri8_DB, X86::ADD16mi8 | TB_NOT_REVERSABLE },
    { X86::ADD16rr, X86::ADD16mr },
    { X86::ADD16rr_DB, X86::ADD16mr | TB_NOT_REVERSABLE },
    { X86::ADD32ri, X86::ADD32mi },
    { X86::ADD32ri8, X86::ADD32mi8 },
    { X86::ADD32ri_DB, X86::ADD32mi | TB_NOT_REVERSABLE },
    { X86::ADD32ri8_DB, X86::ADD32mi8 | TB_NOT_REVERSABLE },
    { X86::ADD32rr, X86::ADD32mr },
    { X86::ADD32rr_DB, X86::ADD32mr | TB_NOT_REVERSABLE },
    { X86::ADD64ri32, X86::ADD64mi32 },
    { X86::ADD64ri8, X86::ADD64mi8 },
    { X86::ADD64ri32_DB, X86::ADD64mi32 | TB_NOT_REVERSABLE },
    { X86::ADD64ri8_DB, X86::ADD64mi8 | TB_NOT_REVERSABLE },
    { X86::ADD64rr, X86::ADD64mr },
    { X86::ADD64rr_DB, X86::ADD64mr | TB_NOT_REVERSABLE },
    { X86::ADD8ri, X86::ADD8mi },
    { X86::ADD8rr, X86::ADD8mr },
    { X86::AND16ri, X86::AND16mi },
    { X86::AND16ri8, X86::AND16mi8 },
    { X86::AND16rr, X86::AND16mr },
    { X86::AND32ri, X86::AND32mi },
    { X86::AND32ri8, X86::AND32mi8 },
    { X86::AND32rr, X86::AND32mr },
    { X86::AND64ri32, X86::AND64mi32 },
    { X86::AND64ri8, X86::AND64mi8 },
    { X86::AND64rr, X86::AND64mr },
    { X86::AND8ri, X86::AND8mi },
    { X86::AND8rr, X86::AND8mr },
    { X86::DEC16r, X86::DEC16m },
    { X86::DEC32r, X86::DEC32m },
    { X86::DEC64_16r, X86::DEC64_16m },
    { X86::DEC64_32r, X86::DEC64_32m },
    { X86::DEC64r, X86::DEC64m },
    { X86::DEC8r, X86::DEC8m },
    { X86::INC16r, X86::INC16m },
    { X86::INC32r, X86::INC32m },
    { X86::INC64_16r, X86::INC64_16m },
    { X86::INC64_32r, X86::INC64_32m },
    { X86::INC64r, X86::INC64m },
    { X86::INC8r, X86::INC8m },
    { X86::NEG16r, X86::NEG16m },
    { X86::NEG32r, X86::NEG32m },
    { X86::NEG64r, X86::NEG64m },
    { X86::NEG8r, X86::NEG8m },
    { X86::NOT16r, X86::NOT16m },
    { X86::NOT32r, X86::NOT32m },
    { X86::NOT64r, X86::NOT64m },
    { X86::NOT8r, X86::NOT8m },
    { X86::OR16ri, X86::OR16mi },
    { X86::OR16ri8, X86::OR16mi8 },
    { X86::OR16rr, X86::OR16mr },
    { X86::OR32ri, X86::OR32mi },
    { X86::OR32ri8, X86::OR32mi8 },
    { X86::OR32rr, X86::OR32mr },
    { X86::OR64ri32, X86::OR64mi32 },
    { X86::OR64ri8, X86::OR64mi8 },
    { X86::OR64rr, X86::OR64mr },
    { X86::OR8ri, X86::OR8mi },
    { X86::OR8rr, X86::OR8mr },
    { X86::ROL16r1, X86::ROL16m1 },
    { X86::ROL16rCL, X86::ROL16mCL },
    { X86::ROL16ri, X86::ROL16mi },
    { X86::ROL32r1, X86::ROL32m1 },
    { X86::ROL32rCL, X86::ROL32mCL },
    { X86::ROL32ri, X86::ROL32mi },
    { X86::ROL64r1, X86::ROL64m1 },
    { X86::ROL64rCL, X86::ROL64mCL },
    { X86::ROL64ri, X86::ROL64mi },
    { X86::ROL8r1, X86::ROL8m1 },
    { X86::ROL8rCL, X86::ROL8mCL },
    { X86::ROL8ri, X86::ROL8mi },
    { X86::ROR16r1, X86::ROR16m1 },
    { X86::ROR16rCL, X86::ROR16mCL },
    { X86::ROR16ri, X86::ROR16mi },
    { X86::ROR32r1, X86::ROR32m1 },
    { X86::ROR32rCL, X86::ROR32mCL },
    { X86::ROR32ri, X86::ROR32mi },
    { X86::ROR64r1, X86::ROR64m1 },
    { X86::ROR64rCL, X86::ROR64mCL },
    { X86::ROR64ri, X86::ROR64mi },
    { X86::ROR8r1, X86::ROR8m1 },
    { X86::ROR8rCL, X86::ROR8mCL },
    { X86::ROR8ri, X86::ROR8mi },
    { X86::SAR16r1, X86::SAR16m1 },
    { X86::SAR16rCL, X86::SAR16mCL },
    { X86::SAR16ri, X86::SAR16mi },
    { X86::SAR32r1, X86::SAR32m1 },
    { X86::SAR32rCL, X86::SAR32mCL },
    { X86::SAR32ri, X86::SAR32mi },
    { X86::SAR64r1, X86::SAR64m1 },
    { X86::SAR64rCL, X86::SAR64mCL },
    { X86::SAR64ri, X86::SAR64mi },
    { X86::SAR8r1, X86::SAR8m1 },
    { X86::SAR8rCL, X86::SAR8mCL },
    { X86::SAR8ri, X86::SAR8mi },
    { X86::SBB32ri, X86::SBB32mi },
    { X86::SBB32ri8, X86::SBB32mi8 },
    { X86::SBB32rr, X86::SBB32mr },
    { X86::SBB64ri32, X86::SBB64mi32 },
    { X86::SBB64ri8, X86::SBB64mi8 },
    { X86::SBB64rr, X86::SBB64mr },
    { X86::SHL16rCL, X86::SHL16mCL },
    { X86::SHL16ri, X86::SHL16mi },
    { X86::SHL32rCL, X86::SHL32mCL },
    { X86::SHL32ri, X86::SHL32mi },
    { X86::SHL64rCL, X86::SHL64mCL },
    { X86::SHL64ri, X86::SHL64mi },
    { X86::SHL8rCL, X86::SHL8mCL },
    { X86::SHL8ri, X86::SHL8mi },
    { X86::SHLD16rrCL, X86::SHLD16mrCL },
    { X86::SHLD16rri8, X86::SHLD16mri8 },
    { X86::SHLD32rrCL, X86::SHLD32mrCL },
    { X86::SHLD32rri8, X86::SHLD32mri8 },
    { X86::SHLD64rrCL, X86::SHLD64mrCL },
    { X86::SHLD64rri8, X86::SHLD64mri8 },
    { X86::SHR16r1, X86::SHR16m1 },
    { X86::SHR16rCL, X86::SHR16mCL },
    { X86::SHR16ri, X86::SHR16mi },
    { X86::SHR32r1, X86::SHR32m1 },
    { X86::SHR32rCL, X86::SHR32mCL },
    { X86::SHR32ri, X86::SHR32mi },
    { X86::SHR64r1, X86::SHR64m1 },
    { X86::SHR64rCL, X86::SHR64mCL },
    { X86::SHR64ri, X86::SHR64mi },
    { X86::SHR8r1, X86::SHR8m1 },
    { X86::SHR8rCL, X86::SHR8mCL },
    { X86::SHR8ri, X86::SHR8mi },
    { X86::SHRD16rrCL, X86::SHRD16mrCL },
    { X86::SHRD16rri8, X86::SHRD16mri8 },
    { X86::SHRD32rrCL, X86::SHRD32mrCL },
    { X86::SHRD32rri8, X86::SHRD32mri8 },
    { X86::SHRD64rrCL, X86::SHRD64mrCL },
    { X86::SHRD64rri8, X86::SHRD64mri8 },
    { X86::SUB16ri, X86::SUB16mi },
    { X86::SUB16ri8, X86::SUB16mi8 },
    { X86::SUB16rr, X86::SUB16mr },
    { X86::SUB32ri, X86::SUB32mi },
    { X86::SUB32ri8, X86::SUB32mi8 },
    { X86::SUB32rr, X86::SUB32mr },
    { X86::SUB64ri32, X86::SUB64mi32 },
    { X86::SUB64ri8, X86::SUB64mi8 },
    { X86::SUB64rr, X86::SUB64mr },
    { X86::SUB8ri, X86::SUB8mi },
    { X86::SUB8rr, X86::SUB8mr },
    { X86::XOR16ri, X86::XOR16mi },
    { X86::XOR16ri8, X86::XOR16mi8 },
    { X86::XOR16rr, X86::XOR16mr },
    { X86::XOR32ri, X86::XOR32mi },
    { X86::XOR32ri8, X86::XOR32mi8 },
    { X86::XOR32rr, X86::XOR32mr },
    { X86::XOR64ri32, X86::XOR64mi32 },
    { X86::XOR64ri8, X86::XOR64mi8 },
    { X86::XOR64rr, X86::XOR64mr },
    { X86::XOR8ri, X86::XOR8mi },
    { X86::XOR8rr, X86::XOR8mr }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
    unsigned RegOp = OpTbl2Addr[i][0];
    unsigned MemOp = OpTbl2Addr[i][1] & ~TB_FLAGS;
    assert(!RegOp2MemOpTable2Addr.count(RegOp) && "Duplicated entries?");
    RegOp2MemOpTable2Addr[RegOp] = std::make_pair(MemOp, 0U);

    // If this is not a reversible operation (because there is a many->one
    // mapping), don't insert the reverse of the operation into MemOp2RegOpTable.
    if (OpTbl2Addr[i][1] & TB_NOT_REVERSABLE)
      continue;

    // Index 0, folded load and store, no alignment requirement.
    unsigned AuxInfo = 0 | (1 << 4) | (1 << 5);
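    // AuxInfo encoding: the low bits hold the fold-table index (0 here),
    // bit 4 marks a folded load and bit 5 marks a folded store.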

    assert(!MemOp2RegOpTable.count(MemOp) &&
           "Duplicated entries in unfolding maps?");
    MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
  }

  // The third value of each entry is 1 if the memory form folds a load and 0
  // if it folds a store; the fourth value is the required memory alignment
  // (0 = no requirement).
  static const unsigned OpTbl0[][4] = {
    { X86::BT16ri8, X86::BT16mi8, 1, 0 },
    { X86::BT32ri8, X86::BT32mi8, 1, 0 },
    { X86::BT64ri8, X86::BT64mi8, 1, 0 },
    { X86::CALL32r, X86::CALL32m, 1, 0 },
    { X86::CALL64r, X86::CALL64m, 1, 0 },
    { X86::WINCALL64r, X86::WINCALL64m, 1, 0 },
    { X86::CMP16ri, X86::CMP16mi, 1, 0 },
    { X86::CMP16ri8, X86::CMP16mi8, 1, 0 },
    { X86::CMP16rr, X86::CMP16mr, 1, 0 },
    { X86::CMP32ri, X86::CMP32mi, 1, 0 },
    { X86::CMP32ri8, X86::CMP32mi8, 1, 0 },
    { X86::CMP32rr, X86::CMP32mr, 1, 0 },
    { X86::CMP64ri32, X86::CMP64mi32, 1, 0 },
    { X86::CMP64ri8, X86::CMP64mi8, 1, 0 },
    { X86::CMP64rr, X86::CMP64mr, 1, 0 },
    { X86::CMP8ri, X86::CMP8mi, 1, 0 },
    { X86::CMP8rr, X86::CMP8mr, 1, 0 },
    { X86::DIV16r, X86::DIV16m, 1, 0 },
    { X86::DIV32r, X86::DIV32m, 1, 0 },
    { X86::DIV64r, X86::DIV64m, 1, 0 },
    { X86::DIV8r, X86::DIV8m, 1, 0 },
    { X86::EXTRACTPSrr, X86::EXTRACTPSmr, 0, 16 },
    { X86::FsMOVAPDrr, X86::MOVSDmr | TB_NOT_REVERSABLE, 0, 0 },
    { X86::FsMOVAPSrr, X86::MOVSSmr | TB_NOT_REVERSABLE, 0, 0 },
    { X86::IDIV16r, X86::IDIV16m, 1, 0 },
    { X86::IDIV32r, X86::IDIV32m, 1, 0 },
    { X86::IDIV64r, X86::IDIV64m, 1, 0 },
    { X86::IDIV8r, X86::IDIV8m, 1, 0 },
    { X86::IMUL16r, X86::IMUL16m, 1, 0 },
    { X86::IMUL32r, X86::IMUL32m, 1, 0 },
    { X86::IMUL64r, X86::IMUL64m, 1, 0 },
    { X86::IMUL8r, X86::IMUL8m, 1, 0 },
    { X86::JMP32r, X86::JMP32m, 1, 0 },
    { X86::JMP64r, X86::JMP64m, 1, 0 },
    { X86::MOV16ri, X86::MOV16mi, 0, 0 },
    { X86::MOV16rr, X86::MOV16mr, 0, 0 },
    { X86::MOV32ri, X86::MOV32mi, 0, 0 },
    { X86::MOV32rr, X86::MOV32mr, 0, 0 },
    { X86::MOV32rr_TC, X86::MOV32mr_TC, 0, 0 },
    { X86::MOV64ri32, X86::MOV64mi32, 0, 0 },
    { X86::MOV64rr, X86::MOV64mr, 0, 0 },
    { X86::MOV8ri, X86::MOV8mi, 0, 0 },
    { X86::MOV8rr, X86::MOV8mr, 0, 0 },
    { X86::MOV8rr_NOREX, X86::MOV8mr_NOREX, 0, 0 },
    { X86::MOVAPDrr, X86::MOVAPDmr, 0, 16 },
    { X86::MOVAPSrr, X86::MOVAPSmr, 0, 16 },
    { X86::MOVDQArr, X86::MOVDQAmr, 0, 16 },
    { X86::MOVPDI2DIrr, X86::MOVPDI2DImr, 0, 0 },
    { X86::MOVPQIto64rr, X86::MOVPQI2QImr, 0, 0 },
    { X86::MOVSDto64rr, X86::MOVSDto64mr, 0, 0 },
    { X86::MOVSS2DIrr, X86::MOVSS2DImr, 0, 0 },
    { X86::MOVUPDrr, X86::MOVUPDmr, 0, 0 },
    { X86::MOVUPSrr, X86::MOVUPSmr, 0, 0 },
    { X86::MUL16r, X86::MUL16m, 1, 0 },
    { X86::MUL32r, X86::MUL32m, 1, 0 },
    { X86::MUL64r, X86::MUL64m, 1, 0 },
    { X86::MUL8r, X86::MUL8m, 1, 0 },
    { X86::SETAEr, X86::SETAEm, 0, 0 },
    { X86::SETAr, X86::SETAm, 0, 0 },
    { X86::SETBEr, X86::SETBEm, 0, 0 },
    { X86::SETBr, X86::SETBm, 0, 0 },
    { X86::SETEr, X86::SETEm, 0, 0 },
    { X86::SETGEr, X86::SETGEm, 0, 0 },
    { X86::SETGr, X86::SETGm, 0, 0 },
    { X86::SETLEr, X86::SETLEm, 0, 0 },
    { X86::SETLr, X86::SETLm, 0, 0 },
    { X86::SETNEr, X86::SETNEm, 0, 0 },
    { X86::SETNOr, X86::SETNOm, 0, 0 },
    { X86::SETNPr, X86::SETNPm, 0, 0 },
    { X86::SETNSr, X86::SETNSm, 0, 0 },
    { X86::SETOr, X86::SETOm, 0, 0 },
    { X86::SETPr, X86::SETPm, 0, 0 },
    { X86::SETSr, X86::SETSm, 0, 0 },
    { X86::TAILJMPr, X86::TAILJMPm, 1, 0 },
    { X86::TAILJMPr64, X86::TAILJMPm64, 1, 0 },
    { X86::TEST16ri, X86::TEST16mi, 1, 0 },
    { X86::TEST32ri, X86::TEST32mi, 1, 0 },
    { X86::TEST64ri32, X86::TEST64mi32, 1, 0 },
    { X86::TEST8ri, X86::TEST8mi, 1, 0 }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl0); i != e; ++i) {
    unsigned RegOp = OpTbl0[i][0];
    unsigned MemOp = OpTbl0[i][1] & ~TB_FLAGS;
    unsigned FoldedLoad = OpTbl0[i][2];
    unsigned Align = OpTbl0[i][3];
    assert(!RegOp2MemOpTable0.count(RegOp) && "Duplicated entries?");
    RegOp2MemOpTable0[RegOp] = std::make_pair(MemOp, Align);

    // If this is not a reversible operation (because there is a many->one
    // mapping), don't insert the reverse of the operation into MemOp2RegOpTable.
    if (OpTbl0[i][1] & TB_NOT_REVERSABLE)
      continue;

    // Index 0, folded load or store.
    unsigned AuxInfo = 0 | (FoldedLoad << 4) | ((FoldedLoad^1) << 5);
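    // Exactly one of bit 4 (folded load) or bit 5 (folded store) is set here.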
    assert(!MemOp2RegOpTable.count(MemOp) && "Duplicated entries?");
    MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
  }

  static const unsigned OpTbl1[][3] = {
    { X86::CMP16rr, X86::CMP16rm, 0 },
    { X86::CMP32rr, X86::CMP32rm, 0 },
    { X86::CMP64rr, X86::CMP64rm, 0 },
    { X86::CMP8rr, X86::CMP8rm, 0 },
    { X86::CVTSD2SSrr, X86::CVTSD2SSrm, 0 },
    { X86::CVTSI2SD64rr, X86::CVTSI2SD64rm, 0 },
    { X86::CVTSI2SDrr, X86::CVTSI2SDrm, 0 },
    { X86::CVTSI2SS64rr, X86::CVTSI2SS64rm, 0 },
    { X86::CVTSI2SSrr, X86::CVTSI2SSrm, 0 },
    { X86::CVTSS2SDrr, X86::CVTSS2SDrm, 0 },
    { X86::CVTTSD2SI64rr, X86::CVTTSD2SI64rm, 0 },
    { X86::CVTTSD2SIrr, X86::CVTTSD2SIrm, 0 },
    { X86::CVTTSS2SI64rr, X86::CVTTSS2SI64rm, 0 },
    { X86::CVTTSS2SIrr, X86::CVTTSS2SIrm, 0 },
    { X86::FsMOVAPDrr, X86::MOVSDrm | TB_NOT_REVERSABLE, 0 },
    { X86::FsMOVAPSrr, X86::MOVSSrm | TB_NOT_REVERSABLE, 0 },
    { X86::IMUL16rri, X86::IMUL16rmi, 0 },
    { X86::IMUL16rri8, X86::IMUL16rmi8, 0 },
    { X86::IMUL32rri, X86::IMUL32rmi, 0 },
    { X86::IMUL32rri8, X86::IMUL32rmi8, 0 },
    { X86::IMUL64rri32, X86::IMUL64rmi32, 0 },
    { X86::IMUL64rri8, X86::IMUL64rmi8, 0 },
    { X86::Int_CMPSDrr, X86::Int_CMPSDrm, 0 },
    { X86::Int_CMPSSrr, X86::Int_CMPSSrm, 0 },
    { X86::Int_COMISDrr, X86::Int_COMISDrm, 0 },
    { X86::Int_COMISSrr, X86::Int_COMISSrm, 0 },
    { X86::Int_CVTDQ2PDrr, X86::Int_CVTDQ2PDrm, 16 },
    { X86::Int_CVTDQ2PSrr, X86::Int_CVTDQ2PSrm, 16 },
    { X86::Int_CVTPD2DQrr, X86::Int_CVTPD2DQrm, 16 },
    { X86::Int_CVTPD2PSrr, X86::Int_CVTPD2PSrm, 16 },
    { X86::Int_CVTPS2DQrr, X86::Int_CVTPS2DQrm, 16 },
    { X86::Int_CVTPS2PDrr, X86::Int_CVTPS2PDrm, 0 },
    { X86::CVTSD2SI64rr, X86::CVTSD2SI64rm, 0 },
    { X86::CVTSD2SIrr, X86::CVTSD2SIrm, 0 },
    { X86::Int_CVTSD2SSrr, X86::Int_CVTSD2SSrm, 0 },
    { X86::Int_CVTSI2SD64rr, X86::Int_CVTSI2SD64rm, 0 },
    { X86::Int_CVTSI2SDrr, X86::Int_CVTSI2SDrm, 0 },
    { X86::Int_CVTSI2SS64rr, X86::Int_CVTSI2SS64rm, 0 },
    { X86::Int_CVTSI2SSrr, X86::Int_CVTSI2SSrm, 0 },
    { X86::Int_CVTSS2SDrr, X86::Int_CVTSS2SDrm, 0 },
    { X86::Int_CVTSS2SI64rr, X86::Int_CVTSS2SI64rm, 0 },
    { X86::Int_CVTSS2SIrr, X86::Int_CVTSS2SIrm, 0 },
    { X86::CVTTPD2DQrr, X86::CVTTPD2DQrm, 16 },
    { X86::CVTTPS2DQrr, X86::CVTTPS2DQrm, 16 },
    { X86::Int_CVTTSD2SI64rr, X86::Int_CVTTSD2SI64rm, 0 },
    { X86::Int_CVTTSD2SIrr, X86::Int_CVTTSD2SIrm, 0 },
    { X86::Int_CVTTSS2SI64rr, X86::Int_CVTTSS2SI64rm, 0 },
    { X86::Int_CVTTSS2SIrr, X86::Int_CVTTSS2SIrm, 0 },
    { X86::Int_UCOMISDrr, X86::Int_UCOMISDrm, 0 },
    { X86::Int_UCOMISSrr, X86::Int_UCOMISSrm, 0 },
    { X86::MOV16rr, X86::MOV16rm, 0 },
    { X86::MOV32rr, X86::MOV32rm, 0 },
    { X86::MOV32rr_TC, X86::MOV32rm_TC, 0 },
    { X86::MOV64rr, X86::MOV64rm, 0 },
    { X86::MOV64toPQIrr, X86::MOVQI2PQIrm, 0 },
    { X86::MOV64toSDrr, X86::MOV64toSDrm, 0 },
    { X86::MOV8rr, X86::MOV8rm, 0 },
    { X86::MOVAPDrr, X86::MOVAPDrm, 16 },
    { X86::MOVAPSrr, X86::MOVAPSrm, 16 },
    { X86::MOVDDUPrr, X86::MOVDDUPrm, 0 },
    { X86::MOVDI2PDIrr, X86::MOVDI2PDIrm, 0 },
    { X86::MOVDI2SSrr, X86::MOVDI2SSrm, 0 },
    { X86::MOVDQArr, X86::MOVDQArm, 16 },
    { X86::MOVSHDUPrr, X86::MOVSHDUPrm, 16 },
    { X86::MOVSLDUPrr, X86::MOVSLDUPrm, 16 },
    { X86::MOVSX16rr8, X86::MOVSX16rm8, 0 },
    { X86::MOVSX32rr16, X86::MOVSX32rm16, 0 },
    { X86::MOVSX32rr8, X86::MOVSX32rm8, 0 },
    { X86::MOVSX64rr16, X86::MOVSX64rm16, 0 },
    { X86::MOVSX64rr32, X86::MOVSX64rm32, 0 },
    { X86::MOVSX64rr8, X86::MOVSX64rm8, 0 },
    { X86::MOVUPDrr, X86::MOVUPDrm, 16 },
    { X86::MOVUPSrr, X86::MOVUPSrm, 0 },
    { X86::MOVZDI2PDIrr, X86::MOVZDI2PDIrm, 0 },
    { X86::MOVZQI2PQIrr, X86::MOVZQI2PQIrm, 0 },
    { X86::MOVZPQILo2PQIrr, X86::MOVZPQILo2PQIrm, 16 },
    { X86::MOVZX16rr8, X86::MOVZX16rm8, 0 },
    { X86::MOVZX32rr16, X86::MOVZX32rm16, 0 },
    { X86::MOVZX32_NOREXrr8, X86::MOVZX32_NOREXrm8, 0 },
    { X86::MOVZX32rr8, X86::MOVZX32rm8, 0 },
    { X86::MOVZX64rr16, X86::MOVZX64rm16, 0 },
    { X86::MOVZX64rr32, X86::MOVZX64rm32, 0 },
    { X86::MOVZX64rr8, X86::MOVZX64rm8, 0 },
    { X86::PSHUFDri, X86::PSHUFDmi, 16 },
    { X86::PSHUFHWri, X86::PSHUFHWmi, 16 },
    { X86::PSHUFLWri, X86::PSHUFLWmi, 16 },
    { X86::RCPPSr, X86::RCPPSm, 16 },
    { X86::RCPPSr_Int, X86::RCPPSm_Int, 16 },
    { X86::RSQRTPSr, X86::RSQRTPSm, 16 },
    { X86::RSQRTPSr_Int, X86::RSQRTPSm_Int, 16 },
    { X86::RSQRTSSr, X86::RSQRTSSm, 0 },
    { X86::RSQRTSSr_Int, X86::RSQRTSSm_Int, 0 },
    { X86::SQRTPDr, X86::SQRTPDm, 16 },
    { X86::SQRTPDr_Int, X86::SQRTPDm_Int, 16 },
    { X86::SQRTPSr, X86::SQRTPSm, 16 },
    { X86::SQRTPSr_Int, X86::SQRTPSm_Int, 16 },
    { X86::SQRTSDr, X86::SQRTSDm, 0 },
    { X86::SQRTSDr_Int, X86::SQRTSDm_Int, 0 },
    { X86::SQRTSSr, X86::SQRTSSm, 0 },
    { X86::SQRTSSr_Int, X86::SQRTSSm_Int, 0 },
    { X86::TEST16rr, X86::TEST16rm, 0 },
    { X86::TEST32rr, X86::TEST32rm, 0 },
    { X86::TEST64rr, X86::TEST64rm, 0 },
    { X86::TEST8rr, X86::TEST8rm, 0 },
    // FIXME: TEST*rr EAX,EAX ---> CMP [mem], 0
    { X86::UCOMISDrr, X86::UCOMISDrm, 0 },
    { X86::UCOMISSrr, X86::UCOMISSrm, 0 }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl1); i != e; ++i) {
    unsigned RegOp = OpTbl1[i][0];
    unsigned MemOp = OpTbl1[i][1] & ~TB_FLAGS;
    unsigned Align = OpTbl1[i][2];
    assert(!RegOp2MemOpTable1.count(RegOp) && "Duplicate entries");
    RegOp2MemOpTable1[RegOp] = std::make_pair(MemOp, Align);

    // If this is not a reversible operation (because there is a many->one
    // mapping), don't insert the reverse of the operation into MemOp2RegOpTable.
    if (OpTbl1[i][1] & TB_NOT_REVERSABLE)
      continue;

    // Index 1, folded load.
    unsigned AuxInfo = 1 | (1 << 4);
    assert(!MemOp2RegOpTable.count(MemOp) && "Duplicate entries");
    MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
  }

  static const unsigned OpTbl2[][3] = {
    { X86::ADC32rr, X86::ADC32rm, 0 },
    { X86::ADC64rr, X86::ADC64rm, 0 },
    { X86::ADD16rr, X86::ADD16rm, 0 },
    { X86::ADD16rr_DB, X86::ADD16rm | TB_NOT_REVERSABLE, 0 },
    { X86::ADD32rr, X86::ADD32rm, 0 },
    { X86::ADD32rr_DB, X86::ADD32rm | TB_NOT_REVERSABLE, 0 },
    { X86::ADD64rr, X86::ADD64rm, 0 },
    { X86::ADD64rr_DB, X86::ADD64rm | TB_NOT_REVERSABLE, 0 },
    { X86::ADD8rr, X86::ADD8rm, 0 },
    { X86::ADDPDrr, X86::ADDPDrm, 16 },
    { X86::ADDPSrr, X86::ADDPSrm, 16 },
    { X86::ADDSDrr, X86::ADDSDrm, 0 },
    { X86::ADDSSrr, X86::ADDSSrm, 0 },
    { X86::ADDSUBPDrr, X86::ADDSUBPDrm, 16 },
    { X86::ADDSUBPSrr, X86::ADDSUBPSrm, 16 },
    { X86::AND16rr, X86::AND16rm, 0 },
    { X86::AND32rr, X86::AND32rm, 0 },
    { X86::AND64rr, X86::AND64rm, 0 },
    { X86::AND8rr, X86::AND8rm, 0 },
    { X86::ANDNPDrr, X86::ANDNPDrm, 16 },
    { X86::ANDNPSrr, X86::ANDNPSrm, 16 },
    { X86::ANDPDrr, X86::ANDPDrm, 16 },
    { X86::ANDPSrr, X86::ANDPSrm, 16 },
    { X86::CMOVA16rr, X86::CMOVA16rm, 0 },
    { X86::CMOVA32rr, X86::CMOVA32rm, 0 },
    { X86::CMOVA64rr, X86::CMOVA64rm, 0 },
    { X86::CMOVAE16rr, X86::CMOVAE16rm, 0 },
    { X86::CMOVAE32rr, X86::CMOVAE32rm, 0 },
    { X86::CMOVAE64rr, X86::CMOVAE64rm, 0 },
    { X86::CMOVB16rr, X86::CMOVB16rm, 0 },
    { X86::CMOVB32rr, X86::CMOVB32rm, 0 },
    { X86::CMOVB64rr, X86::CMOVB64rm, 0 },
    { X86::CMOVBE16rr, X86::CMOVBE16rm, 0 },
    { X86::CMOVBE32rr, X86::CMOVBE32rm, 0 },
    { X86::CMOVBE64rr, X86::CMOVBE64rm, 0 },
    { X86::CMOVE16rr, X86::CMOVE16rm, 0 },
    { X86::CMOVE32rr, X86::CMOVE32rm, 0 },
    { X86::CMOVE64rr, X86::CMOVE64rm, 0 },
    { X86::CMOVG16rr, X86::CMOVG16rm, 0 },
    { X86::CMOVG32rr, X86::CMOVG32rm, 0 },
    { X86::CMOVG64rr, X86::CMOVG64rm, 0 },
    { X86::CMOVGE16rr, X86::CMOVGE16rm, 0 },
    { X86::CMOVGE32rr, X86::CMOVGE32rm, 0 },
    { X86::CMOVGE64rr, X86::CMOVGE64rm, 0 },
    { X86::CMOVL16rr, X86::CMOVL16rm, 0 },
    { X86::CMOVL32rr, X86::CMOVL32rm, 0 },
    { X86::CMOVL64rr, X86::CMOVL64rm, 0 },
    { X86::CMOVLE16rr, X86::CMOVLE16rm, 0 },
    { X86::CMOVLE32rr, X86::CMOVLE32rm, 0 },
    { X86::CMOVLE64rr, X86::CMOVLE64rm, 0 },
    { X86::CMOVNE16rr, X86::CMOVNE16rm, 0 },
    { X86::CMOVNE32rr, X86::CMOVNE32rm, 0 },
    { X86::CMOVNE64rr, X86::CMOVNE64rm, 0 },
    { X86::CMOVNO16rr, X86::CMOVNO16rm, 0 },
    { X86::CMOVNO32rr, X86::CMOVNO32rm, 0 },
    { X86::CMOVNO64rr, X86::CMOVNO64rm, 0 },
    { X86::CMOVNP16rr, X86::CMOVNP16rm, 0 },
    { X86::CMOVNP32rr, X86::CMOVNP32rm, 0 },
    { X86::CMOVNP64rr, X86::CMOVNP64rm, 0 },
    { X86::CMOVNS16rr, X86::CMOVNS16rm, 0 },
    { X86::CMOVNS32rr, X86::CMOVNS32rm, 0 },
    { X86::CMOVNS64rr, X86::CMOVNS64rm, 0 },
    { X86::CMOVO16rr, X86::CMOVO16rm, 0 },
    { X86::CMOVO32rr, X86::CMOVO32rm, 0 },
    { X86::CMOVO64rr, X86::CMOVO64rm, 0 },
    { X86::CMOVP16rr, X86::CMOVP16rm, 0 },
    { X86::CMOVP32rr, X86::CMOVP32rm, 0 },
    { X86::CMOVP64rr, X86::CMOVP64rm, 0 },
    { X86::CMOVS16rr, X86::CMOVS16rm, 0 },
    { X86::CMOVS32rr, X86::CMOVS32rm, 0 },
    { X86::CMOVS64rr, X86::CMOVS64rm, 0 },
    { X86::CMPPDrri, X86::CMPPDrmi, 16 },
    { X86::CMPPSrri, X86::CMPPSrmi, 16 },
    { X86::CMPSDrr, X86::CMPSDrm, 0 },
    { X86::CMPSSrr, X86::CMPSSrm, 0 },
    { X86::DIVPDrr, X86::DIVPDrm, 16 },
    { X86::DIVPSrr, X86::DIVPSrm, 16 },
    { X86::DIVSDrr, X86::DIVSDrm, 0 },
    { X86::DIVSSrr, X86::DIVSSrm, 0 },
    { X86::FsANDNPDrr, X86::FsANDNPDrm, 16 },
    { X86::FsANDNPSrr, X86::FsANDNPSrm, 16 },
    { X86::FsANDPDrr, X86::FsANDPDrm, 16 },
    { X86::FsANDPSrr, X86::FsANDPSrm, 16 },
    { X86::FsORPDrr, X86::FsORPDrm, 16 },
    { X86::FsORPSrr, X86::FsORPSrm, 16 },
    { X86::FsXORPDrr, X86::FsXORPDrm, 16 },
    { X86::FsXORPSrr, X86::FsXORPSrm, 16 },
    { X86::HADDPDrr, X86::HADDPDrm, 16 },
    { X86::HADDPSrr, X86::HADDPSrm, 16 },
    { X86::HSUBPDrr, X86::HSUBPDrm, 16 },
    { X86::HSUBPSrr, X86::HSUBPSrm, 16 },
    { X86::IMUL16rr, X86::IMUL16rm, 0 },
    { X86::IMUL32rr, X86::IMUL32rm, 0 },
    { X86::IMUL64rr, X86::IMUL64rm, 0 },
    { X86::MAXPDrr, X86::MAXPDrm, 16 },
    { X86::MAXPDrr_Int, X86::MAXPDrm_Int, 16 },
    { X86::MAXPSrr, X86::MAXPSrm, 16 },
    { X86::MAXPSrr_Int, X86::MAXPSrm_Int, 16 },
    { X86::MAXSDrr, X86::MAXSDrm, 0 },
    { X86::MAXSDrr_Int, X86::MAXSDrm_Int, 0 },
    { X86::MAXSSrr, X86::MAXSSrm, 0 },
    { X86::MAXSSrr_Int, X86::MAXSSrm_Int, 0 },
    { X86::MINPDrr, X86::MINPDrm, 16 },
    { X86::MINPDrr_Int, X86::MINPDrm_Int, 16 },
    { X86::MINPSrr, X86::MINPSrm, 16 },
    { X86::MINPSrr_Int, X86::MINPSrm_Int, 16 },
    { X86::MINSDrr, X86::MINSDrm, 0 },
    { X86::MINSDrr_Int, X86::MINSDrm_Int, 0 },
    { X86::MINSSrr, X86::MINSSrm, 0 },
    { X86::MINSSrr_Int, X86::MINSSrm_Int, 0 },
    { X86::MULPDrr, X86::MULPDrm, 16 },
    { X86::MULPSrr, X86::MULPSrm, 16 },
    { X86::MULSDrr, X86::MULSDrm, 0 },
    { X86::MULSSrr, X86::MULSSrm, 0 },
    { X86::OR16rr, X86::OR16rm, 0 },
    { X86::OR32rr, X86::OR32rm, 0 },
    { X86::OR64rr, X86::OR64rm, 0 },
    { X86::OR8rr, X86::OR8rm, 0 },
    { X86::ORPDrr, X86::ORPDrm, 16 },
    { X86::ORPSrr, X86::ORPSrm, 16 },
    { X86::PACKSSDWrr, X86::PACKSSDWrm, 16 },
    { X86::PACKSSWBrr, X86::PACKSSWBrm, 16 },
    { X86::PACKUSWBrr, X86::PACKUSWBrm, 16 },
    { X86::PADDBrr, X86::PADDBrm, 16 },
    { X86::PADDDrr, X86::PADDDrm, 16 },
    { X86::PADDQrr, X86::PADDQrm, 16 },
    { X86::PADDSBrr, X86::PADDSBrm, 16 },
    { X86::PADDSWrr, X86::PADDSWrm, 16 },
    { X86::PADDWrr, X86::PADDWrm, 16 },
    { X86::PANDNrr, X86::PANDNrm, 16 },
    { X86::PANDrr, X86::PANDrm, 16 },
    { X86::PAVGBrr, X86::PAVGBrm, 16 },
    { X86::PAVGWrr, X86::PAVGWrm, 16 },
    { X86::PCMPEQBrr, X86::PCMPEQBrm, 16 },
    { X86::PCMPEQDrr, X86::PCMPEQDrm, 16 },
    { X86::PCMPEQWrr, X86::PCMPEQWrm, 16 },
    { X86::PCMPGTBrr, X86::PCMPGTBrm, 16 },
    { X86::PCMPGTDrr, X86::PCMPGTDrm, 16 },
    { X86::PCMPGTWrr, X86::PCMPGTWrm, 16 },
    { X86::PINSRWrri, X86::PINSRWrmi, 16 },
    { X86::PMADDWDrr, X86::PMADDWDrm, 16 },
    { X86::PMAXSWrr, X86::PMAXSWrm, 16 },
    { X86::PMAXUBrr, X86::PMAXUBrm, 16 },
    { X86::PMINSWrr, X86::PMINSWrm, 16 },
    { X86::PMINUBrr, X86::PMINUBrm, 16 },
    { X86::PMULDQrr, X86::PMULDQrm, 16 },
    { X86::PMULHUWrr, X86::PMULHUWrm, 16 },
    { X86::PMULHWrr, X86::PMULHWrm, 16 },
    { X86::PMULLDrr, X86::PMULLDrm, 16 },
    { X86::PMULLWrr, X86::PMULLWrm, 16 },
    { X86::PMULUDQrr, X86::PMULUDQrm, 16 },
    { X86::PORrr, X86::PORrm, 16 },
    { X86::PSADBWrr, X86::PSADBWrm, 16 },
    { X86::PSLLDrr, X86::PSLLDrm, 16 },
    { X86::PSLLQrr, X86::PSLLQrm, 16 },
    { X86::PSLLWrr, X86::PSLLWrm, 16 },
    { X86::PSRADrr, X86::PSRADrm, 16 },
    { X86::PSRAWrr, X86::PSRAWrm, 16 },
    { X86::PSRLDrr, X86::PSRLDrm, 16 },
    { X86::PSRLQrr, X86::PSRLQrm, 16 },
    { X86::PSRLWrr, X86::PSRLWrm, 16 },
    { X86::PSUBBrr, X86::PSUBBrm, 16 },
    { X86::PSUBDrr, X86::PSUBDrm, 16 },
    { X86::PSUBSBrr, X86::PSUBSBrm, 16 },
    { X86::PSUBSWrr, X86::PSUBSWrm, 16 },
    { X86::PSUBWrr, X86::PSUBWrm, 16 },
    { X86::PUNPCKHBWrr, X86::PUNPCKHBWrm, 16 },
    { X86::PUNPCKHDQrr, X86::PUNPCKHDQrm, 16 },
    { X86::PUNPCKHQDQrr, X86::PUNPCKHQDQrm, 16 },
    { X86::PUNPCKHWDrr, X86::PUNPCKHWDrm, 16 },
    { X86::PUNPCKLBWrr, X86::PUNPCKLBWrm, 16 },
    { X86::PUNPCKLDQrr, X86::PUNPCKLDQrm, 16 },
    { X86::PUNPCKLQDQrr, X86::PUNPCKLQDQrm, 16 },
    { X86::PUNPCKLWDrr, X86::PUNPCKLWDrm, 16 },
    { X86::PXORrr, X86::PXORrm, 16 },
    { X86::SBB32rr, X86::SBB32rm, 0 },
    { X86::SBB64rr, X86::SBB64rm, 0 },
    { X86::SHUFPDrri, X86::SHUFPDrmi, 16 },
    { X86::SHUFPSrri, X86::SHUFPSrmi, 16 },
    { X86::SUB16rr, X86::SUB16rm, 0 },
    { X86::SUB32rr, X86::SUB32rm, 0 },
    { X86::SUB64rr, X86::SUB64rm, 0 },
    { X86::SUB8rr, X86::SUB8rm, 0 },
    { X86::SUBPDrr, X86::SUBPDrm, 16 },
    { X86::SUBPSrr, X86::SUBPSrm, 16 },
    { X86::SUBSDrr, X86::SUBSDrm, 0 },
    { X86::SUBSSrr, X86::SUBSSrm, 0 },
    // FIXME: TEST*rr -> swapped operand of TEST*mr.
    { X86::UNPCKHPDrr, X86::UNPCKHPDrm, 16 },
    { X86::UNPCKHPSrr, X86::UNPCKHPSrm, 16 },
    { X86::UNPCKLPDrr, X86::UNPCKLPDrm, 16 },
    { X86::UNPCKLPSrr, X86::UNPCKLPSrm, 16 },
    { X86::XOR16rr, X86::XOR16rm, 0 },
    { X86::XOR32rr, X86::XOR32rm, 0 },
    { X86::XOR64rr, X86::XOR64rm, 0 },
    { X86::XOR8rr, X86::XOR8rm, 0 },
    { X86::XORPDrr, X86::XORPDrm, 16 },
    { X86::XORPSrr, X86::XORPSrm, 16 }
  };

  for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
    unsigned RegOp = OpTbl2[i][0];
    unsigned MemOp = OpTbl2[i][1] & ~TB_FLAGS;
    unsigned Align = OpTbl2[i][2];

    assert(!RegOp2MemOpTable2.count(RegOp) && "Duplicate entry!");
    RegOp2MemOpTable2[RegOp] = std::make_pair(MemOp, Align);

    // If this is not a reversible operation (because there is a many->one
    // mapping), don't insert the reverse of the operation into MemOp2RegOpTable.
    if (OpTbl2[i][1] & TB_NOT_REVERSABLE)
      continue;

    // Index 2, folded load.
    unsigned AuxInfo = 2 | (1 << 4);
    assert(!MemOp2RegOpTable.count(MemOp) &&
           "Duplicated entries in unfolding maps?");
    MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
  }
}

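/// isCoalescableExtInstr - Return true if MI is a sign- or zero-extension
/// that can be treated like a copy by the coalescer, reporting the source
/// register, destination register, and sub-register index of the input value.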
bool
X86InstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                    unsigned &SrcReg, unsigned &DstReg,
                                    unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: break;
  case X86::MOVSX16rr8:
  case X86::MOVZX16rr8:
  case X86::MOVSX32rr8:
  case X86::MOVZX32rr8:
  case X86::MOVSX64rr8:
  case X86::MOVZX64rr8:
    if (!TM.getSubtarget<X86Subtarget>().is64Bit())
      // It's not always legal to reference the low 8-bit of the larger
      // register in 32-bit mode.
      return false;
  case X86::MOVSX32rr16:
  case X86::MOVZX32rr16:
  case X86::MOVSX64rr16:
  case X86::MOVZX64rr16:
  case X86::MOVSX64rr32:
  case X86::MOVZX64rr32: {
    if (MI.getOperand(0).getSubReg() || MI.getOperand(1).getSubReg())
      // Be conservative.
      return false;
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable(0);
      break;
    case X86::MOVSX16rr8:
    case X86::MOVZX16rr8:
    case X86::MOVSX32rr8:
    case X86::MOVZX32rr8:
    case X86::MOVSX64rr8:
    case X86::MOVZX64rr8:
      SubIdx = X86::sub_8bit;
      break;
    case X86::MOVSX32rr16:
    case X86::MOVZX32rr16:
    case X86::MOVSX64rr16:
    case X86::MOVZX64rr16:
      SubIdx = X86::sub_16bit;
      break;
    case X86::MOVSX64rr32:
    case X86::MOVZX64rr32:
      SubIdx = X86::sub_32bit;
      break;
    }
    return true;
  }
  }
  return false;
}

/// isFrameOperand - Return true and the FrameIndex if the specified
/// operand and following operands form a reference to the stack frame.
bool X86InstrInfo::isFrameOperand(const MachineInstr *MI, unsigned int Op,
                                  int &FrameIndex) const {
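  // The operands checked below are the frame index, the scale immediate, the
  // index register, and the displacement; only the simple form (scale 1, no
  // index register, zero displacement) counts as a direct stack reference.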
  if (MI->getOperand(Op).isFI() && MI->getOperand(Op+1).isImm() &&
      MI->getOperand(Op+2).isReg() && MI->getOperand(Op+3).isImm() &&
      MI->getOperand(Op+1).getImm() == 1 &&
      MI->getOperand(Op+2).getReg() == 0 &&
      MI->getOperand(Op+3).getImm() == 0) {
    FrameIndex = MI->getOperand(Op).getIndex();
    return true;
  }
  return false;
}

static bool isFrameLoadOpcode(int Opcode) {
  switch (Opcode) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV32rm_TC:
  case X86::MOV64rm:
  case X86::MOV64rm_TC:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    return true;
    break;
  }
  return false;
}

static bool isFrameStoreOpcode(int Opcode) {
  switch (Opcode) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV32mr:
  case X86::MOV32mr_TC:
  case X86::MOV64mr:
  case X86::MOV64mr_TC:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MOVDQAmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
    return true;
  }
  return false;
}

unsigned X86InstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                           int &FrameIndex) const {
  if (isFrameLoadOpcode(MI->getOpcode()))
    if (MI->getOperand(0).getSubReg() == 0 && isFrameOperand(MI, 1, FrameIndex))
      return MI->getOperand(0).getReg();
  return 0;
}

unsigned X86InstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                 int &FrameIndex) const {
  if (isFrameLoadOpcode(MI->getOpcode())) {
    unsigned Reg;
    if ((Reg = isLoadFromStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    const MachineMemOperand *Dummy;
    return hasLoadFromStackSlot(MI, Dummy, FrameIndex);
  }
  return 0;
}

bool X86InstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                        const MachineMemOperand *&MMO,
                                        int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

unsigned X86InstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                          int &FrameIndex) const {
  if (isFrameStoreOpcode(MI->getOpcode()))
    if (MI->getOperand(X86::AddrNumOperands).getSubReg() == 0 &&
        isFrameOperand(MI, 0, FrameIndex))
      return MI->getOperand(X86::AddrNumOperands).getReg();
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                int &FrameIndex) const {
  if (isFrameStoreOpcode(MI->getOpcode())) {
    unsigned Reg;
    if ((Reg = isStoreToStackSlot(MI, FrameIndex)))
      return Reg;
    // Check for post-frame index elimination operations
    const MachineMemOperand *Dummy;
    return hasStoreToStackSlot(MI, Dummy, FrameIndex);
  }
  return 0;
}

bool X86InstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                       const MachineMemOperand *&MMO,
                                       int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

/// regIsPICBase - Return true if register is PIC base (i.e. defined by
/// X86::MOVPC32r).
static bool regIsPICBase(unsigned BaseReg, const MachineRegisterInfo &MRI) {
  bool isPICBase = false;
  for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
         E = MRI.def_end(); I != E; ++I) {
    MachineInstr *DefMI = I.getOperand().getParent();
    if (DefMI->getOpcode() != X86::MOVPC32r)
      return false;
    assert(!isPICBase && "More than one PIC base?");
    isPICBase = true;
  }
  return isPICBase;
}

bool
X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI,
                                                AliasAnalysis *AA) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV32rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVUPSrm:
  case X86::MOVUPSrm_Int:
  case X86::MOVAPDrm:
  case X86::MOVDQArm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
  case X86::FsMOVAPSrm:
  case X86::FsMOVAPDrm: {
    // Loads from constant pools are trivially rematerializable.
    if (MI->getOperand(1).isReg() &&
        MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
        MI->isInvariantLoad(AA)) {
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0 || BaseReg == X86::RIP)
        return true;
      // Allow re-materialization of PIC load.
      if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())
        return false;
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      bool isPICBase = false;
      for (MachineRegisterInfo::def_iterator I = MRI.def_begin(BaseReg),
             E = MRI.def_end(); I != E; ++I) {
        MachineInstr *DefMI = I.getOperand().getParent();
        if (DefMI->getOpcode() != X86::MOVPC32r)
          return false;
        assert(!isPICBase && "More than one PIC base?");
        isPICBase = true;
      }
      return isPICBase;
    }
    return false;
  }

  case X86::LEA32r:
  case X86::LEA64r: {
    if (MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(3).getReg() == 0 &&
        !MI->getOperand(4).isReg()) {
      // lea fi#, lea GV, etc. are all rematerializable.
      if (!MI->getOperand(1).isReg())
        return true;
      unsigned BaseReg = MI->getOperand(1).getReg();
      if (BaseReg == 0)
        return true;
      // Allow re-materialization of lea PICBase + x.
      const MachineFunction &MF = *MI->getParent()->getParent();
      const MachineRegisterInfo &MRI = MF.getRegInfo();
      return regIsPICBase(BaseReg, MRI);
    }
    return false;
  }
  }

  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

/// isSafeToClobberEFLAGS - Return true if it's safe to insert an instruction
/// that would clobber the EFLAGS condition register. Note the result may be
/// conservative. If it cannot definitely determine the safety after visiting
/// a few instructions in each direction it assumes it's not safe.
static bool isSafeToClobberEFLAGS(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I) {
  MachineBasicBlock::iterator E = MBB.end();

  // It's always safe to clobber EFLAGS at the end of a block.
  if (I == E)
    return true;

  // For compile time consideration, if we are not able to determine the
  // safety after visiting 4 instructions in each direction, we will assume
  // it's not safe.
  MachineBasicBlock::iterator Iter = I;
  for (unsigned i = 0; i < 4; ++i) {
    bool SeenDef = false;
    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
      MachineOperand &MO = Iter->getOperand(j);
      if (!MO.isReg())
        continue;
      if (MO.getReg() == X86::EFLAGS) {
        if (MO.isUse())
          return false;
        SeenDef = true;
      }
    }

    if (SeenDef)
      // This instruction defines EFLAGS, no need to look any further.
      return true;
    ++Iter;
    // Skip over DBG_VALUE.
    while (Iter != E && Iter->isDebugValue())
      ++Iter;

    // If we make it to the end of the block, it's safe to clobber EFLAGS.
    if (Iter == E)
      return true;
  }

  MachineBasicBlock::iterator B = MBB.begin();
  Iter = I;
  for (unsigned i = 0; i < 4; ++i) {
    // If we make it to the beginning of the block, it's safe to clobber
    // EFLAGS iff EFLAGS is not live-in.
    if (Iter == B)
      return !MBB.isLiveIn(X86::EFLAGS);

    --Iter;
    // Skip over DBG_VALUE.
    while (Iter != B && Iter->isDebugValue())
      --Iter;

    bool SawKill = false;
    for (unsigned j = 0, e = Iter->getNumOperands(); j != e; ++j) {
      MachineOperand &MO = Iter->getOperand(j);
      if (MO.isReg() && MO.getReg() == X86::EFLAGS) {
        if (MO.isDef()) return MO.isDead();
        if (MO.isKill()) SawKill = true;
      }
    }

    if (SawKill)
      // This instruction kills EFLAGS and doesn't redefine it, so
      // there's no need to look further.
      return true;
  }

  // Conservative answer.
  return false;
}

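// reMaterialize re-emits Orig at the insertion point with its definition
// retargeted to DestReg:SubIdx.  The zero-idiom pseudos (MOV*r0) are rewritten
// to plain immediate moves when EFLAGS cannot safely be clobbered here.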
void X86InstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator I,
                                 unsigned DestReg, unsigned SubIdx,
                                 const MachineInstr *Orig,
                                 const TargetRegisterInfo &TRI) const {
  DebugLoc DL = Orig->getDebugLoc();

  // MOV32r0 etc. are implemented with xor which clobbers condition code.
  // Re-materialize them as movri instructions to avoid side effects.
  bool Clone = true;
  unsigned Opc = Orig->getOpcode();
  switch (Opc) {
  default: break;
  case X86::MOV8r0:
  case X86::MOV16r0:
  case X86::MOV32r0:
  case X86::MOV64r0: {
    if (!isSafeToClobberEFLAGS(MBB, I)) {
      switch (Opc) {
      default: break;
      case X86::MOV8r0:  Opc = X86::MOV8ri;  break;
      case X86::MOV16r0: Opc = X86::MOV16ri; break;
      case X86::MOV32r0: Opc = X86::MOV32ri; break;
      case X86::MOV64r0: Opc = X86::MOV64ri64i32; break;
      }
      Clone = false;
    }
    break;
  }
  }

  if (Clone) {
    MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
    MBB.insert(I, MI);
  } else {
    BuildMI(MBB, I, DL, get(Opc)).addOperand(Orig->getOperand(0)).addImm(0);
  }

  MachineInstr *NewMI = prior(I);
  NewMI->substituteRegister(Orig->getOperand(0).getReg(), DestReg, SubIdx, TRI);
}

/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
/// is not marked dead.
static bool hasLiveCondCodeDef(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// convertToThreeAddressWithLEA - Helper for convertToThreeAddress when
/// 16-bit LEA is disabled. It uses 32-bit LEA to form 3-address code by
/// promoting to a 32-bit super-register and then truncating back down to a
/// 16-bit sub-register.
MachineInstr *
X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
                                           MachineFunction::iterator &MFI,
                                           MachineBasicBlock::iterator &MBBI,
                                           LiveVariables *LV) const {
  MachineInstr *MI = MBBI;
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();
  bool isDead = MI->getOperand(0).isDead();
  bool isKill = MI->getOperand(1).isKill();

  unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
    ? X86::LEA64_32r : X86::LEA32r;
  MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
  unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
  unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);

  // Build and insert into an implicit UNDEF value. This is OK because
  // we'll be shifting and then extracting the lower 16 bits.
Evan Chengdd99f3a2009-12-12 20:03:14 +00001136 // This has the potential to cause partial register stall. e.g.
Evan Cheng04ab19c2009-12-12 18:55:26 +00001137 // movw (%rbp,%rcx,2), %dx
1138 // leal -65(%rdx), %esi
Evan Chengdd99f3a2009-12-12 20:03:14 +00001139 // But testing has shown this *does* help performance in 64-bit mode (at
1140 // least on modern x86 machines).
Evan Cheng656e5142009-12-11 06:01:48 +00001141 BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg);
1142 MachineInstr *InsMI =
Jakob Stoklund Olesen5c00e072010-07-08 16:40:15 +00001143 BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
1144 .addReg(leaInReg, RegState::Define, X86::sub_16bit)
1145 .addReg(Src, getKillRegState(isKill));
Evan Cheng656e5142009-12-11 06:01:48 +00001146
1147 MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
1148 get(Opc), leaOutReg);
1149 switch (MIOpc) {
1150 default:
1151 llvm_unreachable(0);
1152 break;
1153 case X86::SHL16ri: {
1154 unsigned ShAmt = MI->getOperand(2).getImm();
1155 MIB.addReg(0).addImm(1 << ShAmt)
Chris Lattner599b5312010-07-08 23:46:44 +00001156 .addReg(leaInReg, RegState::Kill).addImm(0).addReg(0);
Evan Cheng656e5142009-12-11 06:01:48 +00001157 break;
1158 }
1159 case X86::INC16r:
1160 case X86::INC64_16r:
Chris Lattner599b5312010-07-08 23:46:44 +00001161 addRegOffset(MIB, leaInReg, true, 1);
Evan Cheng656e5142009-12-11 06:01:48 +00001162 break;
1163 case X86::DEC16r:
1164 case X86::DEC64_16r:
Chris Lattner599b5312010-07-08 23:46:44 +00001165 addRegOffset(MIB, leaInReg, true, -1);
Evan Cheng656e5142009-12-11 06:01:48 +00001166 break;
1167 case X86::ADD16ri:
1168 case X86::ADD16ri8:
Chris Lattner15df55d2010-10-08 03:57:25 +00001169 case X86::ADD16ri_DB:
1170 case X86::ADD16ri8_DB:
Chris Lattner599b5312010-07-08 23:46:44 +00001171 addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
Evan Cheng656e5142009-12-11 06:01:48 +00001172 break;
Chris Lattner99ae6652010-10-08 03:54:52 +00001173 case X86::ADD16rr:
1174 case X86::ADD16rr_DB: {
Evan Cheng656e5142009-12-11 06:01:48 +00001175 unsigned Src2 = MI->getOperand(2).getReg();
1176 bool isKill2 = MI->getOperand(2).isKill();
1177 unsigned leaInReg2 = 0;
1178 MachineInstr *InsMI2 = 0;
1179 if (Src == Src2) {
1180 // ADD16rr %reg1028<kill>, %reg1028: both sources are the same, so
1181 // just a single insert_subreg is needed.
1182 addRegReg(MIB, leaInReg, true, leaInReg, false);
1183 } else {
Jakob Stoklund Olesen635127a2010-10-07 00:07:26 +00001184 leaInReg2 = RegInfo.createVirtualRegister(&X86::GR32_NOSPRegClass);
Evan Cheng656e5142009-12-11 06:01:48 +00001185 // Build an implicit UNDEF value and insert the 16-bit source into it. This
1186 // is OK because we'll be shifting and then extracting the lower 16 bits.
1187 BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::IMPLICIT_DEF), leaInReg2);
1188 InsMI2 =
Jakob Stoklund Olesen5c00e072010-07-08 16:40:15 +00001189 BuildMI(*MFI, MIB, MI->getDebugLoc(), get(TargetOpcode::COPY))
1190 .addReg(leaInReg2, RegState::Define, X86::sub_16bit)
1191 .addReg(Src2, getKillRegState(isKill2));
Evan Cheng656e5142009-12-11 06:01:48 +00001192 addRegReg(MIB, leaInReg, true, leaInReg2, true);
1193 }
1194 if (LV && isKill2 && InsMI2)
1195 LV->replaceKillInstruction(Src2, MI, InsMI2);
1196 break;
1197 }
1198 }
1199
1200 MachineInstr *NewMI = MIB;
1201 MachineInstr *ExtMI =
Jakob Stoklund Olesen0bc25f42010-07-08 16:40:22 +00001202 BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(TargetOpcode::COPY))
Evan Cheng656e5142009-12-11 06:01:48 +00001203 .addReg(Dest, RegState::Define | getDeadRegState(isDead))
Jakob Stoklund Olesen0bc25f42010-07-08 16:40:22 +00001204 .addReg(leaOutReg, RegState::Kill, X86::sub_16bit);
Evan Cheng656e5142009-12-11 06:01:48 +00001205
1206 if (LV) {
1207 // Update live variables
1208 LV->getVarInfo(leaInReg).Kills.push_back(NewMI);
1209 LV->getVarInfo(leaOutReg).Kills.push_back(ExtMI);
1210 if (isKill)
1211 LV->replaceKillInstruction(Src, MI, InsMI);
1212 if (isDead)
1213 LV->replaceKillInstruction(Dest, MI, ExtMI);
1214 }
1215
1216 return ExtMI;
1217}
1218
Chris Lattnerbcea4d62005-01-02 02:37:07 +00001219/// convertToThreeAddress - This method must be implemented by targets that
1220/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
1221/// may be able to convert a two-address instruction into a true
1222/// three-address instruction on demand. This allows the X86 target (for
1223/// example) to convert ADD and SHL instructions into LEA instructions if they
1224/// would require register copies due to two-addressness.
1225///
1226/// This method returns a null pointer if the transformation cannot be
1227/// performed, otherwise it returns the new instruction.
1228///
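/// For example (an informal sketch, not generated output):
///   addl %ecx, %eax          ; two-address: the destination is tied to %eax
/// can instead be emitted as
///   leal (%eax,%ecx), %edx   ; three-address: the destination is independent,
///                            ; so no extra copy is needed if %eax must stay live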
Evan Cheng258ff672006-12-01 21:52:41 +00001229MachineInstr *
1230X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
1231 MachineBasicBlock::iterator &MBBI,
Owen Andersonf660c172008-07-02 23:41:07 +00001232 LiveVariables *LV) const {
Evan Cheng258ff672006-12-01 21:52:41 +00001233 MachineInstr *MI = MBBI;
Dan Gohman8e5f2c62008-07-07 23:14:23 +00001234 MachineFunction &MF = *MI->getParent()->getParent();
Chris Lattnerbcea4d62005-01-02 02:37:07 +00001235 // All input instructions are two-addr instructions. Get the known operands.
1236 unsigned Dest = MI->getOperand(0).getReg();
1237 unsigned Src = MI->getOperand(1).getReg();
Evan Cheng9f1c8312008-07-03 09:09:37 +00001238 bool isDead = MI->getOperand(0).isDead();
1239 bool isKill = MI->getOperand(1).isKill();
Chris Lattnerbcea4d62005-01-02 02:37:07 +00001240
Evan Cheng6ce7dc22006-11-15 20:58:11 +00001241 MachineInstr *NewMI = NULL;
Evan Cheng258ff672006-12-01 21:52:41 +00001242 // FIXME: 16-bit LEAs are really slow on Athlons, but not bad on P4s. When
Chris Lattnera16b7cb2007-03-20 06:08:29 +00001243 // we have better subtarget support, enable the 16-bit LEA generation here.
Evan Chengdd99f3a2009-12-12 20:03:14 +00001244 // 16-bit LEA is also slow on Core2.
Evan Cheng258ff672006-12-01 21:52:41 +00001245 bool DisableLEA16 = true;
Evan Chengdd99f3a2009-12-12 20:03:14 +00001246 bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
Evan Cheng258ff672006-12-01 21:52:41 +00001247
Evan Cheng559dc462007-10-05 20:34:26 +00001248 unsigned MIOpc = MI->getOpcode();
1249 switch (MIOpc) {
Evan Chengccba76b2006-05-30 20:26:50 +00001250 case X86::SHUFPSrri: {
1251 assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
Chris Lattnera16b7cb2007-03-20 06:08:29 +00001252 if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;
1253
Evan Chengaa3c1412006-05-30 21:45:53 +00001254 unsigned B = MI->getOperand(1).getReg();
1255 unsigned C = MI->getOperand(2).getReg();
Chris Lattnera16b7cb2007-03-20 06:08:29 +00001256 if (B != C) return 0;
Evan Cheng9f1c8312008-07-03 09:09:37 +00001257 unsigned A = MI->getOperand(0).getReg();
1258 unsigned M = MI->getOperand(3).getImm();
Bill Wendlingfbef3102009-02-11 21:51:19 +00001259 NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::PSHUFDri))
Bill Wendling587daed2009-05-13 21:33:08 +00001260 .addReg(A, RegState::Define | getDeadRegState(isDead))
1261 .addReg(B, getKillRegState(isKill)).addImm(M);
Chris Lattnera16b7cb2007-03-20 06:08:29 +00001262 break;
1263 }
Chris Lattner995f5502007-03-28 18:12:31 +00001264 case X86::SHL64ri: {
Evan Cheng24f2ea32007-09-14 21:48:26 +00001265 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
Chris Lattner995f5502007-03-28 18:12:31 +00001266 // NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet use
1267 // the flags produced by a shift, so this is safe.
Chris Lattner995f5502007-03-28 18:12:31 +00001268 unsigned ShAmt = MI->getOperand(2).getImm();
1269 if (ShAmt == 0 || ShAmt >= 4) return 0;
Evan Cheng9f1c8312008-07-03 09:09:37 +00001270
Jakob Stoklund Olesen635127a2010-10-07 00:07:26 +00001271 // LEA can't handle RSP.
1272 if (TargetRegisterInfo::isVirtualRegister(Src) &&
1273 !MF.getRegInfo().constrainRegClass(Src, &X86::GR64_NOSPRegClass))
1274 return 0;
1275
Bill Wendlingfbef3102009-02-11 21:51:19 +00001276 NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
Bill Wendling587daed2009-05-13 21:33:08 +00001277 .addReg(Dest, RegState::Define | getDeadRegState(isDead))
1278 .addReg(0).addImm(1 << ShAmt)
1279 .addReg(Src, getKillRegState(isKill))
Chris Lattner599b5312010-07-08 23:46:44 +00001280 .addImm(0).addReg(0);
Chris Lattner995f5502007-03-28 18:12:31 +00001281 break;
1282 }
Chris Lattnera16b7cb2007-03-20 06:08:29 +00001283 case X86::SHL32ri: {
Evan Cheng24f2ea32007-09-14 21:48:26 +00001284 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
Chris Lattnera16b7cb2007-03-20 06:08:29 +00001285 // NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet use
1286 // the flags produced by a shift, so this is safe.
Chris Lattnera16b7cb2007-03-20 06:08:29 +00001287 unsigned ShAmt = MI->getOperand(2).getImm();
1288 if (ShAmt == 0 || ShAmt >= 4) return 0;
Evan Cheng9f1c8312008-07-03 09:09:37 +00001289
Jakob Stoklund Olesen635127a2010-10-07 00:07:26 +00001290 // LEA can't handle ESP.
1291 if (TargetRegisterInfo::isVirtualRegister(Src) &&
1292 !MF.getRegInfo().constrainRegClass(Src, &X86::GR32_NOSPRegClass))
1293 return 0;
1294
Evan Chengdd99f3a2009-12-12 20:03:14 +00001295 unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
Bill Wendlingfbef3102009-02-11 21:51:19 +00001296 NewMI = BuildMI(MF, MI->getDebugLoc(), get(Opc))
Bill Wendling587daed2009-05-13 21:33:08 +00001297 .addReg(Dest, RegState::Define | getDeadRegState(isDead))
Evan Cheng9f1c8312008-07-03 09:09:37 +00001298 .addReg(0).addImm(1 << ShAmt)
Chris Lattner599b5312010-07-08 23:46:44 +00001299 .addReg(Src, getKillRegState(isKill)).addImm(0).addReg(0);
Chris Lattnera16b7cb2007-03-20 06:08:29 +00001300 break;
1301 }
1302 case X86::SHL16ri: {
Evan Cheng24f2ea32007-09-14 21:48:26 +00001303 assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
Evan Cheng61d9c862007-09-06 00:14:41 +00001304 // NOTE: LEA doesn't produce flags like shift does, but LLVM doesn't yet use
1305 // the flags produced by a shift, so this is safe.
Evan Cheng61d9c862007-09-06 00:14:41 +00001306 unsigned ShAmt = MI->getOperand(2).getImm();
1307 if (ShAmt == 0 || ShAmt >= 4) return 0;
Evan Cheng9f1c8312008-07-03 09:09:37 +00001308
Evan Cheng656e5142009-12-11 06:01:48 +00001309 if (DisableLEA16)
Evan Chengdd99f3a2009-12-12 20:03:14 +00001310 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
Evan Cheng656e5142009-12-11 06:01:48 +00001311 NewMI = BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
1312 .addReg(Dest, RegState::Define | getDeadRegState(isDead))
1313 .addReg(0).addImm(1 << ShAmt)
1314 .addReg(Src, getKillRegState(isKill))
Chris Lattner599b5312010-07-08 23:46:44 +00001315 .addImm(0).addReg(0);
Chris Lattnera16b7cb2007-03-20 06:08:29 +00001316 break;
Evan Chengccba76b2006-05-30 20:26:50 +00001317 }
Evan Cheng559dc462007-10-05 20:34:26 +00001318 default: {
1319 // The following opcodes also set the condition code register(s). Only
1320 // convert them to an equivalent LEA if the condition code register defs
1321 // are dead!
1322 if (hasLiveCondCodeDef(MI))
1323 return 0;
Evan Chengccba76b2006-05-30 20:26:50 +00001324
Evan Cheng559dc462007-10-05 20:34:26 +00001325 switch (MIOpc) {
1326 default: return 0;
1327 case X86::INC64r:
Dan Gohmancca29832009-01-06 23:34:46 +00001328 case X86::INC32r:
1329 case X86::INC64_32r: {
Evan Cheng559dc462007-10-05 20:34:26 +00001330 assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
Evan Chengb76143c2007-10-09 07:14:53 +00001331 unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
1332 : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
Jakob Stoklund Olesen635127a2010-10-07 00:07:26 +00001333
1334 // LEA can't handle RSP.
1335 if (TargetRegisterInfo::isVirtualRegister(Src) &&
1336 !MF.getRegInfo().constrainRegClass(Src,
1337 MIOpc == X86::INC64r ? X86::GR64_NOSPRegisterClass :
1338 X86::GR32_NOSPRegisterClass))
1339 return 0;
1340
Chris Lattner599b5312010-07-08 23:46:44 +00001341 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
Bill Wendling587daed2009-05-13 21:33:08 +00001342 .addReg(Dest, RegState::Define |
1343 getDeadRegState(isDead)),
Rafael Espindola094fad32009-04-08 21:14:34 +00001344 Src, isKill, 1);
Evan Cheng559dc462007-10-05 20:34:26 +00001345 break;
Chris Lattnerbcea4d62005-01-02 02:37:07 +00001346 }
Evan Cheng559dc462007-10-05 20:34:26 +00001347 case X86::INC16r:
1348 case X86::INC64_16r:
Evan Cheng656e5142009-12-11 06:01:48 +00001349 if (DisableLEA16)
Evan Chengdd99f3a2009-12-12 20:03:14 +00001350 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
Evan Cheng559dc462007-10-05 20:34:26 +00001351 assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
Bill Wendlingfbef3102009-02-11 21:51:19 +00001352 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
Bill Wendling587daed2009-05-13 21:33:08 +00001353 .addReg(Dest, RegState::Define |
1354 getDeadRegState(isDead)),
Evan Cheng9f1c8312008-07-03 09:09:37 +00001355 Src, isKill, 1);
Evan Cheng559dc462007-10-05 20:34:26 +00001356 break;
1357 case X86::DEC64r:
Dan Gohmancca29832009-01-06 23:34:46 +00001358 case X86::DEC32r:
1359 case X86::DEC64_32r: {
Evan Cheng559dc462007-10-05 20:34:26 +00001360 assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
Evan Chengb76143c2007-10-09 07:14:53 +00001361 unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
1362 : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
Jakob Stoklund Olesen635127a2010-10-07 00:07:26 +00001363 // LEA can't handle RSP.
1364 if (TargetRegisterInfo::isVirtualRegister(Src) &&
1365 !MF.getRegInfo().constrainRegClass(Src,
1366 MIOpc == X86::DEC64r ? X86::GR64_NOSPRegisterClass :
1367 X86::GR32_NOSPRegisterClass))
1368 return 0;
1369
Chris Lattner599b5312010-07-08 23:46:44 +00001370 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
Bill Wendling587daed2009-05-13 21:33:08 +00001371 .addReg(Dest, RegState::Define |
1372 getDeadRegState(isDead)),
Rafael Espindola094fad32009-04-08 21:14:34 +00001373 Src, isKill, -1);
Evan Cheng559dc462007-10-05 20:34:26 +00001374 break;
1375 }
1376 case X86::DEC16r:
1377 case X86::DEC64_16r:
Evan Cheng656e5142009-12-11 06:01:48 +00001378 if (DisableLEA16)
Evan Chengdd99f3a2009-12-12 20:03:14 +00001379 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
Evan Cheng559dc462007-10-05 20:34:26 +00001380 assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
Bill Wendlingfbef3102009-02-11 21:51:19 +00001381 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
Bill Wendling587daed2009-05-13 21:33:08 +00001382 .addReg(Dest, RegState::Define |
1383 getDeadRegState(isDead)),
Evan Cheng9f1c8312008-07-03 09:09:37 +00001384 Src, isKill, -1);
Evan Cheng559dc462007-10-05 20:34:26 +00001385 break;
1386 case X86::ADD64rr:
Chris Lattner99ae6652010-10-08 03:54:52 +00001387 case X86::ADD64rr_DB:
1388 case X86::ADD32rr:
1389 case X86::ADD32rr_DB: {
Evan Cheng559dc462007-10-05 20:34:26 +00001390 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
Chris Lattner99ae6652010-10-08 03:54:52 +00001391 unsigned Opc;
1392 TargetRegisterClass *RC;
1393 if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) {
1394 Opc = X86::LEA64r;
1395 RC = X86::GR64_NOSPRegisterClass;
1396 } else {
1397 Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
1398 RC = X86::GR32_NOSPRegisterClass;
1399 }
1400
1401
Evan Cheng9f1c8312008-07-03 09:09:37 +00001402 unsigned Src2 = MI->getOperand(2).getReg();
1403 bool isKill2 = MI->getOperand(2).isKill();
Jakob Stoklund Olesen635127a2010-10-07 00:07:26 +00001404
1405 // LEA can't handle RSP.
1406 if (TargetRegisterInfo::isVirtualRegister(Src2) &&
Chris Lattner99ae6652010-10-08 03:54:52 +00001407 !MF.getRegInfo().constrainRegClass(Src2, RC))
Jakob Stoklund Olesen635127a2010-10-07 00:07:26 +00001408 return 0;
1409
Bill Wendlingfbef3102009-02-11 21:51:19 +00001410 NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc))
Bill Wendling587daed2009-05-13 21:33:08 +00001411 .addReg(Dest, RegState::Define |
1412 getDeadRegState(isDead)),
Evan Cheng9f1c8312008-07-03 09:09:37 +00001413 Src, isKill, Src2, isKill2);
1414 if (LV && isKill2)
1415 LV->replaceKillInstruction(Src2, MI, NewMI);
Evan Cheng559dc462007-10-05 20:34:26 +00001416 break;
1417 }
Chris Lattner99ae6652010-10-08 03:54:52 +00001418 case X86::ADD16rr:
1419 case X86::ADD16rr_DB: {
Evan Cheng656e5142009-12-11 06:01:48 +00001420 if (DisableLEA16)
Evan Chengdd99f3a2009-12-12 20:03:14 +00001421 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
Evan Cheng559dc462007-10-05 20:34:26 +00001422 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
Evan Cheng9f1c8312008-07-03 09:09:37 +00001423 unsigned Src2 = MI->getOperand(2).getReg();
1424 bool isKill2 = MI->getOperand(2).isKill();
Bill Wendlingfbef3102009-02-11 21:51:19 +00001425 NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
Bill Wendling587daed2009-05-13 21:33:08 +00001426 .addReg(Dest, RegState::Define |
1427 getDeadRegState(isDead)),
Evan Cheng9f1c8312008-07-03 09:09:37 +00001428 Src, isKill, Src2, isKill2);
1429 if (LV && isKill2)
1430 LV->replaceKillInstruction(Src2, MI, NewMI);
Evan Cheng559dc462007-10-05 20:34:26 +00001431 break;
Evan Cheng9f1c8312008-07-03 09:09:37 +00001432 }
Evan Cheng559dc462007-10-05 20:34:26 +00001433 case X86::ADD64ri32:
1434 case X86::ADD64ri8:
Chris Lattner15df55d2010-10-08 03:57:25 +00001435 case X86::ADD64ri32_DB:
1436 case X86::ADD64ri8_DB:
Evan Cheng559dc462007-10-05 20:34:26 +00001437 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
Chris Lattner599b5312010-07-08 23:46:44 +00001438 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA64r))
Evan Cheng656e5142009-12-11 06:01:48 +00001439 .addReg(Dest, RegState::Define |
1440 getDeadRegState(isDead)),
1441 Src, isKill, MI->getOperand(2).getImm());
Evan Cheng559dc462007-10-05 20:34:26 +00001442 break;
1443 case X86::ADD32ri:
Chris Lattner15df55d2010-10-08 03:57:25 +00001444 case X86::ADD32ri8:
1445 case X86::ADD32ri_DB:
1446 case X86::ADD32ri8_DB: {
Evan Cheng559dc462007-10-05 20:34:26 +00001447 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
Evan Cheng656e5142009-12-11 06:01:48 +00001448 unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
Chris Lattner599b5312010-07-08 23:46:44 +00001449 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(Opc))
Evan Cheng656e5142009-12-11 06:01:48 +00001450 .addReg(Dest, RegState::Define |
1451 getDeadRegState(isDead)),
Rafael Espindola094fad32009-04-08 21:14:34 +00001452 Src, isKill, MI->getOperand(2).getImm());
Evan Cheng559dc462007-10-05 20:34:26 +00001453 break;
1454 }
Evan Cheng656e5142009-12-11 06:01:48 +00001455 case X86::ADD16ri:
1456 case X86::ADD16ri8:
Chris Lattner15df55d2010-10-08 03:57:25 +00001457 case X86::ADD16ri_DB:
1458 case X86::ADD16ri8_DB:
Evan Cheng656e5142009-12-11 06:01:48 +00001459 if (DisableLEA16)
Evan Chengdd99f3a2009-12-12 20:03:14 +00001460 return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
Evan Cheng656e5142009-12-11 06:01:48 +00001461 assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
Chris Lattner599b5312010-07-08 23:46:44 +00001462 NewMI = addRegOffset(BuildMI(MF, MI->getDebugLoc(), get(X86::LEA16r))
Evan Cheng656e5142009-12-11 06:01:48 +00001463 .addReg(Dest, RegState::Define |
1464 getDeadRegState(isDead)),
1465 Src, isKill, MI->getOperand(2).getImm());
1466 break;
Evan Cheng559dc462007-10-05 20:34:26 +00001467 }
1468 }
Chris Lattnerbcea4d62005-01-02 02:37:07 +00001469 }
1470
Evan Cheng15246732008-02-07 08:29:53 +00001471 if (!NewMI) return 0;
1472
Evan Cheng9f1c8312008-07-03 09:09:37 +00001473 if (LV) { // Update live variables
1474 if (isKill)
1475 LV->replaceKillInstruction(Src, MI, NewMI);
1476 if (isDead)
1477 LV->replaceKillInstruction(Dest, MI, NewMI);
1478 }
1479
Evan Cheng559dc462007-10-05 20:34:26 +00001480 MFI->insert(MBBI, NewMI); // Insert the new inst
Evan Cheng6ce7dc22006-11-15 20:58:11 +00001481 return NewMI;
Chris Lattnerbcea4d62005-01-02 02:37:07 +00001482}
1483
Chris Lattner41e431b2005-01-19 07:11:01 +00001484/// commuteInstruction - We have a few instructions that must be hacked on to
1485/// commute them.
1486///
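/// For example (informal): A = SHLD32rri8 B, C, I computes the same value as
/// A = SHRD32rri8 C, B, (32-I), so commuting swaps the two register sources
/// and rewrites the immediate; CMOVcc is commuted by inverting its condition.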
Evan Cheng58dcb0e2008-06-16 07:33:11 +00001487MachineInstr *
1488X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
Chris Lattner41e431b2005-01-19 07:11:01 +00001489 switch (MI->getOpcode()) {
Chris Lattner0df53d22005-01-19 07:31:24 +00001490 case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
1491 case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
Chris Lattner41e431b2005-01-19 07:11:01 +00001492 case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
Dan Gohmane47f1f92007-09-14 23:17:45 +00001493 case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
1494 case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
1495 case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
Chris Lattner0df53d22005-01-19 07:31:24 +00001496 unsigned Opc;
1497 unsigned Size;
1498 switch (MI->getOpcode()) {
Torok Edwinc23197a2009-07-14 16:55:14 +00001499 default: llvm_unreachable("Unreachable!");
Chris Lattner0df53d22005-01-19 07:31:24 +00001500 case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
1501 case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
1502 case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
1503 case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
Dan Gohmane47f1f92007-09-14 23:17:45 +00001504 case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
1505 case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
Chris Lattner0df53d22005-01-19 07:31:24 +00001506 }
Chris Lattner9a1ceae2007-12-30 20:49:49 +00001507 unsigned Amt = MI->getOperand(3).getImm();
Dan Gohman74feef22008-10-17 01:23:35 +00001508 if (NewMI) {
1509 MachineFunction &MF = *MI->getParent()->getParent();
1510 MI = MF.CloneMachineInstr(MI);
1511 NewMI = false;
Evan Chenga4d16a12008-02-13 02:46:49 +00001512 }
Dan Gohman74feef22008-10-17 01:23:35 +00001513 MI->setDesc(get(Opc));
1514 MI->getOperand(3).setImm(Size-Amt);
1515 return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
Chris Lattner41e431b2005-01-19 07:11:01 +00001516 }
Evan Cheng7ad42d92007-10-05 23:13:21 +00001517 case X86::CMOVB16rr:
1518 case X86::CMOVB32rr:
1519 case X86::CMOVB64rr:
1520 case X86::CMOVAE16rr:
1521 case X86::CMOVAE32rr:
1522 case X86::CMOVAE64rr:
1523 case X86::CMOVE16rr:
1524 case X86::CMOVE32rr:
1525 case X86::CMOVE64rr:
1526 case X86::CMOVNE16rr:
1527 case X86::CMOVNE32rr:
1528 case X86::CMOVNE64rr:
Chris Lattner25cbf502010-10-05 23:00:14 +00001529 case X86::CMOVBE16rr:
1530 case X86::CMOVBE32rr:
1531 case X86::CMOVBE64rr:
Evan Cheng7ad42d92007-10-05 23:13:21 +00001532 case X86::CMOVA16rr:
1533 case X86::CMOVA32rr:
1534 case X86::CMOVA64rr:
1535 case X86::CMOVL16rr:
1536 case X86::CMOVL32rr:
1537 case X86::CMOVL64rr:
1538 case X86::CMOVGE16rr:
1539 case X86::CMOVGE32rr:
1540 case X86::CMOVGE64rr:
1541 case X86::CMOVLE16rr:
1542 case X86::CMOVLE32rr:
1543 case X86::CMOVLE64rr:
1544 case X86::CMOVG16rr:
1545 case X86::CMOVG32rr:
1546 case X86::CMOVG64rr:
1547 case X86::CMOVS16rr:
1548 case X86::CMOVS32rr:
1549 case X86::CMOVS64rr:
1550 case X86::CMOVNS16rr:
1551 case X86::CMOVNS32rr:
1552 case X86::CMOVNS64rr:
1553 case X86::CMOVP16rr:
1554 case X86::CMOVP32rr:
1555 case X86::CMOVP64rr:
1556 case X86::CMOVNP16rr:
1557 case X86::CMOVNP32rr:
Dan Gohman305fceb2009-01-07 00:35:10 +00001558 case X86::CMOVNP64rr:
1559 case X86::CMOVO16rr:
1560 case X86::CMOVO32rr:
1561 case X86::CMOVO64rr:
1562 case X86::CMOVNO16rr:
1563 case X86::CMOVNO32rr:
1564 case X86::CMOVNO64rr: {
Evan Cheng7ad42d92007-10-05 23:13:21 +00001565 unsigned Opc = 0;
1566 switch (MI->getOpcode()) {
1567 default: break;
1568 case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break;
1569 case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break;
1570 case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break;
1571 case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
1572 case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
1573 case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
1574 case X86::CMOVE16rr: Opc = X86::CMOVNE16rr; break;
1575 case X86::CMOVE32rr: Opc = X86::CMOVNE32rr; break;
1576 case X86::CMOVE64rr: Opc = X86::CMOVNE64rr; break;
1577 case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
1578 case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
1579 case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
Chris Lattner25cbf502010-10-05 23:00:14 +00001580 case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
1581 case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
1582 case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
1583 case X86::CMOVA16rr: Opc = X86::CMOVBE16rr; break;
1584 case X86::CMOVA32rr: Opc = X86::CMOVBE32rr; break;
1585 case X86::CMOVA64rr: Opc = X86::CMOVBE64rr; break;
Evan Cheng7ad42d92007-10-05 23:13:21 +00001586 case X86::CMOVL16rr: Opc = X86::CMOVGE16rr; break;
1587 case X86::CMOVL32rr: Opc = X86::CMOVGE32rr; break;
1588 case X86::CMOVL64rr: Opc = X86::CMOVGE64rr; break;
1589 case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
1590 case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
1591 case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
1592 case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
1593 case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
1594 case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
1595 case X86::CMOVG16rr: Opc = X86::CMOVLE16rr; break;
1596 case X86::CMOVG32rr: Opc = X86::CMOVLE32rr; break;
1597 case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break;
1598 case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break;
1599 case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break;
Mon P Wang0bd07fc2009-04-18 05:16:01 +00001600 case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break;
Evan Cheng7ad42d92007-10-05 23:13:21 +00001601 case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
1602 case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
1603 case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
1604 case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break;
1605 case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break;
Mon P Wang0bd07fc2009-04-18 05:16:01 +00001606 case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break;
Evan Cheng7ad42d92007-10-05 23:13:21 +00001607 case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
1608 case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
1609 case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
Dan Gohman305fceb2009-01-07 00:35:10 +00001610 case X86::CMOVO16rr: Opc = X86::CMOVNO16rr; break;
1611 case X86::CMOVO32rr: Opc = X86::CMOVNO32rr; break;
Mon P Wang0bd07fc2009-04-18 05:16:01 +00001612 case X86::CMOVO64rr: Opc = X86::CMOVNO64rr; break;
Dan Gohman305fceb2009-01-07 00:35:10 +00001613 case X86::CMOVNO16rr: Opc = X86::CMOVO16rr; break;
1614 case X86::CMOVNO32rr: Opc = X86::CMOVO32rr; break;
1615 case X86::CMOVNO64rr: Opc = X86::CMOVO64rr; break;
Evan Cheng7ad42d92007-10-05 23:13:21 +00001616 }
Dan Gohman74feef22008-10-17 01:23:35 +00001617 if (NewMI) {
1618 MachineFunction &MF = *MI->getParent()->getParent();
1619 MI = MF.CloneMachineInstr(MI);
1620 NewMI = false;
1621 }
Chris Lattner5080f4d2008-01-11 18:10:50 +00001622 MI->setDesc(get(Opc));
Evan Cheng7ad42d92007-10-05 23:13:21 +00001623 // Fallthrough intended.
1624 }
Chris Lattner41e431b2005-01-19 07:11:01 +00001625 default:
Evan Cheng58dcb0e2008-06-16 07:33:11 +00001626 return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
Chris Lattner41e431b2005-01-19 07:11:01 +00001627 }
1628}
1629
Chris Lattner7fbe9722006-10-20 17:42:20 +00001630static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
1631 switch (BrOpc) {
1632 default: return X86::COND_INVALID;
Chris Lattnerbd13fb62010-02-11 19:25:55 +00001633 case X86::JE_4: return X86::COND_E;
1634 case X86::JNE_4: return X86::COND_NE;
1635 case X86::JL_4: return X86::COND_L;
1636 case X86::JLE_4: return X86::COND_LE;
1637 case X86::JG_4: return X86::COND_G;
1638 case X86::JGE_4: return X86::COND_GE;
1639 case X86::JB_4: return X86::COND_B;
1640 case X86::JBE_4: return X86::COND_BE;
1641 case X86::JA_4: return X86::COND_A;
1642 case X86::JAE_4: return X86::COND_AE;
1643 case X86::JS_4: return X86::COND_S;
1644 case X86::JNS_4: return X86::COND_NS;
1645 case X86::JP_4: return X86::COND_P;
1646 case X86::JNP_4: return X86::COND_NP;
1647 case X86::JO_4: return X86::COND_O;
1648 case X86::JNO_4: return X86::COND_NO;
Chris Lattner7fbe9722006-10-20 17:42:20 +00001649 }
1650}
1651
1652unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
1653 switch (CC) {
Torok Edwinc23197a2009-07-14 16:55:14 +00001654 default: llvm_unreachable("Illegal condition code!");
Chris Lattnerbd13fb62010-02-11 19:25:55 +00001655 case X86::COND_E: return X86::JE_4;
1656 case X86::COND_NE: return X86::JNE_4;
1657 case X86::COND_L: return X86::JL_4;
1658 case X86::COND_LE: return X86::JLE_4;
1659 case X86::COND_G: return X86::JG_4;
1660 case X86::COND_GE: return X86::JGE_4;
1661 case X86::COND_B: return X86::JB_4;
1662 case X86::COND_BE: return X86::JBE_4;
1663 case X86::COND_A: return X86::JA_4;
1664 case X86::COND_AE: return X86::JAE_4;
1665 case X86::COND_S: return X86::JS_4;
1666 case X86::COND_NS: return X86::JNS_4;
1667 case X86::COND_P: return X86::JP_4;
1668 case X86::COND_NP: return X86::JNP_4;
1669 case X86::COND_O: return X86::JO_4;
1670 case X86::COND_NO: return X86::JNO_4;
Chris Lattner7fbe9722006-10-20 17:42:20 +00001671 }
1672}
1673
Chris Lattner9cd68752006-10-21 05:52:40 +00001674/// GetOppositeBranchCondition - Return the inverse of the specified condition,
1675/// e.g. turning COND_E to COND_NE.
1676X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
1677 switch (CC) {
Torok Edwinc23197a2009-07-14 16:55:14 +00001678 default: llvm_unreachable("Illegal condition code!");
Chris Lattner9cd68752006-10-21 05:52:40 +00001679 case X86::COND_E: return X86::COND_NE;
1680 case X86::COND_NE: return X86::COND_E;
1681 case X86::COND_L: return X86::COND_GE;
1682 case X86::COND_LE: return X86::COND_G;
1683 case X86::COND_G: return X86::COND_LE;
1684 case X86::COND_GE: return X86::COND_L;
1685 case X86::COND_B: return X86::COND_AE;
1686 case X86::COND_BE: return X86::COND_A;
1687 case X86::COND_A: return X86::COND_BE;
1688 case X86::COND_AE: return X86::COND_B;
1689 case X86::COND_S: return X86::COND_NS;
1690 case X86::COND_NS: return X86::COND_S;
1691 case X86::COND_P: return X86::COND_NP;
1692 case X86::COND_NP: return X86::COND_P;
1693 case X86::COND_O: return X86::COND_NO;
1694 case X86::COND_NO: return X86::COND_O;
1695 }
1696}
1697
Dale Johannesen318093b2007-06-14 22:03:45 +00001698bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
Chris Lattner749c6f62008-01-07 07:27:27 +00001699 const TargetInstrDesc &TID = MI->getDesc();
1700 if (!TID.isTerminator()) return false;
Chris Lattner69244302008-01-07 01:56:04 +00001701
1702 // Conditional branch is a special case.
Chris Lattner749c6f62008-01-07 07:27:27 +00001703 if (TID.isBranch() && !TID.isBarrier())
Chris Lattner69244302008-01-07 01:56:04 +00001704 return true;
Chris Lattner749c6f62008-01-07 07:27:27 +00001705 if (!TID.isPredicable())
Chris Lattner69244302008-01-07 01:56:04 +00001706 return true;
1707 return !isPredicated(MI);
Dale Johannesen318093b2007-06-14 22:03:45 +00001708}
Chris Lattner9cd68752006-10-21 05:52:40 +00001709
Chris Lattner7fbe9722006-10-20 17:42:20 +00001710bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
1711 MachineBasicBlock *&TBB,
1712 MachineBasicBlock *&FBB,
Evan Chengdc54d312009-02-09 07:14:22 +00001713 SmallVectorImpl<MachineOperand> &Cond,
1714 bool AllowModify) const {
Dan Gohman279c22e2008-10-21 03:29:32 +00001715 // Start from the bottom of the block and work up, examining the
1716 // terminator instructions.
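  // (Illustrative sketch: for a block ending in "jne L1; jmp L2" this returns
  // TBB = L1, FBB = L2, Cond = { X86::COND_NE }; for a lone "jmp L1" it
  // returns TBB = L1 with an empty Cond.)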
Chris Lattner7fbe9722006-10-20 17:42:20 +00001717 MachineBasicBlock::iterator I = MBB.end();
Evan Chengfc5a03e2010-04-13 18:50:27 +00001718 MachineBasicBlock::iterator UnCondBrIter = MBB.end();
Dan Gohman279c22e2008-10-21 03:29:32 +00001719 while (I != MBB.begin()) {
1720 --I;
Dale Johannesen93d6a7e2010-04-02 01:38:09 +00001721 if (I->isDebugValue())
1722 continue;
Bill Wendling85de1e52009-12-14 06:51:19 +00001723
1724 // Working from the bottom, when we see a non-terminator instruction, we're
1725 // done.
Jakob Stoklund Olesen468a2a42010-07-16 17:41:44 +00001726 if (!isUnpredicatedTerminator(I))
Dan Gohman279c22e2008-10-21 03:29:32 +00001727 break;
Bill Wendling85de1e52009-12-14 06:51:19 +00001728
1729 // A terminator that isn't a branch can't easily be handled by this
1730 // analysis.
Dan Gohman279c22e2008-10-21 03:29:32 +00001731 if (!I->getDesc().isBranch())
Chris Lattner7fbe9722006-10-20 17:42:20 +00001732 return true;
Bill Wendling85de1e52009-12-14 06:51:19 +00001733
Dan Gohman279c22e2008-10-21 03:29:32 +00001734 // Handle unconditional branches.
Chris Lattnerbd13fb62010-02-11 19:25:55 +00001735 if (I->getOpcode() == X86::JMP_4) {
Evan Chengfc5a03e2010-04-13 18:50:27 +00001736 UnCondBrIter = I;
1737
Evan Chengdc54d312009-02-09 07:14:22 +00001738 if (!AllowModify) {
1739 TBB = I->getOperand(0).getMBB();
Evan Cheng45e00102009-05-08 06:34:09 +00001740 continue;
Evan Chengdc54d312009-02-09 07:14:22 +00001741 }
1742
Dan Gohman279c22e2008-10-21 03:29:32 +00001743 // If the block has any instructions after a JMP, delete them.
Chris Lattner7896c9f2009-12-03 00:50:42 +00001744 while (llvm::next(I) != MBB.end())
1745 llvm::next(I)->eraseFromParent();
Bill Wendling85de1e52009-12-14 06:51:19 +00001746
Dan Gohman279c22e2008-10-21 03:29:32 +00001747 Cond.clear();
1748 FBB = 0;
Bill Wendling85de1e52009-12-14 06:51:19 +00001749
Dan Gohman279c22e2008-10-21 03:29:32 +00001750 // Delete the JMP if it's equivalent to a fall-through.
1751 if (MBB.isLayoutSuccessor(I->getOperand(0).getMBB())) {
1752 TBB = 0;
1753 I->eraseFromParent();
1754 I = MBB.end();
Evan Chengfc5a03e2010-04-13 18:50:27 +00001755 UnCondBrIter = MBB.end();
Dan Gohman279c22e2008-10-21 03:29:32 +00001756 continue;
1757 }
Bill Wendling85de1e52009-12-14 06:51:19 +00001758
Evan Chengfc5a03e2010-04-13 18:50:27 +00001759 // TBB is used to indicate the unconditional destination.
Dan Gohman279c22e2008-10-21 03:29:32 +00001760 TBB = I->getOperand(0).getMBB();
1761 continue;
Chris Lattner7fbe9722006-10-20 17:42:20 +00001762 }
Bill Wendling85de1e52009-12-14 06:51:19 +00001763
Dan Gohman279c22e2008-10-21 03:29:32 +00001764 // Handle conditional branches.
1765 X86::CondCode BranchCode = GetCondFromBranchOpc(I->getOpcode());
Chris Lattner7fbe9722006-10-20 17:42:20 +00001766 if (BranchCode == X86::COND_INVALID)
1767 return true; // Can't handle indirect branch.
Bill Wendling85de1e52009-12-14 06:51:19 +00001768
Dan Gohman279c22e2008-10-21 03:29:32 +00001769 // Working from the bottom, handle the first conditional branch.
1770 if (Cond.empty()) {
Evan Chengfc5a03e2010-04-13 18:50:27 +00001771 MachineBasicBlock *TargetBB = I->getOperand(0).getMBB();
1772 if (AllowModify && UnCondBrIter != MBB.end() &&
1773 MBB.isLayoutSuccessor(TargetBB)) {
1774 // If we can modify the code and it ends in something like:
1775 //
1776 // jCC L1
1777 // jmp L2
1778 // L1:
1779 // ...
1780 // L2:
1781 //
1782 // Then we can change this to:
1783 //
1784 // jnCC L2
1785 // L1:
1786 // ...
1787 // L2:
1788 //
1789 // Which is a bit more efficient.
1790 // We conditionally jump to the fall-through block.
1791 BranchCode = GetOppositeBranchCondition(BranchCode);
1792 unsigned JNCC = GetCondBranchFromCond(BranchCode);
1793 MachineBasicBlock::iterator OldInst = I;
1794
1795 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(JNCC))
1796 .addMBB(UnCondBrIter->getOperand(0).getMBB());
1797 BuildMI(MBB, UnCondBrIter, MBB.findDebugLoc(I), get(X86::JMP_4))
1798 .addMBB(TargetBB);
1799 MBB.addSuccessor(TargetBB);
1800
1801 OldInst->eraseFromParent();
1802 UnCondBrIter->eraseFromParent();
1803
1804 // Restart the analysis.
1805 UnCondBrIter = MBB.end();
1806 I = MBB.end();
1807 continue;
1808 }
1809
Dan Gohman279c22e2008-10-21 03:29:32 +00001810 FBB = TBB;
1811 TBB = I->getOperand(0).getMBB();
1812 Cond.push_back(MachineOperand::CreateImm(BranchCode));
1813 continue;
1814 }
Bill Wendling85de1e52009-12-14 06:51:19 +00001815
1816 // Handle subsequent conditional branches. Only handle the case where all
1817 // conditional branches branch to the same destination and their condition
1818 // opcodes fit one of the special multi-branch idioms.
Dan Gohman279c22e2008-10-21 03:29:32 +00001819 assert(Cond.size() == 1);
1820 assert(TBB);
Bill Wendling85de1e52009-12-14 06:51:19 +00001821
1822 // Only handle the case where all conditional branches branch to the same
1823 // destination.
Dan Gohman279c22e2008-10-21 03:29:32 +00001824 if (TBB != I->getOperand(0).getMBB())
1825 return true;
Bill Wendling85de1e52009-12-14 06:51:19 +00001826
Dan Gohman279c22e2008-10-21 03:29:32 +00001827 // If the conditions are the same, we can leave them alone.
Bill Wendling85de1e52009-12-14 06:51:19 +00001828 X86::CondCode OldBranchCode = (X86::CondCode)Cond[0].getImm();
Dan Gohman279c22e2008-10-21 03:29:32 +00001829 if (OldBranchCode == BranchCode)
1830 continue;
Bill Wendling85de1e52009-12-14 06:51:19 +00001831
1832 // If they differ, see if they fit one of the known patterns. Theoretically,
1833 // we could handle more patterns here, but we shouldn't expect to see them
1834 // if instruction selection has done a reasonable job.
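    // (Informal note: these combined conditions typically come from
    // floating-point equality tests, where ucomiss/ucomisd sets both ZF and
    // PF and the two flags have to be checked together.)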
Dan Gohman279c22e2008-10-21 03:29:32 +00001835 if ((OldBranchCode == X86::COND_NP &&
1836 BranchCode == X86::COND_E) ||
1837 (OldBranchCode == X86::COND_E &&
1838 BranchCode == X86::COND_NP))
1839 BranchCode = X86::COND_NP_OR_E;
1840 else if ((OldBranchCode == X86::COND_P &&
1841 BranchCode == X86::COND_NE) ||
1842 (OldBranchCode == X86::COND_NE &&
1843 BranchCode == X86::COND_P))
1844 BranchCode = X86::COND_NE_OR_P;
1845 else
1846 return true;
Bill Wendling85de1e52009-12-14 06:51:19 +00001847
Dan Gohman279c22e2008-10-21 03:29:32 +00001848 // Update the MachineOperand.
1849 Cond[0].setImm(BranchCode);
Chris Lattner6ce64432006-10-30 22:27:23 +00001850 }
Chris Lattner7fbe9722006-10-20 17:42:20 +00001851
Dan Gohman279c22e2008-10-21 03:29:32 +00001852 return false;
Chris Lattner7fbe9722006-10-20 17:42:20 +00001853}
1854
Evan Cheng6ae36262007-05-18 00:18:17 +00001855unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
Chris Lattner7fbe9722006-10-20 17:42:20 +00001856 MachineBasicBlock::iterator I = MBB.end();
Dan Gohman279c22e2008-10-21 03:29:32 +00001857 unsigned Count = 0;
1858
1859 while (I != MBB.begin()) {
1860 --I;
Dale Johannesen93d6a7e2010-04-02 01:38:09 +00001861 if (I->isDebugValue())
1862 continue;
Chris Lattnerbd13fb62010-02-11 19:25:55 +00001863 if (I->getOpcode() != X86::JMP_4 &&
Dan Gohman279c22e2008-10-21 03:29:32 +00001864 GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
1865 break;
1866 // Remove the branch.
1867 I->eraseFromParent();
1868 I = MBB.end();
1869 ++Count;
1870 }
Chris Lattner7fbe9722006-10-20 17:42:20 +00001871
Dan Gohman279c22e2008-10-21 03:29:32 +00001872 return Count;
Chris Lattner7fbe9722006-10-20 17:42:20 +00001873}
1874
Evan Cheng6ae36262007-05-18 00:18:17 +00001875unsigned
1876X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
1877 MachineBasicBlock *FBB,
Stuart Hastings3bf91252010-06-17 22:43:56 +00001878 const SmallVectorImpl<MachineOperand> &Cond,
1879 DebugLoc DL) const {
Chris Lattner7fbe9722006-10-20 17:42:20 +00001880 // Shouldn't be a fall through.
1881 assert(TBB && "InsertBranch must not be told to insert a fallthrough");
Chris Lattner34a84ac2006-10-21 05:34:23 +00001882 assert((Cond.size() == 1 || Cond.size() == 0) &&
1883 "X86 branch conditions have one component!");
1884
Dan Gohman279c22e2008-10-21 03:29:32 +00001885 if (Cond.empty()) {
1886 // Unconditional branch?
1887 assert(!FBB && "Unconditional branch with multiple successors!");
Stuart Hastings3bf91252010-06-17 22:43:56 +00001888 BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(TBB);
Evan Cheng6ae36262007-05-18 00:18:17 +00001889 return 1;
Chris Lattner7fbe9722006-10-20 17:42:20 +00001890 }
Dan Gohman279c22e2008-10-21 03:29:32 +00001891
1892 // Conditional branch.
1893 unsigned Count = 0;
1894 X86::CondCode CC = (X86::CondCode)Cond[0].getImm();
1895 switch (CC) {
1896 case X86::COND_NP_OR_E:
1897 // Synthesize NP_OR_E with two branches.
Stuart Hastings3bf91252010-06-17 22:43:56 +00001898 BuildMI(&MBB, DL, get(X86::JNP_4)).addMBB(TBB);
Bill Wendling18ce64e2010-03-05 00:33:59 +00001899 ++Count;
Stuart Hastings3bf91252010-06-17 22:43:56 +00001900 BuildMI(&MBB, DL, get(X86::JE_4)).addMBB(TBB);
Bill Wendling18ce64e2010-03-05 00:33:59 +00001901 ++Count;
Dan Gohman279c22e2008-10-21 03:29:32 +00001902 break;
1903 case X86::COND_NE_OR_P:
1904 // Synthesize NE_OR_P with two branches.
Stuart Hastings3bf91252010-06-17 22:43:56 +00001905 BuildMI(&MBB, DL, get(X86::JNE_4)).addMBB(TBB);
Bill Wendling18ce64e2010-03-05 00:33:59 +00001906 ++Count;
Stuart Hastings3bf91252010-06-17 22:43:56 +00001907 BuildMI(&MBB, DL, get(X86::JP_4)).addMBB(TBB);
Bill Wendling18ce64e2010-03-05 00:33:59 +00001908 ++Count;
Dan Gohman279c22e2008-10-21 03:29:32 +00001909 break;
Bill Wendling18ce64e2010-03-05 00:33:59 +00001910 default: {
1911 unsigned Opc = GetCondBranchFromCond(CC);
Stuart Hastings3bf91252010-06-17 22:43:56 +00001912 BuildMI(&MBB, DL, get(Opc)).addMBB(TBB);
Bill Wendling18ce64e2010-03-05 00:33:59 +00001913 ++Count;
Dan Gohman279c22e2008-10-21 03:29:32 +00001914 }
Bill Wendling18ce64e2010-03-05 00:33:59 +00001915 }
Dan Gohman279c22e2008-10-21 03:29:32 +00001916 if (FBB) {
1917 // Two-way conditional branch. Insert the second branch.
Stuart Hastings3bf91252010-06-17 22:43:56 +00001918 BuildMI(&MBB, DL, get(X86::JMP_4)).addMBB(FBB);
Dan Gohman279c22e2008-10-21 03:29:32 +00001919 ++Count;
1920 }
1921 return Count;
Chris Lattner7fbe9722006-10-20 17:42:20 +00001922}
1923
Dan Gohman6d9305c2009-04-15 00:04:23 +00001924/// isHReg - Test if the given register is a physical h register.
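/// These are AH, BH, CH and DH, i.e. the members of the GR8_ABCD_H class.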
1925static bool isHReg(unsigned Reg) {
Dan Gohman4af325d2009-04-27 16:41:36 +00001926 return X86::GR8_ABCD_HRegClass.contains(Reg);
Dan Gohman6d9305c2009-04-15 00:04:23 +00001927}
1928
Anton Korobeynikovc52bedb2010-08-27 14:43:06 +00001929// Try and copy between VR128/VR64 and GR64 registers.
1930static unsigned CopyToFromAsymmetricReg(unsigned DestReg, unsigned SrcReg) {
1931 // SrcReg(VR128) -> DestReg(GR64)
1932 // SrcReg(VR64) -> DestReg(GR64)
1933 // SrcReg(GR64) -> DestReg(VR128)
1934 // SrcReg(GR64) -> DestReg(VR64)
1935
1936 if (X86::GR64RegClass.contains(DestReg)) {
1937 if (X86::VR128RegClass.contains(SrcReg)) {
1938 // Copy from a VR128 register to a GR64 register.
1939 return X86::MOVPQIto64rr;
1940 } else if (X86::VR64RegClass.contains(SrcReg)) {
1941 // Copy from a VR64 register to a GR64 register.
1942 return X86::MOVSDto64rr;
1943 }
1944 } else if (X86::GR64RegClass.contains(SrcReg)) {
1945 // Copy from a GR64 register to a VR128 register.
1946 if (X86::VR128RegClass.contains(DestReg))
1947 return X86::MOV64toPQIrr;
1948 // Copy from a GR64 register to a VR64 register.
1949 else if (X86::VR64RegClass.contains(DestReg))
1950 return X86::MOV64toSDrr;
1951 }
1952
1953 return 0;
1954}
1955
Jakob Stoklund Olesen320bdcb2010-07-08 19:46:25 +00001956void X86InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
1957 MachineBasicBlock::iterator MI, DebugLoc DL,
1958 unsigned DestReg, unsigned SrcReg,
1959 bool KillSrc) const {
1960 // First deal with the normal symmetric copies.
1961 unsigned Opc = 0;
1962 if (X86::GR64RegClass.contains(DestReg, SrcReg))
1963 Opc = X86::MOV64rr;
1964 else if (X86::GR32RegClass.contains(DestReg, SrcReg))
1965 Opc = X86::MOV32rr;
1966 else if (X86::GR16RegClass.contains(DestReg, SrcReg))
1967 Opc = X86::MOV16rr;
1968 else if (X86::GR8RegClass.contains(DestReg, SrcReg)) {
1969 // Copying to or from a physical H register on x86-64 requires a NOREX
1970 // move. Otherwise use a normal move.
1971 if ((isHReg(DestReg) || isHReg(SrcReg)) &&
1972 TM.getSubtarget<X86Subtarget>().is64Bit())
1973 Opc = X86::MOV8rr_NOREX;
1974 else
1975 Opc = X86::MOV8rr;
1976 } else if (X86::VR128RegClass.contains(DestReg, SrcReg))
1977 Opc = X86::MOVAPSrr;
Jakob Stoklund Olesen61c8ecc2010-07-08 22:30:35 +00001978 else if (X86::VR64RegClass.contains(DestReg, SrcReg))
1979 Opc = X86::MMX_MOVQ64rr;
Anton Korobeynikovc52bedb2010-08-27 14:43:06 +00001980 else
1981 Opc = CopyToFromAsymmetricReg(DestReg, SrcReg);
Jakob Stoklund Olesen320bdcb2010-07-08 19:46:25 +00001982
1983 if (Opc) {
1984 BuildMI(MBB, MI, DL, get(Opc), DestReg)
1985 .addReg(SrcReg, getKillRegState(KillSrc));
1986 return;
1987 }
1988
1989 // Moving EFLAGS to / from another register requires a push and a pop.
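  // For example (a sketch of the 32-bit case), copying EFLAGS into EAX is
  // emitted as:
  //   pushfl
  //   popl %eax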
1990 if (SrcReg == X86::EFLAGS) {
1991 if (X86::GR64RegClass.contains(DestReg)) {
1992 BuildMI(MBB, MI, DL, get(X86::PUSHF64));
1993 BuildMI(MBB, MI, DL, get(X86::POP64r), DestReg);
1994 return;
1995 } else if (X86::GR32RegClass.contains(DestReg)) {
1996 BuildMI(MBB, MI, DL, get(X86::PUSHF32));
1997 BuildMI(MBB, MI, DL, get(X86::POP32r), DestReg);
1998 return;
1999 }
2000 }
2001 if (DestReg == X86::EFLAGS) {
2002 if (X86::GR64RegClass.contains(SrcReg)) {
2003 BuildMI(MBB, MI, DL, get(X86::PUSH64r))
2004 .addReg(SrcReg, getKillRegState(KillSrc));
2005 BuildMI(MBB, MI, DL, get(X86::POPF64));
2006 return;
2007 } else if (X86::GR32RegClass.contains(SrcReg)) {
2008 BuildMI(MBB, MI, DL, get(X86::PUSH32r))
2009 .addReg(SrcReg, getKillRegState(KillSrc));
2010 BuildMI(MBB, MI, DL, get(X86::POPF32));
2011 return;
2012 }
2013 }
2014
2015 DEBUG(dbgs() << "Cannot copy " << RI.getName(SrcReg)
2016 << " to " << RI.getName(DestReg) << '\n');
2017 llvm_unreachable("Cannot emit physreg copy instruction");
2018}
2019
Rafael Espindola21d238f2010-06-12 20:13:29 +00002020static unsigned getLoadStoreRegOpcode(unsigned Reg,
2021 const TargetRegisterClass *RC,
2022 bool isStackAligned,
2023 const TargetMachine &TM,
2024 bool load) {
Rafael Espindola5a717a32010-07-12 03:43:04 +00002025 switch (RC->getID()) {
2026 default:
2027 llvm_unreachable("Unknown regclass");
2028 case X86::GR64RegClassID:
2029 case X86::GR64_NOSPRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002030 return load ? X86::MOV64rm : X86::MOV64mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002031 case X86::GR32RegClassID:
2032 case X86::GR32_NOSPRegClassID:
2033 case X86::GR32_ADRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002034 return load ? X86::MOV32rm : X86::MOV32mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002035 case X86::GR16RegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002036 return load ? X86::MOV16rm : X86::MOV16mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002037 case X86::GR8RegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002038 // Copying to or from a physical H register on x86-64 requires a NOREX
2039 // move. Otherwise use a normal move.
2040 if (isHReg(Reg) &&
2041 TM.getSubtarget<X86Subtarget>().is64Bit())
2042 return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
2043 else
2044 return load ? X86::MOV8rm : X86::MOV8mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002045 case X86::GR64_ABCDRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002046 return load ? X86::MOV64rm : X86::MOV64mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002047 case X86::GR32_ABCDRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002048 return load ? X86::MOV32rm : X86::MOV32mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002049 case X86::GR16_ABCDRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002050 return load ? X86::MOV16rm : X86::MOV16mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002051 case X86::GR8_ABCD_LRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002052 return load ? X86::MOV8rm :X86::MOV8mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002053 case X86::GR8_ABCD_HRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002054 if (TM.getSubtarget<X86Subtarget>().is64Bit())
2055 return load ? X86::MOV8rm_NOREX : X86::MOV8mr_NOREX;
2056 else
2057 return load ? X86::MOV8rm : X86::MOV8mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002058 case X86::GR64_NOREXRegClassID:
2059 case X86::GR64_NOREX_NOSPRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002060 return load ? X86::MOV64rm : X86::MOV64mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002061 case X86::GR32_NOREXRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002062 return load ? X86::MOV32rm : X86::MOV32mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002063 case X86::GR16_NOREXRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002064 return load ? X86::MOV16rm : X86::MOV16mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002065 case X86::GR8_NOREXRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002066 return load ? X86::MOV8rm : X86::MOV8mr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002067 case X86::GR64_TCRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002068 return load ? X86::MOV64rm_TC : X86::MOV64mr_TC;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002069 case X86::GR32_TCRegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002070 return load ? X86::MOV32rm_TC : X86::MOV32mr_TC;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002071 case X86::RFP80RegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002072 return load ? X86::LD_Fp80m : X86::ST_FpP80m;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002073 case X86::RFP64RegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002074 return load ? X86::LD_Fp64m : X86::ST_Fp64m;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002075 case X86::RFP32RegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002076 return load ? X86::LD_Fp32m : X86::ST_Fp32m;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002077 case X86::FR32RegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002078 return load ? X86::MOVSSrm : X86::MOVSSmr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002079 case X86::FR64RegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002080 return load ? X86::MOVSDrm : X86::MOVSDmr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002081 case X86::VR128RegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002082 // If the stack is 16-byte aligned we can use aligned loads and stores.
2083 if (isStackAligned)
2084 return load ? X86::MOVAPSrm : X86::MOVAPSmr;
2085 else
2086 return load ? X86::MOVUPSrm : X86::MOVUPSmr;
Rafael Espindola5a717a32010-07-12 03:43:04 +00002087 case X86::VR64RegClassID:
Rafael Espindola21d238f2010-06-12 20:13:29 +00002088 return load ? X86::MMX_MOVQ64rm : X86::MMX_MOVQ64mr;
Rafael Espindola21d238f2010-06-12 20:13:29 +00002089 }
2090}
2091
Dan Gohman4af325d2009-04-27 16:41:36 +00002092static unsigned getStoreRegOpcode(unsigned SrcReg,
2093 const TargetRegisterClass *RC,
2094 bool isStackAligned,
2095 TargetMachine &TM) {
Rafael Espindola21d238f2010-06-12 20:13:29 +00002096 return getLoadStoreRegOpcode(SrcReg, RC, isStackAligned, TM, false);
2097}
Owen Andersonf6372aa2008-01-01 21:11:32 +00002098
Rafael Espindola21d238f2010-06-12 20:13:29 +00002099
2100static unsigned getLoadRegOpcode(unsigned DestReg,
2101 const TargetRegisterClass *RC,
2102 bool isStackAligned,
2103 const TargetMachine &TM) {
2104 return getLoadStoreRegOpcode(DestReg, RC, isStackAligned, TM, true);
Owen Andersonf6372aa2008-01-01 21:11:32 +00002105}
2106
2107void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
2108 MachineBasicBlock::iterator MI,
2109 unsigned SrcReg, bool isKill, int FrameIdx,
Evan Cheng746ad692010-05-06 19:06:44 +00002110 const TargetRegisterClass *RC,
2111 const TargetRegisterInfo *TRI) const {
Anton Korobeynikov88bbf692008-07-19 06:30:51 +00002112 const MachineFunction &MF = *MBB.getParent();
Jakob Stoklund Olesen516cd452010-07-27 04:16:58 +00002113 assert(MF.getFrameInfo()->getObjectSize(FrameIdx) >= RC->getSize() &&
2114 "Stack slot too small for store");
Jim Grosbache45ab8a2010-01-19 18:31:11 +00002115 bool isAligned = (RI.getStackAlignment() >= 16) || RI.canRealignStack(MF);
Dan Gohman4af325d2009-04-27 16:41:36 +00002116 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
Dale Johannesen6ec25f52010-01-26 00:03:12 +00002117 DebugLoc DL = MBB.findDebugLoc(MI);
Bill Wendlingfbef3102009-02-11 21:51:19 +00002118 addFrameReference(BuildMI(MBB, MI, DL, get(Opc)), FrameIdx)
Bill Wendling587daed2009-05-13 21:33:08 +00002119 .addReg(SrcReg, getKillRegState(isKill));
Owen Andersonf6372aa2008-01-01 21:11:32 +00002120}
2121
2122void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
2123 bool isKill,
2124 SmallVectorImpl<MachineOperand> &Addr,
2125 const TargetRegisterClass *RC,
Dan Gohman91e69c32009-10-09 18:10:05 +00002126 MachineInstr::mmo_iterator MMOBegin,
2127 MachineInstr::mmo_iterator MMOEnd,
Owen Andersonf6372aa2008-01-01 21:11:32 +00002128 SmallVectorImpl<MachineInstr*> &NewMIs) const {
Dan Gohmaned42f1e2010-07-12 18:12:35 +00002129 bool isAligned = MMOBegin != MMOEnd && (*MMOBegin)->getAlignment() >= 16;
Dan Gohman4af325d2009-04-27 16:41:36 +00002130 unsigned Opc = getStoreRegOpcode(SrcReg, RC, isAligned, TM);
Chris Lattnerc7f3ace2010-04-02 20:16:16 +00002131 DebugLoc DL;
Dale Johannesen21b55412009-02-12 23:08:38 +00002132 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc));
Owen Andersonf6372aa2008-01-01 21:11:32 +00002133 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
Dan Gohman97357612009-02-18 05:45:50 +00002134 MIB.addOperand(Addr[i]);
Bill Wendling587daed2009-05-13 21:33:08 +00002135 MIB.addReg(SrcReg, getKillRegState(isKill));
Dan Gohman91e69c32009-10-09 18:10:05 +00002136 (*MIB).setMemRefs(MMOBegin, MMOEnd);
Owen Andersonf6372aa2008-01-01 21:11:32 +00002137 NewMIs.push_back(MIB);
2138}
2139
Owen Andersonf6372aa2008-01-01 21:11:32 +00002140
2141void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
Anton Korobeynikov88bbf692008-07-19 06:30:51 +00002142 MachineBasicBlock::iterator MI,
2143 unsigned DestReg, int FrameIdx,
Evan Cheng746ad692010-05-06 19:06:44 +00002144 const TargetRegisterClass *RC,
2145 const TargetRegisterInfo *TRI) const {
Anton Korobeynikov88bbf692008-07-19 06:30:51 +00002146 const MachineFunction &MF = *MBB.getParent();
Jim Grosbache45ab8a2010-01-19 18:31:11 +00002147 bool isAligned = (RI.getStackAlignment() >= 16) || RI.canRealignStack(MF);
Dan Gohman4af325d2009-04-27 16:41:36 +00002148 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
Dale Johannesen6ec25f52010-01-26 00:03:12 +00002149 DebugLoc DL = MBB.findDebugLoc(MI);
Bill Wendlingfbef3102009-02-11 21:51:19 +00002150 addFrameReference(BuildMI(MBB, MI, DL, get(Opc), DestReg), FrameIdx);
Owen Andersonf6372aa2008-01-01 21:11:32 +00002151}
2152
2153void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
Evan Cheng9f1c8312008-07-03 09:09:37 +00002154 SmallVectorImpl<MachineOperand> &Addr,
2155 const TargetRegisterClass *RC,
Dan Gohman91e69c32009-10-09 18:10:05 +00002156 MachineInstr::mmo_iterator MMOBegin,
2157 MachineInstr::mmo_iterator MMOEnd,
Owen Andersonf6372aa2008-01-01 21:11:32 +00002158 SmallVectorImpl<MachineInstr*> &NewMIs) const {
Dan Gohmaned42f1e2010-07-12 18:12:35 +00002159 bool isAligned = MMOBegin != MMOEnd && (*MMOBegin)->getAlignment() >= 16;
Dan Gohman4af325d2009-04-27 16:41:36 +00002160 unsigned Opc = getLoadRegOpcode(DestReg, RC, isAligned, TM);
Chris Lattnerc7f3ace2010-04-02 20:16:16 +00002161 DebugLoc DL;
Dale Johannesen21b55412009-02-12 23:08:38 +00002162 MachineInstrBuilder MIB = BuildMI(MF, DL, get(Opc), DestReg);
Owen Andersonf6372aa2008-01-01 21:11:32 +00002163 for (unsigned i = 0, e = Addr.size(); i != e; ++i)
Dan Gohman97357612009-02-18 05:45:50 +00002164 MIB.addOperand(Addr[i]);
Dan Gohman91e69c32009-10-09 18:10:05 +00002165 (*MIB).setMemRefs(MMOBegin, MMOEnd);
Owen Andersonf6372aa2008-01-01 21:11:32 +00002166 NewMIs.push_back(MIB);
2167}
2168
Owen Andersond94b6a12008-01-04 23:57:37 +00002169bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
Bill Wendlingfbef3102009-02-11 21:51:19 +00002170 MachineBasicBlock::iterator MI,
Evan Cheng2457f2c2010-05-22 01:47:14 +00002171 const std::vector<CalleeSavedInfo> &CSI,
2172 const TargetRegisterInfo *TRI) const {
Owen Andersond94b6a12008-01-04 23:57:37 +00002173 if (CSI.empty())
2174 return false;
2175
Dale Johannesen73e884b2010-01-20 21:36:02 +00002176 DebugLoc DL = MBB.findDebugLoc(MI);
Bill Wendlingfbef3102009-02-11 21:51:19 +00002177
Evan Chenga67f32a2008-09-26 19:14:21 +00002178 bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
Rafael Espindolafcbd1a72010-07-21 23:19:57 +00002179 bool isWin64 = TM.getSubtarget<X86Subtarget>().isTargetWin64();
Anton Korobeynikovc4e8bec2008-10-04 11:09:36 +00002180 unsigned SlotSize = is64Bit ? 8 : 4;
2181
2182 MachineFunction &MF = *MBB.getParent();
Evan Cheng910139f2009-07-09 06:53:48 +00002183 unsigned FPReg = RI.getFrameRegister(MF);
Anton Korobeynikovc4e8bec2008-10-04 11:09:36 +00002184 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
Eli Friedmanbccf4b32009-06-04 02:32:04 +00002185 unsigned CalleeFrameSize = 0;
Anton Korobeynikovc4e8bec2008-10-04 11:09:36 +00002186
Owen Andersond94b6a12008-01-04 23:57:37 +00002187 unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
2188 for (unsigned i = CSI.size(); i != 0; --i) {
2189 unsigned Reg = CSI[i-1].getReg();
2190 // Add the callee-saved register as live-in. It's killed at the spill.
2191 MBB.addLiveIn(Reg);
Evan Cheng910139f2009-07-09 06:53:48 +00002192 if (Reg == FPReg)
2193 // X86RegisterInfo::emitPrologue will handle spilling of frame register.
2194 continue;
Rafael Espindolafcbd1a72010-07-21 23:19:57 +00002195 if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
Eli Friedmanbccf4b32009-06-04 02:32:04 +00002196 CalleeFrameSize += SlotSize;
Evan Cheng910139f2009-07-09 06:53:48 +00002197 BuildMI(MBB, MI, DL, get(Opc)).addReg(Reg, RegState::Kill);
Eli Friedmanbccf4b32009-06-04 02:32:04 +00002198 } else {
Rafael Espindolafcbd1a72010-07-21 23:19:57 +00002199 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
Rafael Espindola42d075c2010-06-02 20:02:30 +00002200 storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
Rafael Espindolafcbd1a72010-07-21 23:19:57 +00002201 RC, &RI);
Eli Friedmanbccf4b32009-06-04 02:32:04 +00002202 }
Owen Andersond94b6a12008-01-04 23:57:37 +00002203 }
Eli Friedmanbccf4b32009-06-04 02:32:04 +00002204
2205 X86FI->setCalleeSavedFrameSize(CalleeFrameSize);
Owen Andersond94b6a12008-01-04 23:57:37 +00002206 return true;
2207}
2208
2209bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
Bill Wendlingfbef3102009-02-11 21:51:19 +00002210 MachineBasicBlock::iterator MI,
Evan Cheng2457f2c2010-05-22 01:47:14 +00002211 const std::vector<CalleeSavedInfo> &CSI,
2212 const TargetRegisterInfo *TRI) const {
Owen Andersond94b6a12008-01-04 23:57:37 +00002213 if (CSI.empty())
2214 return false;
Bill Wendlingfbef3102009-02-11 21:51:19 +00002215
Dale Johannesen73e884b2010-01-20 21:36:02 +00002216 DebugLoc DL = MBB.findDebugLoc(MI);
Bill Wendlingfbef3102009-02-11 21:51:19 +00002217
Evan Cheng910139f2009-07-09 06:53:48 +00002218 MachineFunction &MF = *MBB.getParent();
2219 unsigned FPReg = RI.getFrameRegister(MF);
Owen Andersond94b6a12008-01-04 23:57:37 +00002220 bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
Rafael Espindolafcbd1a72010-07-21 23:19:57 +00002221 bool isWin64 = TM.getSubtarget<X86Subtarget>().isTargetWin64();
Owen Andersond94b6a12008-01-04 23:57:37 +00002222 unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
2223 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
2224 unsigned Reg = CSI[i].getReg();
Evan Cheng910139f2009-07-09 06:53:48 +00002225 if (Reg == FPReg)
2226 // X86RegisterInfo::emitEpilogue will handle restoring of the frame register.
2227 continue;
Rafael Espindolafcbd1a72010-07-21 23:19:57 +00002228 if (!X86::VR128RegClass.contains(Reg) && !isWin64) {
Eli Friedmanbccf4b32009-06-04 02:32:04 +00002229 BuildMI(MBB, MI, DL, get(Opc), Reg);
2230 } else {
Rafael Espindolafcbd1a72010-07-21 23:19:57 +00002231 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
Rafael Espindola42d075c2010-06-02 20:02:30 +00002232 loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
Rafael Espindolafcbd1a72010-07-21 23:19:57 +00002233 RC, &RI);
Eli Friedmanbccf4b32009-06-04 02:32:04 +00002234 }
Owen Andersond94b6a12008-01-04 23:57:37 +00002235 }
2236 return true;
2237}
2238
Evan Cheng962021b2010-04-26 07:38:55 +00002239MachineInstr*
2240X86InstrInfo::emitFrameIndexDebugValue(MachineFunction &MF,
Evan Cheng8601a3d2010-04-29 01:13:30 +00002241 int FrameIx, uint64_t Offset,
Evan Cheng962021b2010-04-26 07:38:55 +00002242 const MDNode *MDPtr,
2243 DebugLoc DL) const {
Evan Cheng962021b2010-04-26 07:38:55 +00002244 X86AddressMode AM;
2245 AM.BaseType = X86AddressMode::FrameIndexBase;
2246 AM.Base.FrameIndex = FrameIx;
2247 MachineInstrBuilder MIB = BuildMI(MF, DL, get(X86::DBG_VALUE));
2248 addFullAddress(MIB, AM).addImm(Offset).addMetadata(MDPtr);
2249 return &*MIB;
2250}
2251
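// FuseTwoAddrInst - Helper for foldMemoryOperandImpl: build a new instruction
// with opcode Opcode in which the tied destination/source register pair of the
// two-address instruction MI is replaced by the memory reference described by
// MOs (a frame index or a full x86 address), copying the remaining operands.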
Dan Gohman8e5f2c62008-07-07 23:14:23 +00002252static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
Dan Gohmand68a0762009-01-05 17:59:02 +00002253 const SmallVectorImpl<MachineOperand> &MOs,
Bill Wendling9bc96a52009-02-03 00:55:04 +00002254 MachineInstr *MI,
2255 const TargetInstrInfo &TII) {
Owen Anderson43dbe052008-01-07 01:35:02 +00002256 // Create the base instruction with the memory operand as the first part.
Bill Wendling9bc96a52009-02-03 00:55:04 +00002257 MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
2258 MI->getDebugLoc(), true);
Owen Anderson43dbe052008-01-07 01:35:02 +00002259 MachineInstrBuilder MIB(NewMI);
2260 unsigned NumAddrOps = MOs.size();
2261 for (unsigned i = 0; i != NumAddrOps; ++i)
Dan Gohman97357612009-02-18 05:45:50 +00002262 MIB.addOperand(MOs[i]);
Owen Anderson43dbe052008-01-07 01:35:02 +00002263 if (NumAddrOps < 4) // FrameIndex only
Rafael Espindola094fad32009-04-08 21:14:34 +00002264 addOffset(MIB, 0);
Owen Anderson43dbe052008-01-07 01:35:02 +00002265
2266 // Loop over the rest of the ri operands, converting them over.
Chris Lattner749c6f62008-01-07 07:27:27 +00002267 unsigned NumOps = MI->getDesc().getNumOperands()-2;
Owen Anderson43dbe052008-01-07 01:35:02 +00002268 for (unsigned i = 0; i != NumOps; ++i) {
2269 MachineOperand &MO = MI->getOperand(i+2);
Dan Gohman97357612009-02-18 05:45:50 +00002270 MIB.addOperand(MO);
Owen Anderson43dbe052008-01-07 01:35:02 +00002271 }
2272 for (unsigned i = NumOps+2, e = MI->getNumOperands(); i != e; ++i) {
2273 MachineOperand &MO = MI->getOperand(i);
Dan Gohman97357612009-02-18 05:45:50 +00002274 MIB.addOperand(MO);
Owen Anderson43dbe052008-01-07 01:35:02 +00002275 }
2276 return MIB;
2277}
2278
Dan Gohman8e5f2c62008-07-07 23:14:23 +00002279static MachineInstr *FuseInst(MachineFunction &MF,
2280 unsigned Opcode, unsigned OpNo,
Dan Gohmand68a0762009-01-05 17:59:02 +00002281 const SmallVectorImpl<MachineOperand> &MOs,
Owen Anderson43dbe052008-01-07 01:35:02 +00002282 MachineInstr *MI, const TargetInstrInfo &TII) {
Bill Wendling9bc96a52009-02-03 00:55:04 +00002283 MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
2284 MI->getDebugLoc(), true);
Owen Anderson43dbe052008-01-07 01:35:02 +00002285 MachineInstrBuilder MIB(NewMI);
2286
2287 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2288 MachineOperand &MO = MI->getOperand(i);
2289 if (i == OpNo) {
Dan Gohmand735b802008-10-03 15:45:36 +00002290 assert(MO.isReg() && "Expected to fold into reg operand!");
Owen Anderson43dbe052008-01-07 01:35:02 +00002291 unsigned NumAddrOps = MOs.size();
2292 for (unsigned i = 0; i != NumAddrOps; ++i)
Dan Gohman97357612009-02-18 05:45:50 +00002293 MIB.addOperand(MOs[i]);
Owen Anderson43dbe052008-01-07 01:35:02 +00002294 if (NumAddrOps < 4) // FrameIndex only
Rafael Espindola094fad32009-04-08 21:14:34 +00002295 addOffset(MIB, 0);
Owen Anderson43dbe052008-01-07 01:35:02 +00002296 } else {
Dan Gohman97357612009-02-18 05:45:50 +00002297 MIB.addOperand(MO);
Owen Anderson43dbe052008-01-07 01:35:02 +00002298 }
2299 }
2300 return MIB;
2301}
2302
2303static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
Dan Gohmand68a0762009-01-05 17:59:02 +00002304 const SmallVectorImpl<MachineOperand> &MOs,
Owen Anderson43dbe052008-01-07 01:35:02 +00002305 MachineInstr *MI) {
Dan Gohman8e5f2c62008-07-07 23:14:23 +00002306 MachineFunction &MF = *MI->getParent()->getParent();
Bill Wendlingfbef3102009-02-11 21:51:19 +00002307 MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode));
Owen Anderson43dbe052008-01-07 01:35:02 +00002308
2309 unsigned NumAddrOps = MOs.size();
2310 for (unsigned i = 0; i != NumAddrOps; ++i)
Dan Gohman97357612009-02-18 05:45:50 +00002311 MIB.addOperand(MOs[i]);
Owen Anderson43dbe052008-01-07 01:35:02 +00002312 if (NumAddrOps < 4) // FrameIndex only
Rafael Espindola094fad32009-04-08 21:14:34 +00002313 addOffset(MIB, 0);
Owen Anderson43dbe052008-01-07 01:35:02 +00002314 return MIB.addImm(0);
2315}
2316
2317MachineInstr*
Dan Gohmanc54baa22008-12-03 18:43:12 +00002318X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
2319 MachineInstr *MI, unsigned i,
Evan Chengf9b36f02009-07-15 06:10:07 +00002320 const SmallVectorImpl<MachineOperand> &MOs,
Evan Cheng9cef48e2009-09-11 00:39:26 +00002321 unsigned Size, unsigned Align) const {
Chris Lattner45a1cb22010-10-07 23:08:41 +00002322 const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0;
Owen Anderson43dbe052008-01-07 01:35:02 +00002323 bool isTwoAddrFold = false;
Chris Lattner749c6f62008-01-07 07:27:27 +00002324 unsigned NumOps = MI->getDesc().getNumOperands();
Owen Anderson43dbe052008-01-07 01:35:02 +00002325 bool isTwoAddr = NumOps > 1 &&
Chris Lattner749c6f62008-01-07 07:27:27 +00002326 MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;
Owen Anderson43dbe052008-01-07 01:35:02 +00002327
2328 MachineInstr *NewMI = NULL;
2329 // Folding a memory location into the two-address part of a two-address
2330 // instruction is different from folding it elsewhere: it requires
2331 // replacing *both* tied registers with the memory location.
2332 if (isTwoAddr && NumOps >= 2 && i < 2 &&
Dan Gohmand735b802008-10-03 15:45:36 +00002333 MI->getOperand(0).isReg() &&
2334 MI->getOperand(1).isReg() &&
Owen Anderson43dbe052008-01-07 01:35:02 +00002335 MI->getOperand(0).getReg() == MI->getOperand(1).getReg()) {
2336 OpcodeTablePtr = &RegOp2MemOpTable2Addr;
2337 isTwoAddrFold = true;
2338 } else if (i == 0) { // If operand 0
Dan Gohmanf1b4d262010-01-12 04:42:54 +00002339 if (MI->getOpcode() == X86::MOV64r0)
2340 NewMI = MakeM0Inst(*this, X86::MOV64mi32, MOs, MI);
2341 else if (MI->getOpcode() == X86::MOV32r0)
Owen Anderson43dbe052008-01-07 01:35:02 +00002342 NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI);
Dan Gohmanf1b4d262010-01-12 04:42:54 +00002343 else if (MI->getOpcode() == X86::MOV16r0)
2344 NewMI = MakeM0Inst(*this, X86::MOV16mi, MOs, MI);
Owen Anderson43dbe052008-01-07 01:35:02 +00002345 else if (MI->getOpcode() == X86::MOV8r0)
2346 NewMI = MakeM0Inst(*this, X86::MOV8mi, MOs, MI);
Evan Cheng9f1c8312008-07-03 09:09:37 +00002347 if (NewMI)
Owen Anderson43dbe052008-01-07 01:35:02 +00002348 return NewMI;
Owen Anderson43dbe052008-01-07 01:35:02 +00002349
2350 OpcodeTablePtr = &RegOp2MemOpTable0;
2351 } else if (i == 1) {
2352 OpcodeTablePtr = &RegOp2MemOpTable1;
2353 } else if (i == 2) {
2354 OpcodeTablePtr = &RegOp2MemOpTable2;
2355 }
2356
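  // RegOp2MemOpTable0/1/2 map a register-form opcode to the memory-form opcode
  // to use when the fold happens at operand 0, 1 or 2 respectively;
  // RegOp2MemOpTable2Addr covers folds that replace both tied operands at once.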
2357 // If table selected...
2358 if (OpcodeTablePtr) {
2359 // Find the Opcode to fuse
Chris Lattner45a1cb22010-10-07 23:08:41 +00002360 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
2361 OpcodeTablePtr->find(MI->getOpcode());
Owen Anderson43dbe052008-01-07 01:35:02 +00002362 if (I != OpcodeTablePtr->end()) {
Evan Cheng9cef48e2009-09-11 00:39:26 +00002363 unsigned Opcode = I->second.first;
Evan Chengf9b36f02009-07-15 06:10:07 +00002364 unsigned MinAlign = I->second.second;
2365 if (Align < MinAlign)
2366 return NULL;
Evan Cheng879caea2009-09-11 01:01:31 +00002367 bool NarrowToMOV32rm = false;
Evan Cheng9cef48e2009-09-11 00:39:26 +00002368 if (Size) {
2369 unsigned RCSize = MI->getDesc().OpInfo[i].getRegClass(&RI)->getSize();
2370 if (Size < RCSize) {
2371 // Check if it's safe to fold the load. If the size of the object is
2372 // narrower than the load width, then it's not.
2373 if (Opcode != X86::MOV64rm || RCSize != 8 || Size != 4)
2374 return NULL;
2375 // If this is a 64-bit load, but the spill slot is only 32 bits, then we
2376 // can do a 32-bit load which is implicitly zero-extended. This is likely
2377 // due to live interval analysis rematerializing a load from a stack slot.
Evan Cheng879caea2009-09-11 01:01:31 +00002378 if (MI->getOperand(0).getSubReg() || MI->getOperand(1).getSubReg())
2379 return NULL;
Evan Cheng9cef48e2009-09-11 00:39:26 +00002380 Opcode = X86::MOV32rm;
Evan Cheng879caea2009-09-11 01:01:31 +00002381 NarrowToMOV32rm = true;
Evan Cheng9cef48e2009-09-11 00:39:26 +00002382 }
2383 }
2384
Owen Anderson43dbe052008-01-07 01:35:02 +00002385 if (isTwoAddrFold)
Evan Cheng9cef48e2009-09-11 00:39:26 +00002386 NewMI = FuseTwoAddrInst(MF, Opcode, MOs, MI, *this);
Owen Anderson43dbe052008-01-07 01:35:02 +00002387 else
Evan Cheng9cef48e2009-09-11 00:39:26 +00002388 NewMI = FuseInst(MF, Opcode, i, MOs, MI, *this);
Evan Cheng879caea2009-09-11 01:01:31 +00002389
2390 if (NarrowToMOV32rm) {
2391 // This is the special case where we use a MOV32rm to load a 32-bit
2392 // value and implicitly zero-extend the top bits; change the destination
2393 // register to a 32-bit one to match.
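      // For example (illustrative registers), folding a 4-byte spill slot into
      // "%rax = MOV64rm <fi>" is instead emitted as "%eax = MOV32rm <fi>",
      // since a write to a 32-bit register zeroes the upper 32 bits of the
      // full 64-bit register.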
2394 unsigned DstReg = NewMI->getOperand(0).getReg();
2395 if (TargetRegisterInfo::isPhysicalRegister(DstReg))
2396 NewMI->getOperand(0).setReg(RI.getSubReg(DstReg,
Jakob Stoklund Olesen3458e9e2010-05-24 14:48:17 +00002397 X86::sub_32bit));
Evan Cheng879caea2009-09-11 01:01:31 +00002398 else
Jakob Stoklund Olesen3458e9e2010-05-24 14:48:17 +00002399 NewMI->getOperand(0).setSubReg(X86::sub_32bit);
Evan Cheng879caea2009-09-11 01:01:31 +00002400 }
Owen Anderson43dbe052008-01-07 01:35:02 +00002401 return NewMI;
2402 }
2403 }
2404
2405 // No fusion
Jakob Stoklund Olesen9c50e8b2010-07-09 20:43:09 +00002406 if (PrintFailedFusing && !MI->isCopy())
David Greene5b901322010-01-05 01:29:29 +00002407 dbgs() << "We failed to fuse operand " << i << " in " << *MI;
Owen Anderson43dbe052008-01-07 01:35:02 +00002408 return NULL;
2409}
2410
2411
Dan Gohmanc54baa22008-12-03 18:43:12 +00002412MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
2413 MachineInstr *MI,
Evan Chengf9b36f02009-07-15 06:10:07 +00002414 const SmallVectorImpl<unsigned> &Ops,
Dan Gohmanc54baa22008-12-03 18:43:12 +00002415 int FrameIndex) const {
Owen Anderson43dbe052008-01-07 01:35:02 +00002416 // Check switch flag
2417 if (NoFusing) return NULL;
2418
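  // Unless optimizing for size, do not fold loads into these scalar SSE
  // instructions; they update only part of their destination register, and
  // the unfolded form gives the backend more room to avoid the resulting
  // partial-register-update stalls.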
Evan Chengb1f49812009-12-22 17:47:23 +00002419 if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
Evan Cheng400073d2009-12-18 07:40:29 +00002420 switch (MI->getOpcode()) {
2421 case X86::CVTSD2SSrr:
2422 case X86::Int_CVTSD2SSrr:
2423 case X86::CVTSS2SDrr:
2424 case X86::Int_CVTSS2SDrr:
2425 case X86::RCPSSr:
2426 case X86::RCPSSr_Int:
Chris Lattnerb2ef4c12010-09-29 02:57:56 +00002427 case X86::ROUNDSDr:
2428 case X86::ROUNDSSr:
Evan Cheng400073d2009-12-18 07:40:29 +00002429 case X86::RSQRTSSr:
2430 case X86::RSQRTSSr_Int:
2431 case X86::SQRTSSr:
2432 case X86::SQRTSSr_Int:
2433 return 0;
2434 }
2435
Evan Cheng5fd79d02008-02-08 21:20:40 +00002436 const MachineFrameInfo *MFI = MF.getFrameInfo();
Evan Cheng9cef48e2009-09-11 00:39:26 +00002437 unsigned Size = MFI->getObjectSize(FrameIndex);
Evan Cheng5fd79d02008-02-08 21:20:40 +00002438 unsigned Alignment = MFI->getObjectAlignment(FrameIndex);
Owen Anderson43dbe052008-01-07 01:35:02 +00002439 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
2440 unsigned NewOpc = 0;
Evan Cheng9cef48e2009-09-11 00:39:26 +00002441 unsigned RCSize = 0;
Owen Anderson43dbe052008-01-07 01:35:02 +00002442 switch (MI->getOpcode()) {
2443 default: return NULL;
Evan Cheng9cef48e2009-09-11 00:39:26 +00002444 case X86::TEST8rr: NewOpc = X86::CMP8ri; RCSize = 1; break;
Dan Gohmane5efbaf2010-05-18 21:42:03 +00002445 case X86::TEST16rr: NewOpc = X86::CMP16ri8; RCSize = 2; break;
2446 case X86::TEST32rr: NewOpc = X86::CMP32ri8; RCSize = 4; break;
2447 case X86::TEST64rr: NewOpc = X86::CMP64ri8; RCSize = 8; break;
Owen Anderson43dbe052008-01-07 01:35:02 +00002448 }
Evan Cheng9cef48e2009-09-11 00:39:26 +00002449 // Check if it's safe to fold the load. If the size of the object is
2450 // narrower than the load width, then it's not.
2451 if (Size < RCSize)
2452 return NULL;
Owen Anderson43dbe052008-01-07 01:35:02 +00002453 // Change to CMPXXri r, 0 first.
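    // e.g. "TEST32rr %reg, %reg" becomes "CMP32ri8 %reg, 0" here, and folding
    // the register operand into the frame index below should then produce
    // something like "CMP32mi8 <fi>, 0".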
Chris Lattner5080f4d2008-01-11 18:10:50 +00002454 MI->setDesc(get(NewOpc));
Owen Anderson43dbe052008-01-07 01:35:02 +00002455 MI->getOperand(1).ChangeToImmediate(0);
2456 } else if (Ops.size() != 1)
2457 return NULL;
2458
2459 SmallVector<MachineOperand,4> MOs;
2460 MOs.push_back(MachineOperand::CreateFI(FrameIndex));
Evan Cheng9cef48e2009-09-11 00:39:26 +00002461 return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, Size, Alignment);
Owen Anderson43dbe052008-01-07 01:35:02 +00002462}
2463
Dan Gohmanc54baa22008-12-03 18:43:12 +00002464MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
2465 MachineInstr *MI,
Evan Chengf9b36f02009-07-15 06:10:07 +00002466 const SmallVectorImpl<unsigned> &Ops,
Dan Gohmanc54baa22008-12-03 18:43:12 +00002467 MachineInstr *LoadMI) const {
Owen Anderson43dbe052008-01-07 01:35:02 +00002468 // Check switch flag
2469 if (NoFusing) return NULL;
2470
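  // Same partial-register-update concern as in the frame-index variant above.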
Evan Chengb1f49812009-12-22 17:47:23 +00002471 if (!MF.getFunction()->hasFnAttr(Attribute::OptimizeForSize))
Evan Cheng400073d2009-12-18 07:40:29 +00002472 switch (MI->getOpcode()) {
2473 case X86::CVTSD2SSrr:
2474 case X86::Int_CVTSD2SSrr:
2475 case X86::CVTSS2SDrr:
2476 case X86::Int_CVTSS2SDrr:
2477 case X86::RCPSSr:
2478 case X86::RCPSSr_Int:
Chris Lattnerb2ef4c12010-09-29 02:57:56 +00002479 case X86::ROUNDSDr:
2480 case X86::ROUNDSSr:
Evan Cheng400073d2009-12-18 07:40:29 +00002481 case X86::RSQRTSSr:
2482 case X86::RSQRTSSr_Int:
2483 case X86::SQRTSSr:
2484 case X86::SQRTSSr_Int:
2485 return 0;
2486 }
2487
Dan Gohmancddc11e2008-07-12 00:10:52 +00002488 // Determine the alignment of the load.
Evan Cheng5fd79d02008-02-08 21:20:40 +00002489 unsigned Alignment = 0;
Dan Gohmancddc11e2008-07-12 00:10:52 +00002490 if (LoadMI->hasOneMemOperand())
Dan Gohmanc76909a2009-09-25 20:36:54 +00002491 Alignment = (*LoadMI->memoperands_begin())->getAlignment();
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002492 else
2493 switch (LoadMI->getOpcode()) {
Bruno Cardoso Lopes642eb022010-08-12 20:20:53 +00002494 case X86::AVX_SET0PSY:
2495 case X86::AVX_SET0PDY:
2496 Alignment = 32;
2497 break;
Jakob Stoklund Olesend363b4e2010-03-31 00:40:13 +00002498 case X86::V_SET0PS:
2499 case X86::V_SET0PD:
2500 case X86::V_SET0PI:
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002501 case X86::V_SETALLONES:
Bruno Cardoso Lopes642eb022010-08-12 20:20:53 +00002502 case X86::AVX_SET0PS:
2503 case X86::AVX_SET0PD:
2504 case X86::AVX_SET0PI:
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002505 Alignment = 16;
2506 break;
2507 case X86::FsFLD0SD:
2508 Alignment = 8;
2509 break;
2510 case X86::FsFLD0SS:
2511 Alignment = 4;
2512 break;
2513 default:
2514 llvm_unreachable("Don't know how to fold this instruction!");
2515 }
Owen Anderson43dbe052008-01-07 01:35:02 +00002516 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
2517 unsigned NewOpc = 0;
2518 switch (MI->getOpcode()) {
2519 default: return NULL;
2520 case X86::TEST8rr: NewOpc = X86::CMP8ri; break;
Dan Gohmanf8c1ef02010-05-18 21:54:15 +00002521 case X86::TEST16rr: NewOpc = X86::CMP16ri8; break;
2522 case X86::TEST32rr: NewOpc = X86::CMP32ri8; break;
2523 case X86::TEST64rr: NewOpc = X86::CMP64ri8; break;
Owen Anderson43dbe052008-01-07 01:35:02 +00002524 }
2525 // Change to CMPXXri r, 0 first.
Chris Lattner5080f4d2008-01-11 18:10:50 +00002526 MI->setDesc(get(NewOpc));
Owen Anderson43dbe052008-01-07 01:35:02 +00002527 MI->getOperand(1).ChangeToImmediate(0);
2528 } else if (Ops.size() != 1)
2529 return NULL;
2530
Jakob Stoklund Olesend29583b2010-08-11 23:08:22 +00002531 // Make sure the subregisters match.
2532 // Otherwise we risk changing the size of the load.
2533 if (LoadMI->getOperand(0).getSubReg() != MI->getOperand(Ops[0]).getSubReg())
2534 return NULL;
2535
Chris Lattnerac0ed5d2010-07-08 22:41:28 +00002536 SmallVector<MachineOperand,X86::AddrNumOperands> MOs;
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002537 switch (LoadMI->getOpcode()) {
Jakob Stoklund Olesend363b4e2010-03-31 00:40:13 +00002538 case X86::V_SET0PS:
2539 case X86::V_SET0PD:
2540 case X86::V_SET0PI:
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002541 case X86::V_SETALLONES:
Bruno Cardoso Lopes642eb022010-08-12 20:20:53 +00002542 case X86::AVX_SET0PS:
2543 case X86::AVX_SET0PD:
2544 case X86::AVX_SET0PI:
2545 case X86::AVX_SET0PSY:
2546 case X86::AVX_SET0PDY:
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002547 case X86::FsFLD0SD:
2548 case X86::FsFLD0SS: {
Jakob Stoklund Olesend363b4e2010-03-31 00:40:13 +00002549 // Folding a V_SET0P? or V_SETALLONES as a load, to ease register pressure.
Dan Gohman62c939d2008-12-03 05:21:24 +00002550 // Create a constant-pool entry and operands to load from it.
2551
Dan Gohman81d0c362010-03-09 03:01:40 +00002552 // Medium and large mode can't fold loads this way.
2553 if (TM.getCodeModel() != CodeModel::Small &&
2554 TM.getCodeModel() != CodeModel::Kernel)
2555 return NULL;
2556
Dan Gohman62c939d2008-12-03 05:21:24 +00002557 // x86-32 PIC requires a PIC base register for constant pools.
2558 unsigned PICBase = 0;
Jakob Stoklund Olesen93e55de2009-07-16 21:24:13 +00002559 if (TM.getRelocationModel() == Reloc::PIC_) {
Evan Cheng2b48ab92009-07-16 18:44:05 +00002560 if (TM.getSubtarget<X86Subtarget>().is64Bit())
2561 PICBase = X86::RIP;
Jakob Stoklund Olesen93e55de2009-07-16 21:24:13 +00002562 else
Dan Gohman84023e02010-07-10 09:00:22 +00002563 // FIXME: PICBase = getGlobalBaseReg(&MF);
Evan Cheng2b48ab92009-07-16 18:44:05 +00002564 // This doesn't work for several reasons.
2565 // 1. GlobalBaseReg may have been spilled.
2566 // 2. It may not be live at MI.
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002567 return NULL;
Jakob Stoklund Olesen93e55de2009-07-16 21:24:13 +00002568 }
Dan Gohman62c939d2008-12-03 05:21:24 +00002569
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002570 // Create a constant-pool entry.
Dan Gohman62c939d2008-12-03 05:21:24 +00002571 MachineConstantPool &MCP = *MF.getConstantPool();
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002572 const Type *Ty;
Bruno Cardoso Lopes642eb022010-08-12 20:20:53 +00002573 unsigned Opc = LoadMI->getOpcode();
2574 if (Opc == X86::FsFLD0SS)
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002575 Ty = Type::getFloatTy(MF.getFunction()->getContext());
Bruno Cardoso Lopes642eb022010-08-12 20:20:53 +00002576 else if (Opc == X86::FsFLD0SD)
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002577 Ty = Type::getDoubleTy(MF.getFunction()->getContext());
Bruno Cardoso Lopes642eb022010-08-12 20:20:53 +00002578 else if (Opc == X86::AVX_SET0PSY || Opc == X86::AVX_SET0PDY)
2579 Ty = VectorType::get(Type::getFloatTy(MF.getFunction()->getContext()), 8);
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002580 else
2581 Ty = VectorType::get(Type::getInt32Ty(MF.getFunction()->getContext()), 4);
Dan Gohman46510a72010-04-15 01:51:59 +00002582 const Constant *C = LoadMI->getOpcode() == X86::V_SETALLONES ?
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002583 Constant::getAllOnesValue(Ty) :
2584 Constant::getNullValue(Ty);
2585 unsigned CPI = MCP.getConstantPoolIndex(C, Alignment);
Dan Gohman62c939d2008-12-03 05:21:24 +00002586
2587 // Create operands to load from the constant pool entry.
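      // An x86 memory reference is always five operands:
      //   base register, scale, index register, displacement, segment register.
      // Here that is { PICBase, 1, no-register, the constant-pool index,
      // no-register }.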
2588 MOs.push_back(MachineOperand::CreateReg(PICBase, false));
2589 MOs.push_back(MachineOperand::CreateImm(1));
2590 MOs.push_back(MachineOperand::CreateReg(0, false));
2591 MOs.push_back(MachineOperand::CreateCPI(CPI, 0));
Rafael Espindola094fad32009-04-08 21:14:34 +00002592 MOs.push_back(MachineOperand::CreateReg(0, false));
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002593 break;
2594 }
2595 default: {
Dan Gohman62c939d2008-12-03 05:21:24 +00002596 // Folding a normal load. Just copy the load's address operands.
2597 unsigned NumOps = LoadMI->getDesc().getNumOperands();
Chris Lattnerac0ed5d2010-07-08 22:41:28 +00002598 for (unsigned i = NumOps - X86::AddrNumOperands; i != NumOps; ++i)
Dan Gohman62c939d2008-12-03 05:21:24 +00002599 MOs.push_back(LoadMI->getOperand(i));
Dan Gohman4a0b3e12009-09-21 18:30:38 +00002600 break;
2601 }
Dan Gohman62c939d2008-12-03 05:21:24 +00002602 }
Evan Cheng9cef48e2009-09-11 00:39:26 +00002603 return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, 0, Alignment);
Owen Anderson43dbe052008-01-07 01:35:02 +00002604}
2605
2606
Dan Gohman8e8b8a22008-10-16 01:49:15 +00002607bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
2608 const SmallVectorImpl<unsigned> &Ops) const {
Owen Anderson43dbe052008-01-07 01:35:02 +00002609 // Check switch flag
2610 if (NoFusing) return false;
2611
2612 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
2613 switch (MI->getOpcode()) {
2614 default: return false;
2615 case X86::TEST8rr:
2616 case X86::TEST16rr:
2617 case X86::TEST32rr:
2618 case X86::TEST64rr:
2619 return true;
2620 }
2621 }
2622
2623 if (Ops.size() != 1)
2624 return false;
2625
2626 unsigned OpNum = Ops[0];
2627 unsigned Opc = MI->getOpcode();
Chris Lattner749c6f62008-01-07 07:27:27 +00002628 unsigned NumOps = MI->getDesc().getNumOperands();
Owen Anderson43dbe052008-01-07 01:35:02 +00002629 bool isTwoAddr = NumOps > 1 &&
Chris Lattner749c6f62008-01-07 07:27:27 +00002630 MI->getDesc().getOperandConstraint(1, TOI::TIED_TO) != -1;
Owen Anderson43dbe052008-01-07 01:35:02 +00002631
2632 // Folding a memory location into the two-address part of a two-address
2633 // instruction is different from folding it elsewhere: it requires
2634 // replacing *both* tied registers with the memory location.
Chris Lattner45a1cb22010-10-07 23:08:41 +00002635 const DenseMap<unsigned, std::pair<unsigned,unsigned> > *OpcodeTablePtr = 0;
Owen Anderson43dbe052008-01-07 01:35:02 +00002636 if (isTwoAddr && NumOps >= 2 && OpNum < 2) {
2637 OpcodeTablePtr = &RegOp2MemOpTable2Addr;
2638 } else if (OpNum == 0) { // If operand 0
2639 switch (Opc) {
Chris Lattner9ac75422009-07-14 20:19:57 +00002640 case X86::MOV8r0:
Dan Gohmanf1b4d262010-01-12 04:42:54 +00002641 case X86::MOV16r0:
Owen Anderson43dbe052008-01-07 01:35:02 +00002642 case X86::MOV32r0:
Chris Lattner45a1cb22010-10-07 23:08:41 +00002643 case X86::MOV64r0: return true;
Owen Anderson43dbe052008-01-07 01:35:02 +00002644 default: break;
2645 }
2646 OpcodeTablePtr = &RegOp2MemOpTable0;
2647 } else if (OpNum == 1) {
2648 OpcodeTablePtr = &RegOp2MemOpTable1;
2649 } else if (OpNum == 2) {
2650 OpcodeTablePtr = &RegOp2MemOpTable2;
2651 }
2652
Chris Lattner99ae6652010-10-08 03:54:52 +00002653 if (OpcodeTablePtr && OpcodeTablePtr->count(Opc))
2654 return true;
Jakob Stoklund Olesen1f323402010-07-09 20:43:13 +00002655 return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
Owen Anderson43dbe052008-01-07 01:35:02 +00002656}
2657
2658bool X86InstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
2659 unsigned Reg, bool UnfoldLoad, bool UnfoldStore,
Bill Wendlingfbef3102009-02-11 21:51:19 +00002660 SmallVectorImpl<MachineInstr*> &NewMIs) const {
Chris Lattner45a1cb22010-10-07 23:08:41 +00002661 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
2662 MemOp2RegOpTable.find(MI->getOpcode());
Owen Anderson43dbe052008-01-07 01:35:02 +00002663 if (I == MemOp2RegOpTable.end())
2664 return false;
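  // The table value packs the folded operand index in the low four bits and
  // "folded load" / "folded store" flags in bits 4 and 5.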
2665 unsigned Opc = I->second.first;
2666 unsigned Index = I->second.second & 0xf;
2667 bool FoldedLoad = I->second.second & (1 << 4);
2668 bool FoldedStore = I->second.second & (1 << 5);
2669 if (UnfoldLoad && !FoldedLoad)
2670 return false;
2671 UnfoldLoad &= FoldedLoad;
2672 if (UnfoldStore && !FoldedStore)
2673 return false;
2674 UnfoldStore &= FoldedStore;
2675
Chris Lattner749c6f62008-01-07 07:27:27 +00002676 const TargetInstrDesc &TID = get(Opc);
Owen Anderson43dbe052008-01-07 01:35:02 +00002677 const TargetOperandInfo &TOI = TID.OpInfo[Index];
Chris Lattnercb778a82009-07-29 21:10:12 +00002678 const TargetRegisterClass *RC = TOI.getRegClass(&RI);
Evan Cheng98ec91e2010-07-02 20:36:18 +00002679 if (!MI->hasOneMemOperand() &&
2680 RC == &X86::VR128RegClass &&
2681 !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
2682 // Without memoperands, loadRegFromAddr and storeRegToAddr will
2683 // conservatively assume the address is unaligned. That's bad for
2684 // performance.
2685 return false;
Chris Lattnerac0ed5d2010-07-08 22:41:28 +00002686 SmallVector<MachineOperand, X86::AddrNumOperands> AddrOps;
Owen Anderson43dbe052008-01-07 01:35:02 +00002687 SmallVector<MachineOperand,2> BeforeOps;
2688 SmallVector<MachineOperand,2> AfterOps;
2689 SmallVector<MachineOperand,4> ImpOps;
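  // Partition MI's operands: the folded memory address (AddrOps), explicit
  // operands before and after it (BeforeOps/AfterOps), and implicit register
  // operands (ImpOps), so the unfolded load, the data-processing instruction
  // and the store can each be rebuilt from the right pieces.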
2690 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
2691 MachineOperand &Op = MI->getOperand(i);
Chris Lattnerac0ed5d2010-07-08 22:41:28 +00002692 if (i >= Index && i < Index + X86::AddrNumOperands)
Owen Anderson43dbe052008-01-07 01:35:02 +00002693 AddrOps.push_back(Op);
Dan Gohmand735b802008-10-03 15:45:36 +00002694 else if (Op.isReg() && Op.isImplicit())
Owen Anderson43dbe052008-01-07 01:35:02 +00002695 ImpOps.push_back(Op);
2696 else if (i < Index)
2697 BeforeOps.push_back(Op);
2698 else if (i > Index)
2699 AfterOps.push_back(Op);
2700 }
2701
2702 // Emit the load instruction.
2703 if (UnfoldLoad) {
Dan Gohman91e69c32009-10-09 18:10:05 +00002704 std::pair<MachineInstr::mmo_iterator,
2705 MachineInstr::mmo_iterator> MMOs =
2706 MF.extractLoadMemRefs(MI->memoperands_begin(),
2707 MI->memoperands_end());
2708 loadRegFromAddr(MF, Reg, AddrOps, RC, MMOs.first, MMOs.second, NewMIs);
Owen Anderson43dbe052008-01-07 01:35:02 +00002709 if (UnfoldStore) {
2710 // Address operands cannot be marked isKill.
Chris Lattnerac0ed5d2010-07-08 22:41:28 +00002711 for (unsigned i = 1; i != 1 + X86::AddrNumOperands; ++i) {
Owen Anderson43dbe052008-01-07 01:35:02 +00002712 MachineOperand &MO = NewMIs[0]->getOperand(i);
Dan Gohmand735b802008-10-03 15:45:36 +00002713 if (MO.isReg())
Owen Anderson43dbe052008-01-07 01:35:02 +00002714 MO.setIsKill(false);
2715 }
2716 }
2717 }
2718
2719 // Emit the data processing instruction.
Bill Wendling9bc96a52009-02-03 00:55:04 +00002720 MachineInstr *DataMI = MF.CreateMachineInstr(TID, MI->getDebugLoc(), true);
Owen Anderson43dbe052008-01-07 01:35:02 +00002721 MachineInstrBuilder MIB(DataMI);
2722
2723 if (FoldedStore)
Bill Wendling587daed2009-05-13 21:33:08 +00002724 MIB.addReg(Reg, RegState::Define);
Owen Anderson43dbe052008-01-07 01:35:02 +00002725 for (unsigned i = 0, e = BeforeOps.size(); i != e; ++i)
Dan Gohman97357612009-02-18 05:45:50 +00002726 MIB.addOperand(BeforeOps[i]);
Owen Anderson43dbe052008-01-07 01:35:02 +00002727 if (FoldedLoad)
2728 MIB.addReg(Reg);
2729 for (unsigned i = 0, e = AfterOps.size(); i != e; ++i)
Dan Gohman97357612009-02-18 05:45:50 +00002730 MIB.addOperand(AfterOps[i]);
Owen Anderson43dbe052008-01-07 01:35:02 +00002731 for (unsigned i = 0, e = ImpOps.size(); i != e; ++i) {
2732 MachineOperand &MO = ImpOps[i];
Bill Wendling587daed2009-05-13 21:33:08 +00002733 MIB.addReg(MO.getReg(),
2734 getDefRegState(MO.isDef()) |
2735 RegState::Implicit |
2736 getKillRegState(MO.isKill()) |
Evan Cheng4784f1f2009-06-30 08:49:04 +00002737 getDeadRegState(MO.isDead()) |
2738 getUndefRegState(MO.isUndef()));
Owen Anderson43dbe052008-01-07 01:35:02 +00002739 }
2740 // Change CMP32ri r, 0 back to TEST32rr r, r, etc.
2741 unsigned NewOpc = 0;
2742 switch (DataMI->getOpcode()) {
2743 default: break;
2744 case X86::CMP64ri32:
Dan Gohmanf8c1ef02010-05-18 21:54:15 +00002745 case X86::CMP64ri8:
Owen Anderson43dbe052008-01-07 01:35:02 +00002746 case X86::CMP32ri:
Dan Gohmanf8c1ef02010-05-18 21:54:15 +00002747 case X86::CMP32ri8:
Owen Anderson43dbe052008-01-07 01:35:02 +00002748 case X86::CMP16ri:
Dan Gohmanf8c1ef02010-05-18 21:54:15 +00002749 case X86::CMP16ri8:
Owen Anderson43dbe052008-01-07 01:35:02 +00002750 case X86::CMP8ri: {
2751 MachineOperand &MO0 = DataMI->getOperand(0);
2752 MachineOperand &MO1 = DataMI->getOperand(1);
2753 if (MO1.getImm() == 0) {
2754 switch (DataMI->getOpcode()) {
2755 default: break;
Dan Gohmanf8c1ef02010-05-18 21:54:15 +00002756 case X86::CMP64ri8:
Owen Anderson43dbe052008-01-07 01:35:02 +00002757 case X86::CMP64ri32: NewOpc = X86::TEST64rr; break;
Dan Gohmanf8c1ef02010-05-18 21:54:15 +00002758 case X86::CMP32ri8:
Owen Anderson43dbe052008-01-07 01:35:02 +00002759 case X86::CMP32ri: NewOpc = X86::TEST32rr; break;
Dan Gohmanf8c1ef02010-05-18 21:54:15 +00002760 case X86::CMP16ri8:
Owen Anderson43dbe052008-01-07 01:35:02 +00002761 case X86::CMP16ri: NewOpc = X86::TEST16rr; break;
2762 case X86::CMP8ri: NewOpc = X86::TEST8rr; break;
2763 }
Chris Lattner5080f4d2008-01-11 18:10:50 +00002764 DataMI->setDesc(get(NewOpc));
Owen Anderson43dbe052008-01-07 01:35:02 +00002765 MO1.ChangeToRegister(MO0.getReg(), false);
2766 }
2767 }
2768 }
2769 NewMIs.push_back(DataMI);
2770
2771 // Emit the store instruction.
2772 if (UnfoldStore) {
Chris Lattnercb778a82009-07-29 21:10:12 +00002773 const TargetRegisterClass *DstRC = TID.OpInfo[0].getRegClass(&RI);
Dan Gohman91e69c32009-10-09 18:10:05 +00002774 std::pair<MachineInstr::mmo_iterator,
2775 MachineInstr::mmo_iterator> MMOs =
2776 MF.extractStoreMemRefs(MI->memoperands_begin(),
2777 MI->memoperands_end());
2778 storeRegToAddr(MF, Reg, true, AddrOps, DstRC, MMOs.first, MMOs.second, NewMIs);
Owen Anderson43dbe052008-01-07 01:35:02 +00002779 }
2780
2781 return true;
2782}
2783
2784bool
2785X86InstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
Bill Wendlingfbef3102009-02-11 21:51:19 +00002786 SmallVectorImpl<SDNode*> &NewNodes) const {
Dan Gohmane8be6c62008-07-17 19:10:17 +00002787 if (!N->isMachineOpcode())
Owen Anderson43dbe052008-01-07 01:35:02 +00002788 return false;
2789
Chris Lattner45a1cb22010-10-07 23:08:41 +00002790 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
2791 MemOp2RegOpTable.find(N->getMachineOpcode());
Owen Anderson43dbe052008-01-07 01:35:02 +00002792 if (I == MemOp2RegOpTable.end())
2793 return false;
2794 unsigned Opc = I->second.first;
2795 unsigned Index = I->second.second & 0xf;
2796 bool FoldedLoad = I->second.second & (1 << 4);
2797 bool FoldedStore = I->second.second & (1 << 5);
Chris Lattner749c6f62008-01-07 07:27:27 +00002798 const TargetInstrDesc &TID = get(Opc);
Chris Lattnercb778a82009-07-29 21:10:12 +00002799 const TargetRegisterClass *RC = TID.OpInfo[Index].getRegClass(&RI);
Dan Gohmanb37a8202009-03-04 19:23:38 +00002800 unsigned NumDefs = TID.NumDefs;
Dan Gohman475871a2008-07-27 21:46:04 +00002801 std::vector<SDValue> AddrOps;
2802 std::vector<SDValue> BeforeOps;
2803 std::vector<SDValue> AfterOps;
Dale Johannesened2eee62009-02-06 01:31:28 +00002804 DebugLoc dl = N->getDebugLoc();
Owen Anderson43dbe052008-01-07 01:35:02 +00002805 unsigned NumOps = N->getNumOperands();
Dan Gohmanc76909a2009-09-25 20:36:54 +00002806 for (unsigned i = 0; i != NumOps-1; ++i) {
Dan Gohman475871a2008-07-27 21:46:04 +00002807 SDValue Op = N->getOperand(i);
Chris Lattnerac0ed5d2010-07-08 22:41:28 +00002808 if (i >= Index-NumDefs && i < Index-NumDefs + X86::AddrNumOperands)
Owen Anderson43dbe052008-01-07 01:35:02 +00002809 AddrOps.push_back(Op);
Dan Gohmanb37a8202009-03-04 19:23:38 +00002810 else if (i < Index-NumDefs)
Owen Anderson43dbe052008-01-07 01:35:02 +00002811 BeforeOps.push_back(Op);
Dan Gohmanb37a8202009-03-04 19:23:38 +00002812 else if (i > Index-NumDefs)
Owen Anderson43dbe052008-01-07 01:35:02 +00002813 AfterOps.push_back(Op);
2814 }
Dan Gohman475871a2008-07-27 21:46:04 +00002815 SDValue Chain = N->getOperand(NumOps-1);
Owen Anderson43dbe052008-01-07 01:35:02 +00002816 AddrOps.push_back(Chain);
2817
2818 // Emit the load instruction.
2819 SDNode *Load = 0;
Dan Gohman91e69c32009-10-09 18:10:05 +00002820 MachineFunction &MF = DAG.getMachineFunction();
Owen Anderson43dbe052008-01-07 01:35:02 +00002821 if (FoldedLoad) {
Owen Andersone50ed302009-08-10 22:56:29 +00002822 EVT VT = *RC->vt_begin();
Evan Cheng600c0432009-11-16 21:56:03 +00002823 std::pair<MachineInstr::mmo_iterator,
2824 MachineInstr::mmo_iterator> MMOs =
2825 MF.extractLoadMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
2826 cast<MachineSDNode>(N)->memoperands_end());
Evan Cheng98ec91e2010-07-02 20:36:18 +00002827 if (!(*MMOs.first) &&
2828 RC == &X86::VR128RegClass &&
2829 !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
2830 // Do not introduce a slow unaligned load.
2831 return false;
2832 bool isAligned = (*MMOs.first) && (*MMOs.first)->getAlignment() >= 16;
Dan Gohman602b0c82009-09-25 18:54:59 +00002833 Load = DAG.getMachineNode(getLoadRegOpcode(0, RC, isAligned, TM), dl,
2834 VT, MVT::Other, &AddrOps[0], AddrOps.size());
Owen Anderson43dbe052008-01-07 01:35:02 +00002835 NewNodes.push_back(Load);
Dan Gohman91e69c32009-10-09 18:10:05 +00002836
2837 // Preserve memory reference information.
Dan Gohman91e69c32009-10-09 18:10:05 +00002838 cast<MachineSDNode>(Load)->setMemRefs(MMOs.first, MMOs.second);
Owen Anderson43dbe052008-01-07 01:35:02 +00002839 }
2840
2841 // Emit the data processing instruction.
Owen Andersone50ed302009-08-10 22:56:29 +00002842 std::vector<EVT> VTs;
Owen Anderson43dbe052008-01-07 01:35:02 +00002843 const TargetRegisterClass *DstRC = 0;
Chris Lattner349c4952008-01-07 03:13:06 +00002844 if (TID.getNumDefs() > 0) {
Chris Lattnercb778a82009-07-29 21:10:12 +00002845 DstRC = TID.OpInfo[0].getRegClass(&RI);
Owen Anderson43dbe052008-01-07 01:35:02 +00002846 VTs.push_back(*DstRC->vt_begin());
2847 }
2848 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
Owen Andersone50ed302009-08-10 22:56:29 +00002849 EVT VT = N->getValueType(i);
Owen Anderson825b72b2009-08-11 20:47:22 +00002850 if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
Owen Anderson43dbe052008-01-07 01:35:02 +00002851 VTs.push_back(VT);
2852 }
2853 if (Load)
Dan Gohman475871a2008-07-27 21:46:04 +00002854 BeforeOps.push_back(SDValue(Load, 0));
Owen Anderson43dbe052008-01-07 01:35:02 +00002855 std::copy(AfterOps.begin(), AfterOps.end(), std::back_inserter(BeforeOps));
Dan Gohman602b0c82009-09-25 18:54:59 +00002856 SDNode *NewNode= DAG.getMachineNode(Opc, dl, VTs, &BeforeOps[0],
2857 BeforeOps.size());
Owen Anderson43dbe052008-01-07 01:35:02 +00002858 NewNodes.push_back(NewNode);
2859
2860 // Emit the store instruction.
2861 if (FoldedStore) {
2862 AddrOps.pop_back();
Dan Gohman475871a2008-07-27 21:46:04 +00002863 AddrOps.push_back(SDValue(NewNode, 0));
Owen Anderson43dbe052008-01-07 01:35:02 +00002864 AddrOps.push_back(Chain);
Evan Cheng600c0432009-11-16 21:56:03 +00002865 std::pair<MachineInstr::mmo_iterator,
2866 MachineInstr::mmo_iterator> MMOs =
2867 MF.extractStoreMemRefs(cast<MachineSDNode>(N)->memoperands_begin(),
2868 cast<MachineSDNode>(N)->memoperands_end());
Evan Cheng98ec91e2010-07-02 20:36:18 +00002869 if (!(*MMOs.first) &&
2870 RC == &X86::VR128RegClass &&
2871 !TM.getSubtarget<X86Subtarget>().isUnalignedMemAccessFast())
2872 // Do not introduce a slow unaligned store.
2873 return false;
2874 bool isAligned = (*MMOs.first) && (*MMOs.first)->getAlignment() >= 16;
Dan Gohman602b0c82009-09-25 18:54:59 +00002875 SDNode *Store = DAG.getMachineNode(getStoreRegOpcode(0, DstRC,
2876 isAligned, TM),
2877 dl, MVT::Other,
2878 &AddrOps[0], AddrOps.size());
Owen Anderson43dbe052008-01-07 01:35:02 +00002879 NewNodes.push_back(Store);
Dan Gohman91e69c32009-10-09 18:10:05 +00002880
2881 // Preserve memory reference information.
Dan Gohman91e69c32009-10-09 18:10:05 +00002882 cast<MachineSDNode>(Store)->setMemRefs(MMOs.first, MMOs.second);
Owen Anderson43dbe052008-01-07 01:35:02 +00002883 }
2884
2885 return true;
2886}
2887
2888unsigned X86InstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
Dan Gohman0115e162009-10-30 22:18:41 +00002889 bool UnfoldLoad, bool UnfoldStore,
2890 unsigned *LoadRegIndex) const {
Chris Lattner45a1cb22010-10-07 23:08:41 +00002891 DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
2892 MemOp2RegOpTable.find(Opc);
Owen Anderson43dbe052008-01-07 01:35:02 +00002893 if (I == MemOp2RegOpTable.end())
2894 return 0;
2895 bool FoldedLoad = I->second.second & (1 << 4);
2896 bool FoldedStore = I->second.second & (1 << 5);
2897 if (UnfoldLoad && !FoldedLoad)
2898 return 0;
2899 if (UnfoldStore && !FoldedStore)
2900 return 0;
Dan Gohman0115e162009-10-30 22:18:41 +00002901 if (LoadRegIndex)
2902 *LoadRegIndex = I->second.second & 0xf;
Owen Anderson43dbe052008-01-07 01:35:02 +00002903 return I->second.first;
2904}
2905
Evan Cheng96dc1152010-01-22 03:34:51 +00002906bool
2907X86InstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
2908 int64_t &Offset1, int64_t &Offset2) const {
2909 if (!Load1->isMachineOpcode() || !Load2->isMachineOpcode())
2910 return false;
2911 unsigned Opc1 = Load1->getMachineOpcode();
2912 unsigned Opc2 = Load2->getMachineOpcode();
2913 switch (Opc1) {
2914 default: return false;
2915 case X86::MOV8rm:
2916 case X86::MOV16rm:
2917 case X86::MOV32rm:
2918 case X86::MOV64rm:
2919 case X86::LD_Fp32m:
2920 case X86::LD_Fp64m:
2921 case X86::LD_Fp80m:
2922 case X86::MOVSSrm:
2923 case X86::MOVSDrm:
2924 case X86::MMX_MOVD64rm:
2925 case X86::MMX_MOVQ64rm:
2926 case X86::FsMOVAPSrm:
2927 case X86::FsMOVAPDrm:
2928 case X86::MOVAPSrm:
2929 case X86::MOVUPSrm:
2930 case X86::MOVUPSrm_Int:
2931 case X86::MOVAPDrm:
2932 case X86::MOVDQArm:
2933 case X86::MOVDQUrm:
2934 case X86::MOVDQUrm_Int:
2935 break;
2936 }
2937 switch (Opc2) {
2938 default: return false;
2939 case X86::MOV8rm:
2940 case X86::MOV16rm:
2941 case X86::MOV32rm:
2942 case X86::MOV64rm:
2943 case X86::LD_Fp32m:
2944 case X86::LD_Fp64m:
2945 case X86::LD_Fp80m:
2946 case X86::MOVSSrm:
2947 case X86::MOVSDrm:
2948 case X86::MMX_MOVD64rm:
2949 case X86::MMX_MOVQ64rm:
2950 case X86::FsMOVAPSrm:
2951 case X86::FsMOVAPDrm:
2952 case X86::MOVAPSrm:
2953 case X86::MOVUPSrm:
2954 case X86::MOVUPSrm_Int:
2955 case X86::MOVAPDrm:
2956 case X86::MOVDQArm:
2957 case X86::MOVDQUrm:
2958 case X86::MOVDQUrm_Int:
2959 break;
2960 }
2961
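  // Operands of these load nodes are laid out as
  //   0:Base  1:Scale  2:Index  3:Displacement  4:Segment  5:Chain.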
2962 // Check if chain operands and base addresses match.
2963 if (Load1->getOperand(0) != Load2->getOperand(0) ||
2964 Load1->getOperand(5) != Load2->getOperand(5))
2965 return false;
2966 // Segment operands should match as well.
2967 if (Load1->getOperand(4) != Load2->getOperand(4))
2968 return false;
2969 // Scale should be 1, Index should be Reg0.
2970 if (Load1->getOperand(1) == Load2->getOperand(1) &&
2971 Load1->getOperand(2) == Load2->getOperand(2)) {
2972 if (cast<ConstantSDNode>(Load1->getOperand(1))->getZExtValue() != 1)
2973 return false;
Evan Cheng96dc1152010-01-22 03:34:51 +00002974
2975 // Now let's examine the displacements.
2976 if (isa<ConstantSDNode>(Load1->getOperand(3)) &&
2977 isa<ConstantSDNode>(Load2->getOperand(3))) {
2978 Offset1 = cast<ConstantSDNode>(Load1->getOperand(3))->getSExtValue();
2979 Offset2 = cast<ConstantSDNode>(Load2->getOperand(3))->getSExtValue();
2980 return true;
2981 }
2982 }
2983 return false;
2984}
2985
2986bool X86InstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
2987 int64_t Offset1, int64_t Offset2,
2988 unsigned NumLoads) const {
2989 assert(Offset2 > Offset1);
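  // Only try to cluster loads whose displacements are within roughly 512 bytes
  // of each other; beyond that there is presumably little locality benefit.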
2990 if ((Offset2 - Offset1) / 8 > 64)
2991 return false;
2992
2993 unsigned Opc1 = Load1->getMachineOpcode();
2994 unsigned Opc2 = Load2->getMachineOpcode();
2995 if (Opc1 != Opc2)
2996 return false; // FIXME: overly conservative?
2997
2998 switch (Opc1) {
2999 default: break;
3000 case X86::LD_Fp32m:
3001 case X86::LD_Fp64m:
3002 case X86::LD_Fp80m:
3003 case X86::MMX_MOVD64rm:
3004 case X86::MMX_MOVQ64rm:
3005 return false;
3006 }
3007
3008 EVT VT = Load1->getValueType(0);
3009 switch (VT.getSimpleVT().SimpleTy) {
Bill Wendling19d85972010-06-22 22:16:17 +00003010 default:
Evan Cheng96dc1152010-01-22 03:34:51 +00003011 // XMM registers. In 64-bit mode we can be a bit more aggressive since we
3012 // have 16 of them to play with.
3013 if (TM.getSubtargetImpl()->is64Bit()) {
3014 if (NumLoads >= 3)
3015 return false;
Bill Wendling19d85972010-06-22 22:16:17 +00003016 } else if (NumLoads) {
Evan Cheng96dc1152010-01-22 03:34:51 +00003017 return false;
Bill Wendling19d85972010-06-22 22:16:17 +00003018 }
Evan Cheng96dc1152010-01-22 03:34:51 +00003019 break;
Evan Cheng96dc1152010-01-22 03:34:51 +00003020 case MVT::i8:
3021 case MVT::i16:
3022 case MVT::i32:
3023 case MVT::i64:
Evan Chengafc36732010-01-22 23:49:11 +00003024 case MVT::f32:
3025 case MVT::f64:
Evan Cheng96dc1152010-01-22 03:34:51 +00003026 if (NumLoads)
3027 return false;
Bill Wendling19d85972010-06-22 22:16:17 +00003028 break;
Evan Cheng96dc1152010-01-22 03:34:51 +00003029 }
3030
3031 return true;
3032}
3033
3034
Chris Lattner7fbe9722006-10-20 17:42:20 +00003035bool X86InstrInfo::
Owen Anderson44eb65c2008-08-14 22:49:33 +00003036ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
Chris Lattner9cd68752006-10-21 05:52:40 +00003037 assert(Cond.size() == 1 && "Invalid X86 branch condition!");
Evan Cheng97af60b2008-08-29 23:21:31 +00003038 X86::CondCode CC = static_cast<X86::CondCode>(Cond[0].getImm());
Dan Gohman279c22e2008-10-21 03:29:32 +00003039 if (CC == X86::COND_NE_OR_P || CC == X86::COND_NP_OR_E)
3040 return true;
Evan Cheng97af60b2008-08-29 23:21:31 +00003041 Cond[0].setImm(GetOppositeBranchCondition(CC));
Chris Lattner9cd68752006-10-21 05:52:40 +00003042 return false;
Chris Lattner7fbe9722006-10-20 17:42:20 +00003043}
3044
Evan Cheng23066282008-10-27 07:14:50 +00003045bool X86InstrInfo::
Evan Cheng4350eb82009-02-06 17:17:30 +00003046isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
3047 // FIXME: Return false for x87 stack register classes for now. We can't
Evan Cheng23066282008-10-27 07:14:50 +00003048 // allow any loads of these registers before FpGet_ST0_80.
Evan Cheng4350eb82009-02-06 17:17:30 +00003049 return !(RC == &X86::CCRRegClass || RC == &X86::RFP32RegClass ||
3050 RC == &X86::RFP64RegClass || RC == &X86::RFP80RegClass);
Evan Cheng23066282008-10-27 07:14:50 +00003051}
3052
Nicolas Geoffray52e724a2008-04-16 20:10:13 +00003053
Chris Lattner39a612e2010-02-05 22:10:22 +00003054/// isX86_64ExtendedReg - Is the given register an x86-64 extended (r8 or
3055/// higher) register? e.g. r8, xmm8, xmm13, etc.
3056bool X86InstrInfo::isX86_64ExtendedReg(unsigned RegNo) {
3057 switch (RegNo) {
Nicolas Geoffray52e724a2008-04-16 20:10:13 +00003058 default: break;
3059 case X86::R8: case X86::R9: case X86::R10: case X86::R11:
3060 case X86::R12: case X86::R13: case X86::R14: case X86::R15:
3061 case X86::R8D: case X86::R9D: case X86::R10D: case X86::R11D:
3062 case X86::R12D: case X86::R13D: case X86::R14D: case X86::R15D:
3063 case X86::R8W: case X86::R9W: case X86::R10W: case X86::R11W:
3064 case X86::R12W: case X86::R13W: case X86::R14W: case X86::R15W:
3065 case X86::R8B: case X86::R9B: case X86::R10B: case X86::R11B:
3066 case X86::R12B: case X86::R13B: case X86::R14B: case X86::R15B:
3067 case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
3068 case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
Bruno Cardoso Lopese86b01c2010-07-09 18:27:43 +00003069 case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11:
3070 case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
Chris Lattnerbc57c6d2010-09-22 05:29:50 +00003071 case X86::CR8: case X86::CR9: case X86::CR10: case X86::CR11:
3072 case X86::CR12: case X86::CR13: case X86::CR14: case X86::CR15:
Nicolas Geoffray52e724a2008-04-16 20:10:13 +00003073 return true;
3074 }
3075 return false;
3076}
3077
Dan Gohman57c3dac2008-09-30 00:58:23 +00003078/// getGlobalBaseReg - Return a virtual register initialized with the
3079/// global base register value. Output instructions required to
3080/// initialize the register in the function entry block, if necessary.
Dan Gohman8b746962008-09-23 18:22:58 +00003081///
Dan Gohman84023e02010-07-10 09:00:22 +00003082/// TODO: Eliminate this and move the code to X86MachineFunctionInfo.
3083///
Dan Gohman57c3dac2008-09-30 00:58:23 +00003084unsigned X86InstrInfo::getGlobalBaseReg(MachineFunction *MF) const {
3085 assert(!TM.getSubtarget<X86Subtarget>().is64Bit() &&
3086 "X86-64 PIC uses RIP relative addressing");
3087
3088 X86MachineFunctionInfo *X86FI = MF->getInfo<X86MachineFunctionInfo>();
3089 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
3090 if (GlobalBaseReg != 0)
3091 return GlobalBaseReg;
3092
Dan Gohman84023e02010-07-10 09:00:22 +00003093 // Create the register. The code to initialize it is inserted
3094 // later, by the CGBR pass (below).
Dan Gohman8b746962008-09-23 18:22:58 +00003095 MachineRegisterInfo &RegInfo = MF->getRegInfo();
Dan Gohman84023e02010-07-10 09:00:22 +00003096 GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
Dan Gohman57c3dac2008-09-30 00:58:23 +00003097 X86FI->setGlobalBaseReg(GlobalBaseReg);
3098 return GlobalBaseReg;
Dan Gohman8b746962008-09-23 18:22:58 +00003099}
Jakob Stoklund Olesen352aa502010-03-25 17:25:00 +00003100
Jakob Stoklund Olesene4b94b42010-03-29 23:24:21 +00003101// These are the replaceable SSE instructions. Some of these have Int variants
3102// that we don't include here. We don't want to replace instructions selected
3103// by intrinsics.
3104static const unsigned ReplaceableInstrs[][3] = {
Bruno Cardoso Lopes4d043622010-08-12 02:08:52 +00003105 //PackedSingle PackedDouble PackedInt
Jakob Stoklund Olesen357be7f2010-03-30 22:46:53 +00003106 { X86::MOVAPSmr, X86::MOVAPDmr, X86::MOVDQAmr },
3107 { X86::MOVAPSrm, X86::MOVAPDrm, X86::MOVDQArm },
3108 { X86::MOVAPSrr, X86::MOVAPDrr, X86::MOVDQArr },
3109 { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr },
3110 { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm },
3111 { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr },
3112 { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm },
3113 { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr },
3114 { X86::ANDPSrm, X86::ANDPDrm, X86::PANDrm },
3115 { X86::ANDPSrr, X86::ANDPDrr, X86::PANDrr },
3116 { X86::ORPSrm, X86::ORPDrm, X86::PORrm },
3117 { X86::ORPSrr, X86::ORPDrr, X86::PORrr },
Jakob Stoklund Olesend363b4e2010-03-31 00:40:13 +00003118 { X86::V_SET0PS, X86::V_SET0PD, X86::V_SET0PI },
Jakob Stoklund Olesen357be7f2010-03-30 22:46:53 +00003119 { X86::XORPSrm, X86::XORPDrm, X86::PXORrm },
3120 { X86::XORPSrr, X86::XORPDrr, X86::PXORrr },
Bruno Cardoso Lopes642eb022010-08-12 20:20:53 +00003121 // AVX 128-bit support
3122 { X86::VMOVAPSmr, X86::VMOVAPDmr, X86::VMOVDQAmr },
3123 { X86::VMOVAPSrm, X86::VMOVAPDrm, X86::VMOVDQArm },
3124 { X86::VMOVAPSrr, X86::VMOVAPDrr, X86::VMOVDQArr },
3125 { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr },
3126 { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm },
3127 { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
3128 { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm },
3129 { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr },
3130 { X86::VANDPSrm, X86::VANDPDrm, X86::VPANDrm },
3131 { X86::VANDPSrr, X86::VANDPDrr, X86::VPANDrr },
3132 { X86::VORPSrm, X86::VORPDrm, X86::VPORrm },
3133 { X86::VORPSrr, X86::VORPDrr, X86::VPORrr },
3134 { X86::AVX_SET0PS, X86::AVX_SET0PD, X86::AVX_SET0PI },
3135 { X86::VXORPSrm, X86::VXORPDrm, X86::VPXORrm },
3136 { X86::VXORPSrr, X86::VXORPDrr, X86::VPXORrr },
Jakob Stoklund Olesene4b94b42010-03-29 23:24:21 +00003137};
Jakob Stoklund Olesen352aa502010-03-25 17:25:00 +00003138
Jakob Stoklund Olesene4b94b42010-03-29 23:24:21 +00003139// FIXME: Some shuffle and unpack instructions have equivalents in different
3140// domains, but they require a bit more work than just switching opcodes.
Jakob Stoklund Olesen352aa502010-03-25 17:25:00 +00003141
Jakob Stoklund Olesene4b94b42010-03-29 23:24:21 +00003142static const unsigned *lookup(unsigned opcode, unsigned domain) {
Jakob Stoklund Olesen352aa502010-03-25 17:25:00 +00003143 for (unsigned i = 0, e = array_lengthof(ReplaceableInstrs); i != e; ++i)
Jakob Stoklund Olesene4b94b42010-03-29 23:24:21 +00003144 if (ReplaceableInstrs[i][domain-1] == opcode)
3145 return ReplaceableInstrs[i];
3146 return 0;
3147}
3148
3149std::pair<uint16_t, uint16_t>
3150X86InstrInfo::GetSSEDomain(const MachineInstr *MI) const {
3151 uint16_t domain = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
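  // The returned pair is (current domain, bitmask of domains this opcode has
  // an equivalent in). 0xe has bits 1-3 set, i.e. PackedSingle, PackedDouble
  // and PackedInt are all available; 0 means the domain cannot be changed.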
Jakob Stoklund Olesen357be7f2010-03-30 22:46:53 +00003152 return std::make_pair(domain,
3153 domain && lookup(MI->getOpcode(), domain) ? 0xe : 0);
Jakob Stoklund Olesene4b94b42010-03-29 23:24:21 +00003154}
3155
3156void X86InstrInfo::SetSSEDomain(MachineInstr *MI, unsigned Domain) const {
3157 assert(Domain>0 && Domain<4 && "Invalid execution domain");
3158 uint16_t dom = (MI->getDesc().TSFlags >> X86II::SSEDomainShift) & 3;
3159 assert(dom && "Not an SSE instruction");
3160 const unsigned *table = lookup(MI->getOpcode(), dom);
3161 assert(table && "Cannot change domain");
3162 MI->setDesc(get(table[Domain-1]));
Jakob Stoklund Olesen352aa502010-03-25 17:25:00 +00003163}
Chris Lattneree9eb412010-04-26 23:37:21 +00003164
3165/// getNoopForMachoTarget - Return the noop instruction to use for a noop.
3166void X86InstrInfo::getNoopForMachoTarget(MCInst &NopInst) const {
3167 NopInst.setOpcode(X86::NOOP);
3168}
Dan Gohman84023e02010-07-10 09:00:22 +00003169
3170namespace {
3171 /// CGBR - Create Global Base Reg pass. This initializes the PIC
3172 /// global base register for x86-32.
3173 struct CGBR : public MachineFunctionPass {
3174 static char ID;
Owen Anderson90c579d2010-08-06 18:33:48 +00003175 CGBR() : MachineFunctionPass(ID) {}
Dan Gohman84023e02010-07-10 09:00:22 +00003176
3177 virtual bool runOnMachineFunction(MachineFunction &MF) {
3178 const X86TargetMachine *TM =
3179 static_cast<const X86TargetMachine *>(&MF.getTarget());
3180
3181 assert(!TM->getSubtarget<X86Subtarget>().is64Bit() &&
3182 "X86-64 PIC uses RIP relative addressing");
3183
3184 // Only emit a global base reg in PIC mode.
3185 if (TM->getRelocationModel() != Reloc::PIC_)
3186 return false;
3187
Dan Gohmand8c0a512010-09-17 20:24:24 +00003188 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
3189 unsigned GlobalBaseReg = X86FI->getGlobalBaseReg();
3190
3191 // If we didn't need a GlobalBaseReg, don't insert code.
3192 if (GlobalBaseReg == 0)
3193 return false;
3194
Dan Gohman84023e02010-07-10 09:00:22 +00003195 // Insert the set of GlobalBaseReg into the first MBB of the function
3196 MachineBasicBlock &FirstMBB = MF.front();
3197 MachineBasicBlock::iterator MBBI = FirstMBB.begin();
3198 DebugLoc DL = FirstMBB.findDebugLoc(MBBI);
3199 MachineRegisterInfo &RegInfo = MF.getRegInfo();
3200 const X86InstrInfo *TII = TM->getInstrInfo();
3201
3202 unsigned PC;
3203 if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT())
3204 PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
3205 else
Dan Gohmand8c0a512010-09-17 20:24:24 +00003206 PC = GlobalBaseReg;
Dan Gohman84023e02010-07-10 09:00:22 +00003207
3208 // The operand of MovePCtoStack is completely ignored by the asm printer;
3209 // it's only used in JIT code emission as a displacement relative to the pc.
3210 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::MOVPC32r), PC).addImm(0);
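      // MOVPC32r is later lowered to something like:
      //   call .Lpc       # pushes the address of .Lpc
      // .Lpc:
      //   pop  <PC reg>   # <PC reg> now holds the PIC base address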
3211
3212 // If we're using vanilla 'GOT' PIC style, we should use relative addressing
3213 // not to the pc, but to the _GLOBAL_OFFSET_TABLE_ external symbol.
3214 if (TM->getSubtarget<X86Subtarget>().isPICStyleGOT()) {
Dan Gohman84023e02010-07-10 09:00:22 +00003215 // Generate addl $__GLOBAL_OFFSET_TABLE_ + [.-piclabel], %some_register
3216 BuildMI(FirstMBB, MBBI, DL, TII->get(X86::ADD32ri), GlobalBaseReg)
3217 .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_",
3218 X86II::MO_GOT_ABSOLUTE_ADDRESS);
3219 }
3220
3221 return true;
3222 }
3223
3224 virtual const char *getPassName() const {
3225 return "X86 PIC Global Base Reg Initialization";
3226 }
3227
3228 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
3229 AU.setPreservesCFG();
3230 MachineFunctionPass::getAnalysisUsage(AU);
3231 }
3232 };
3233}
3234
3235char CGBR::ID = 0;
3236FunctionPass*
3237llvm::createGlobalBaseRegPass() { return new CGBR(); }