//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
    TM(tm), RI(tm, *this) {
}

bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned& sourceReg,
                               unsigned& destReg) const {
  MachineOpCode oc = MI.getOpcode();
  if (oc == X86::MOV8rr || oc == X86::MOV16rr ||
      oc == X86::MOV32rr || oc == X86::MOV64rr ||
      oc == X86::MOV16to16_ || oc == X86::MOV32to32_ ||
      oc == X86::MOV_Fp3232 || oc == X86::MOVSSrr || oc == X86::MOVSDrr ||
      oc == X86::MOV_Fp3264 || oc == X86::MOV_Fp6432 || oc == X86::MOV_Fp6464 ||
      oc == X86::FsMOVAPSrr || oc == X86::FsMOVAPDrr ||
      oc == X86::MOVAPSrr || oc == X86::MOVAPDrr ||
      oc == X86::MOVSS2PSrr || oc == X86::MOVSD2PDrr ||
      oc == X86::MOVPS2SSrr || oc == X86::MOVPD2SDrr ||
      oc == X86::MMX_MOVD64rr || oc == X86::MMX_MOVQ64rr) {
    assert(MI.getNumOperands() >= 2 &&
           MI.getOperand(0).isRegister() &&
           MI.getOperand(1).isRegister() &&
           "invalid register-register move instruction");
    sourceReg = MI.getOperand(1).getReg();
    destReg = MI.getOperand(0).getReg();
    return true;
  }
  return false;
}

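// Note on memory operands (an informal reminder, derived from the checks
// below): an X86 memory reference is encoded as four machine operands in
// base + scale*index + disp form. For a load such as MOV32rm, operand 0 is
// the destination register and operands 1-4 are the base (a register or a
// frame index), the scale immediate, the index register, and the
// displacement. The stack-slot recognizers below therefore look for the
// exact pattern "frame-index base, scale 1, no index register, disp 0".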
unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(4).isImm() &&
        MI->getOperand(2).getImm() == 1 &&
        MI->getOperand(3).getReg() == 0 &&
        MI->getOperand(4).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV16_mr:
  case X86::MOV32mr:
  case X86::MOV32_mr:
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
    if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
        MI->getOperand(2).isReg() && MI->getOperand(3).isImm() &&
        MI->getOperand(1).getImm() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(4).getReg();
    }
    break;
  }
  return 0;
}


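// Rough intuition for the remat query below: instead of spilling a value and
// reloading it, the register allocator may simply re-execute the defining
// instruction at the point of use. That is only safe when the instruction
// always produces the same value, e.g. a load from a constant pool.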
bool X86InstrInfo::isReallyTriviallyReMaterializable(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    // Loads from constant pools are trivially rematerializable.
    if (MI->getOperand(1).isReg() && MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(4).isCPI() &&
        MI->getOperand(1).getReg() == 0 &&
        MI->getOperand(2).getImm() == 1 &&
        MI->getOperand(3).getReg() == 0)
      return true;

    // If this is a load from a fixed argument slot, we know the value is
    // invariant across the whole function, because we don't redefine argument
    // values.
#if 0
    // FIXME: This is disabled due to a remat bug. rdar://5671644
    MachineFunction *MF = MI->getParent()->getParent();
    if (MI->getOperand(1).isFI() &&
        MF->getFrameInfo()->isFixedObjectIndex(MI->getOperand(1).getIndex()))
      return true;
#endif

    return false;
  }
  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

/// isReallySideEffectFree - If the M_MAY_HAVE_SIDE_EFFECTS flag is set, this
/// method is called to determine if the specific instance of this instruction
/// has side effects. This is useful for instructions, like loads, that are
/// conservatively assumed to have side effects: a load from a constant pool
/// has none, so we need to differentiate it from the general case.
bool X86InstrInfo::isReallySideEffectFree(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV32rm:
    if (MI->getOperand(1).isRegister()) {
      unsigned Reg = MI->getOperand(1).getReg();

      // Loads from global addresses which aren't redefined in the function are
      // side effect free.
      if (Reg != 0 && MRegisterInfo::isVirtualRegister(Reg) &&
          MI->getOperand(2).isImm() && MI->getOperand(3).isReg() &&
          MI->getOperand(4).isGlobal() && MI->getOperand(2).getImm() == 1 &&
          MI->getOperand(3).getReg() == 0)
        return true;
    }
    // FALLTHROUGH
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    // Loads from constant pools have no side effects.
    if (MI->getOperand(1).isReg() && MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(4).isCPI() &&
        MI->getOperand(1).getReg() == 0 &&
        MI->getOperand(2).getImm() == 1 &&
        MI->getOperand(3).getReg() == 0)
      return true;

    // If this is a load from a fixed argument slot, we know the value is
    // invariant across the whole function, because we don't redefine argument
    // values.
    MachineFunction *MF = MI->getParent()->getParent();
    if (MI->getOperand(1).isFI() &&
        MF->getFrameInfo()->isFixedObjectIndex(MI->getOperand(1).getIndex()))
      return true;

    return false;
  }

  // All other instances of these instructions are presumed to have side
  // effects.
  return false;
}

/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
/// is not marked dead.
static bool hasLiveCondCodeDef(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}

/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag.  When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand.  This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
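/// A sketch of the idea (illustrative operand names): the two-address add
///     Dest = ADD32rr Src, Reg2                   ; Dest is tied to Src
/// becomes the three-address
///     Dest = LEA32r Src, 1, Reg2, 0              ; Dest = Src + 1*Reg2 + 0
/// and a left shift by 1, 2, or 3 maps onto LEA's scale field:
///     Dest = LEA32r %noreg, 1 << ShAmt, Src, 0   ; Dest = Src << ShAmt
/// which is why only shift amounts of 1-3 (scales 2, 4, 8) qualify below.
///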
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables &LV) const {
  MachineInstr *MI = MBBI;
  // All input instructions are two-address instructions.  Get the known
  // operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();

  MachineInstr *NewMI = NULL;
  // FIXME: 16-bit LEAs are really slow on Athlons, but not bad on P4s.  When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  bool DisableLEA16 = true;

  unsigned MIOpc = MI->getOpcode();
  switch (MIOpc) {
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;

    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    unsigned M = MI->getOperand(3).getImm();
    if (B != C) return 0;
    NewMI = BuildMI(get(X86::PSHUFDri), A).addReg(B).addImm(M);
    break;
  }
  case X86::SHL64ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags the way a shift does, but LLVM doesn't
    // use the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    NewMI = BuildMI(get(X86::LEA64r), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags the way a shift does, but LLVM doesn't
    // use the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
      X86::LEA64_32r : X86::LEA32r;
    NewMI = BuildMI(get(Opc), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL16ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags the way a shift does, but LLVM doesn't
    // use the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    if (DisableLEA16) {
      // If 16-bit LEA is disabled, use 32-bit LEA via subregisters.
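      // A sketch of the sequence emitted here (illustrative vreg names):
      //   %leaInReg  = INSERT_SUBREG %Src, 2          ; widen 16-bit source
      //   %leaOutReg = LEA32r %noreg, 1 << ShAmt, %leaInReg, 0
      //   %Dest      = EXTRACT_SUBREG %leaOutReg, 2   ; low 16 bits of result
      // where index 2 names the 16-bit subregister of a GR32 register, as
      // implied by the GR16 operands being inserted and extracted.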
      MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
      unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
        ? X86::LEA64_32r : X86::LEA32r;
      unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);

      MachineInstr *Ins =
        BuildMI(get(X86::INSERT_SUBREG), leaInReg).addReg(Src).addImm(2);
      Ins->copyKillDeadInfo(MI);

      NewMI = BuildMI(get(Opc), leaOutReg)
        .addReg(0).addImm(1 << ShAmt).addReg(leaInReg).addImm(0);

      MachineInstr *Ext =
        BuildMI(get(X86::EXTRACT_SUBREG), Dest).addReg(leaOutReg).addImm(2);
      Ext->copyKillDeadInfo(MI);

      MFI->insert(MBBI, Ins);            // Insert the insert_subreg
      LV.instructionChanged(MI, NewMI);  // Update live variables
      LV.addVirtualRegisterKilled(leaInReg, NewMI);
      MFI->insert(MBBI, NewMI);          // Insert the new inst
      LV.addVirtualRegisterKilled(leaOutReg, Ext);
      MFI->insert(MBBI, Ext);            // Insert the extract_subreg
      return Ext;
    } else {
      NewMI = BuildMI(get(X86::LEA16r), Dest)
        .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    }
    break;
  }
  default: {
    // The following opcodes also set the condition code register(s).  Only
    // convert them to an equivalent LEA if the condition code register defs
    // are dead!
    if (hasLiveCondCodeDef(MI))
      return 0;

    bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
    switch (MIOpc) {
    default: return 0;
    case X86::INC64r:
    case X86::INC32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src, 1);
      break;
    }
    case X86::INC16r:
    case X86::INC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, 1);
      break;
    case X86::DEC64r:
    case X86::DEC32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src, -1);
      break;
    }
    case X86::DEC16r:
    case X86::DEC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, -1);
      break;
    case X86::ADD64rr:
    case X86::ADD32rr: {
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegReg(BuildMI(get(Opc), Dest), Src,
                        MI->getOperand(2).getReg());
      break;
    }
    case X86::ADD16rr:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      NewMI = addRegReg(BuildMI(get(X86::LEA16r), Dest), Src,
                        MI->getOperand(2).getReg());
      break;
    case X86::ADD64ri32:
    case X86::ADD64ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate())
        NewMI = addRegOffset(BuildMI(get(X86::LEA64r), Dest), Src,
                             MI->getOperand(2).getImm());
      break;
    case X86::ADD32ri:
    case X86::ADD32ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate()) {
        unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
        NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src,
                             MI->getOperand(2).getImm());
      }
      break;
    case X86::ADD16ri:
    case X86::ADD16ri8:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate())
        NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src,
                             MI->getOperand(2).getImm());
      break;
    case X86::SHL16ri:
      if (DisableLEA16) return 0;
    case X86::SHL32ri:
    case X86::SHL64ri: {
      assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImmediate() &&
             "Unknown shl instruction!");
      unsigned ShAmt = MI->getOperand(2).getImm();
      if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
        X86AddressMode AM;
        AM.Scale = 1 << ShAmt;
        AM.IndexReg = Src;
        unsigned Opc = MIOpc == X86::SHL64ri ? X86::LEA64r
          : (MIOpc == X86::SHL32ri
             ? (is64Bit ? X86::LEA64_32r : X86::LEA32r) : X86::LEA16r);
        NewMI = addFullAddress(BuildMI(get(Opc), Dest), AM);
      }
      break;
    }
    }
  }
  }

  // Some paths above may fall through without creating an instruction (e.g.
  // a non-immediate operand); bail out rather than dereferencing a null MI.
  if (!NewMI) return 0;

  NewMI->copyKillDeadInfo(MI);
  LV.instructionChanged(MI, NewMI);  // Update live variables
  MFI->insert(MBBI, NewMI);          // Insert the new inst
  return NewMI;
}

/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
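/// A worked example for the SHLD/SHRD cases below (32-bit, illustrative):
///     A = SHLD32rri8 B, C, I   computes   A = (B << I) | (C >> (32-I))
/// Swapping B and C while replacing I with 32-I gives
///     A = SHRD32rri8 C, B, (32-I)  =  (C >> (32-I)) | (B << I),
/// the same value, so the operands commute if we also flip the opcode and
/// adjust the shift amount.
///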
MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
  case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImm();
    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    bool BisKill = MI->getOperand(1).isKill();
    bool CisKill = MI->getOperand(2).isKill();
    return BuildMI(get(Opc), A).addReg(C, false, false, CisKill)
      .addReg(B, false, false, BisKill).addImm(Size-Amt);
  }
  case X86::CMOVB16rr:
  case X86::CMOVB32rr:
  case X86::CMOVB64rr:
  case X86::CMOVAE16rr:
  case X86::CMOVAE32rr:
  case X86::CMOVAE64rr:
  case X86::CMOVE16rr:
  case X86::CMOVE32rr:
  case X86::CMOVE64rr:
  case X86::CMOVNE16rr:
  case X86::CMOVNE32rr:
  case X86::CMOVNE64rr:
  case X86::CMOVBE16rr:
  case X86::CMOVBE32rr:
  case X86::CMOVBE64rr:
  case X86::CMOVA16rr:
  case X86::CMOVA32rr:
  case X86::CMOVA64rr:
  case X86::CMOVL16rr:
  case X86::CMOVL32rr:
  case X86::CMOVL64rr:
  case X86::CMOVGE16rr:
  case X86::CMOVGE32rr:
  case X86::CMOVGE64rr:
  case X86::CMOVLE16rr:
  case X86::CMOVLE32rr:
  case X86::CMOVLE64rr:
  case X86::CMOVG16rr:
  case X86::CMOVG32rr:
  case X86::CMOVG64rr:
  case X86::CMOVS16rr:
  case X86::CMOVS32rr:
  case X86::CMOVS64rr:
  case X86::CMOVNS16rr:
  case X86::CMOVNS32rr:
  case X86::CMOVNS64rr:
  case X86::CMOVP16rr:
  case X86::CMOVP32rr:
  case X86::CMOVP64rr:
  case X86::CMOVNP16rr:
  case X86::CMOVNP32rr:
  case X86::CMOVNP64rr: {
    unsigned Opc = 0;
    switch (MI->getOpcode()) {
    default: break;
    case X86::CMOVB16rr:  Opc = X86::CMOVAE16rr; break;
    case X86::CMOVB32rr:  Opc = X86::CMOVAE32rr; break;
    case X86::CMOVB64rr:  Opc = X86::CMOVAE64rr; break;
    case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
    case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
    case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
    case X86::CMOVE16rr:  Opc = X86::CMOVNE16rr; break;
    case X86::CMOVE32rr:  Opc = X86::CMOVNE32rr; break;
    case X86::CMOVE64rr:  Opc = X86::CMOVNE64rr; break;
    case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
    case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
    case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
    case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
    case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
    case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
    case X86::CMOVA16rr:  Opc = X86::CMOVBE16rr; break;
    case X86::CMOVA32rr:  Opc = X86::CMOVBE32rr; break;
    case X86::CMOVA64rr:  Opc = X86::CMOVBE64rr; break;
    case X86::CMOVL16rr:  Opc = X86::CMOVGE16rr; break;
    case X86::CMOVL32rr:  Opc = X86::CMOVGE32rr; break;
    case X86::CMOVL64rr:  Opc = X86::CMOVGE64rr; break;
    case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
    case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
    case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
    case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
    case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
    case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
    case X86::CMOVG16rr:  Opc = X86::CMOVLE16rr; break;
    case X86::CMOVG32rr:  Opc = X86::CMOVLE32rr; break;
    case X86::CMOVG64rr:  Opc = X86::CMOVLE64rr; break;
    case X86::CMOVS16rr:  Opc = X86::CMOVNS16rr; break;
    case X86::CMOVS32rr:  Opc = X86::CMOVNS32rr; break;
    case X86::CMOVS64rr:  Opc = X86::CMOVNS64rr; break;
    case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
    case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
    case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
    case X86::CMOVP16rr:  Opc = X86::CMOVNP16rr; break;
    case X86::CMOVP32rr:  Opc = X86::CMOVNP32rr; break;
    case X86::CMOVP64rr:  Opc = X86::CMOVNP64rr; break;
    case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
    case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
    case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
    }

    MI->setInstrDescriptor(get(Opc));
    // Fallthrough intended.
  }
  default:
    return TargetInstrInfoImpl::commuteInstruction(MI);
  }
}

static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE:  return X86::COND_E;
  case X86::JNE: return X86::COND_NE;
  case X86::JL:  return X86::COND_L;
  case X86::JLE: return X86::COND_LE;
  case X86::JG:  return X86::COND_G;
  case X86::JGE: return X86::COND_GE;
  case X86::JB:  return X86::COND_B;
  case X86::JBE: return X86::COND_BE;
  case X86::JA:  return X86::COND_A;
  case X86::JAE: return X86::COND_AE;
  case X86::JS:  return X86::COND_S;
  case X86::JNS: return X86::COND_NS;
  case X86::JP:  return X86::COND_P;
  case X86::JNP: return X86::COND_NP;
  case X86::JO:  return X86::COND_O;
  case X86::JNO: return X86::COND_NO;
  }
}

unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E:  return X86::JE;
  case X86::COND_NE: return X86::JNE;
  case X86::COND_L:  return X86::JL;
  case X86::COND_LE: return X86::JLE;
  case X86::COND_G:  return X86::JG;
  case X86::COND_GE: return X86::JGE;
  case X86::COND_B:  return X86::JB;
  case X86::COND_BE: return X86::JBE;
  case X86::COND_A:  return X86::JA;
  case X86::COND_AE: return X86::JAE;
  case X86::COND_S:  return X86::JS;
  case X86::COND_NS: return X86::JNS;
  case X86::COND_P:  return X86::JP;
  case X86::COND_NP: return X86::JNP;
  case X86::COND_O:  return X86::JO;
  case X86::COND_NO: return X86::JNO;
  }
}

/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E:  return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L:  return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G:  return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B:  return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A:  return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S:  return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P:  return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O:  return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}

bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  if (TID->Flags & M_TERMINATOR_FLAG) {
    // Conditional branch is a special case.
    if ((TID->Flags & M_BRANCH_FLAG) != 0 && (TID->Flags & M_BARRIER_FLAG) == 0)
      return true;
    if ((TID->Flags & M_PREDICABLE) == 0)
      return true;
    return !isPredicated(MI);
  }
  return false;
}

// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
static bool isBrAnalysisUnpredicatedTerminator(const MachineInstr *MI,
                                               const X86InstrInfo &TII) {
  if (MI->getOpcode() == X86::FP_REG_KILL)
    return false;
  return TII.isUnpredicatedTerminator(MI);
}

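// The block shapes AnalyzeBranch recognizes, roughly (illustrative):
//   (1) no terminators        -> fall-through; return false, TBB/FBB unset
//   (2) JMP bb                -> unconditional; TBB = bb
//   (3) Jcc bb                -> conditional + fall-through; TBB = bb
//   (4) Jcc bb1; JMP bb2      -> two-way conditional; TBB = bb1, FBB = bb2
// Anything else (e.g. an indirect branch) is reported as not analyzable by
// returning true.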
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 std::vector<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this)) {
    if (!isBranch(LastInst->getOpcode()))
      return true;

    // If the block ends with a branch there are 3 possibilities:
    // it's an unconditional, conditional, or indirect branch.

    if (LastInst->getOpcode() == X86::JMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    X86::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.

    // Otherwise, block ends with fall-through condbranch.
    TBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    return false;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isBrAnalysisUnpredicatedTerminator(--I, *this))
    return true;

  // If the block ends with X86::JMP and a conditional branch, handle it.
  X86::CondCode BranchCode = GetCondFromBranchOpc(SecondLastInst->getOpcode());
  if (BranchCode != X86::COND_INVALID && LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two X86::JMPs, handle it.  The second one is not
  // executed, so remove it.
  if (SecondLastInst->getOpcode() == X86::JMP &&
      LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != X86::JMP &&
      GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

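/// X86InstrAddOperand - Append the machine operand MO to the instruction
/// being constructed by MIB, dispatching on the operand's kind. This lets
/// the storeRegToAddr/loadRegFromAddr helpers below replay a precomputed
/// address, whatever mix of registers, immediates, and symbolic operands
/// it contains.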
static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
                                                     MachineOperand &MO) {
  if (MO.isRegister())
    MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
                     false, false, MO.getSubReg());
  else if (MO.isImmediate())
    MIB = MIB.addImm(MO.getImm());
  else if (MO.isFrameIndex())
    MIB = MIB.addFrameIndex(MO.getIndex());
  else if (MO.isGlobalAddress())
    MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
  else if (MO.isConstantPoolIndex())
    MIB = MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset());
  else if (MO.isJumpTableIndex())
    MIB = MIB.addJumpTableIndex(MO.getIndex());
  else if (MO.isExternalSymbol())
    MIB = MIB.addExternalSymbol(MO.getSymbolName());
  else
    assert(0 && "Unknown operand for X86InstrAddOperand!");

  return MIB;
}

unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const std::vector<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

  if (FBB == 0) { // One-way branch.
    if (Cond.empty()) {
      // Unconditional branch.
      BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
    } else {
      // Conditional branch.
      unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
      BuildMI(&MBB, get(Opc)).addMBB(TBB);
    }
    return 1;
  }

  // Two-way conditional branch.
  unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
  BuildMI(&MBB, get(Opc)).addMBB(TBB);
  BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
  return 2;
}

void X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                unsigned DestReg, unsigned SrcReg,
                                const TargetRegisterClass *DestRC,
                                const TargetRegisterClass *SrcRC) const {
  if (DestRC != SrcRC) {
    // Moving EFLAGS to / from another register requires a push and a pop.
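    // For example (illustrative), copying EFLAGS into a 32-bit GPR emits
    //   PUSHFD           ; push the flags word onto the stack
    //   POP32r DestReg   ; pop it into the destination register
    // and the reverse direction pushes SrcReg and executes POPFD.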
    if (SrcRC == &X86::CCRRegClass) {
      assert(SrcReg == X86::EFLAGS);
      if (DestRC == &X86::GR64RegClass) {
        BuildMI(MBB, MI, get(X86::PUSHFQ));
        BuildMI(MBB, MI, get(X86::POP64r), DestReg);
        return;
      } else if (DestRC == &X86::GR32RegClass) {
        BuildMI(MBB, MI, get(X86::PUSHFD));
        BuildMI(MBB, MI, get(X86::POP32r), DestReg);
        return;
      }
    } else if (DestRC == &X86::CCRRegClass) {
      assert(DestReg == X86::EFLAGS);
      if (SrcRC == &X86::GR64RegClass) {
        BuildMI(MBB, MI, get(X86::PUSH64r)).addReg(SrcReg);
        BuildMI(MBB, MI, get(X86::POPFQ));
        return;
      } else if (SrcRC == &X86::GR32RegClass) {
        BuildMI(MBB, MI, get(X86::PUSH32r)).addReg(SrcReg);
        BuildMI(MBB, MI, get(X86::POPFD));
        return;
      }
    }
    cerr << "Not yet supported!";
    abort();
  }

  unsigned Opc;
  if (DestRC == &X86::GR64RegClass) {
    Opc = X86::MOV64rr;
  } else if (DestRC == &X86::GR32RegClass) {
    Opc = X86::MOV32rr;
  } else if (DestRC == &X86::GR16RegClass) {
    Opc = X86::MOV16rr;
  } else if (DestRC == &X86::GR8RegClass) {
    Opc = X86::MOV8rr;
  } else if (DestRC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rr;
  } else if (DestRC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rr;
  } else if (DestRC == &X86::RFP32RegClass) {
    Opc = X86::MOV_Fp3232;
  } else if (DestRC == &X86::RFP64RegClass || DestRC == &X86::RSTRegClass) {
    Opc = X86::MOV_Fp6464;
  } else if (DestRC == &X86::RFP80RegClass) {
    Opc = X86::MOV_Fp8080;
  } else if (DestRC == &X86::FR32RegClass) {
    Opc = X86::FsMOVAPSrr;
  } else if (DestRC == &X86::FR64RegClass) {
    Opc = X86::FsMOVAPDrr;
  } else if (DestRC == &X86::VR128RegClass) {
    Opc = X86::MOVAPSrr;
  } else if (DestRC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }
  BuildMI(MBB, MI, get(Opc), DestReg).addReg(SrcReg);
}

static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
                                  unsigned StackAlign) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64mr;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32mr;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16mr;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8mr;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_mr;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_mr;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::ST_FpP80m;   // pops
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::ST_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::ST_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSmr;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDmr;
  } else if (RC == &X86::VR128RegClass) {
    // FIXME: Use movaps once we are capable of selectively
    // aligning functions that spill SSE registers on 16-byte boundaries.
    Opc = StackAlign >= 16 ? X86::MOVAPSmr : X86::MOVUPSmr;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64mr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}

void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned SrcReg, bool isKill,
                                       int FrameIdx,
                                       const TargetRegisterClass *RC) const {
  unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
  addFrameReference(BuildMI(MBB, MI, get(Opc)), FrameIdx)
    .addReg(SrcReg, false, false, isKill);
}

void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
  MachineInstrBuilder MIB = BuildMI(get(Opc));
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  MIB.addReg(SrcReg, false, false, isKill);
  NewMIs.push_back(MIB);
}

static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
                                 unsigned StackAlign) {
  unsigned Opc = 0;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64rm;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32rm;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16rm;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8rm;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rm;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rm;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::LD_Fp80m;
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::LD_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::LD_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSrm;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDrm;
  } else if (RC == &X86::VR128RegClass) {
    // FIXME: Use movaps once we are capable of selectively
    // aligning functions that spill SSE registers on 16-byte boundaries.
    Opc = StackAlign >= 16 ? X86::MOVAPSrm : X86::MOVUPSrm;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rm;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}

void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        unsigned DestReg, int FrameIdx,
                                        const TargetRegisterClass *RC) const {
  unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
  addFrameReference(BuildMI(MBB, MI, get(Opc), DestReg), FrameIdx);
}

void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
  MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  NewMIs.push_back(MIB);
}

bool X86InstrInfo::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();
  unsigned SlotSize = is64Bit ? 8 : 4;

  MachineFunction &MF = *MBB.getParent();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  X86FI->setCalleeSavedFrameSize(CSI.size() * SlotSize);

  unsigned Opc = is64Bit ? X86::PUSH64r : X86::PUSH32r;
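  // Push the registers in reverse order so that restoreCalleeSavedRegisters,
  // which walks CSI forward issuing pops, restores them in matching LIFO
  // order off the stack.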
  for (unsigned i = CSI.size(); i != 0; --i) {
    unsigned Reg = CSI[i-1].getReg();
    // Add the callee-saved register as live-in. It's killed at the spill.
    MBB.addLiveIn(Reg);
    BuildMI(MBB, MI, get(Opc)).addReg(Reg);
  }
  return true;
}

bool X86InstrInfo::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
                                               MachineBasicBlock::iterator MI,
                                const std::vector<CalleeSavedInfo> &CSI) const {
  if (CSI.empty())
    return false;

  bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();

  unsigned Opc = is64Bit ? X86::POP64r : X86::POP32r;
  for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
    unsigned Reg = CSI[i].getReg();
    BuildMI(MBB, MI, get(Opc), Reg);
  }
  return true;
}

bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case X86::TCRETURNri:
  case X86::TCRETURNdi:
  case X86::RET:     // Return.
  case X86::RETI:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::JMP:     // Uncond branch.
  case X86::JMP32r:  // Indirect branch.
  case X86::JMP64r:  // Indirect branch (64-bit).
  case X86::JMP32m:  // Indirect branch through mem.
  case X86::JMP64m:  // Indirect branch through mem (64-bit).
    return true;
  default: return false;
  }
}

bool X86InstrInfo::
ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  Cond[0].setImm(GetOppositeBranchCondition((X86::CondCode)Cond[0].getImm()));
  return false;
}

const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}