//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized dag to a X86 dag.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
using namespace llvm;

STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");

//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDValues instead of register numbers for the leaves of the matched
  /// tree.
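  /// The matched form is the general x86 memory operand
  ///   Segment : [Base_Reg/Base_FrameIndex + IndexReg*Scale + Disp]
  /// where Scale is 1, 2, 4, or 8.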
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    // This is really a union, discriminated by BaseType!
    SDValue Base_Reg;
    int Base_FrameIndex;

    unsigned Scale;
    SDValue IndexReg;
    int32_t Disp;
    SDValue Segment;
    const GlobalValue *GV;
    const Constant *CP;
    const BlockAddress *BlockAddr;
    const char *ES;
    int JT;
    unsigned Align;    // CP alignment.
    unsigned char SymbolFlags;  // X86II::MO_*

    X86ISelAddressMode()
      : BaseType(RegBase), Base_FrameIndex(0), Scale(1), IndexReg(), Disp(0),
        Segment(), GV(0), CP(0), BlockAddr(0), ES(0), JT(-1), Align(0),
        SymbolFlags(X86II::MO_NO_FLAG) {
    }

    bool hasSymbolicDisplacement() const {
      return GV != 0 || CP != 0 || ES != 0 || JT != -1 || BlockAddr != 0;
    }

    bool hasBaseOrIndexReg() const {
      return IndexReg.getNode() != 0 || Base_Reg.getNode() != 0;
    }

    /// isRIPRelative - Return true if this addressing mode is already RIP
    /// relative.
    bool isRIPRelative() const {
      if (BaseType != RegBase) return false;
      if (RegisterSDNode *RegNode =
            dyn_cast_or_null<RegisterSDNode>(Base_Reg.getNode()))
        return RegNode->getReg() == X86::RIP;
      return false;
    }

    void setBaseReg(SDValue Reg) {
      BaseType = RegBase;
      Base_Reg = Reg;
    }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
    void dump() {
      dbgs() << "X86ISelAddressMode " << this << '\n';
      dbgs() << "Base_Reg ";
      if (Base_Reg.getNode() != 0)
        Base_Reg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Base.FrameIndex " << Base_FrameIndex << '\n'
             << " Scale" << Scale << '\n'
             << "IndexReg ";
      if (IndexReg.getNode() != 0)
        IndexReg.getNode()->dump();
      else
        dbgs() << "nul";
      dbgs() << " Disp " << Disp << '\n'
             << "GV ";
      if (GV)
        GV->dump();
      else
        dbgs() << "nul";
      dbgs() << " CP ";
      if (CP)
        CP->dump();
      else
        dbgs() << "nul";
      dbgs() << '\n'
             << "ES ";
      if (ES)
        dbgs() << ES;
      else
        dbgs() << "nul";
      dbgs() << " JT" << JT << " Align" << Align << '\n';
    }
#endif
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  /// SelectionDAG operations.
  ///
  class X86DAGToDAGISel : public SelectionDAGISel {
    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    const X86TargetLowering &X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// OptForSize - If true, selector should try to optimize for code size
    /// instead of performance.
    bool OptForSize;

  public:
    explicit X86DAGToDAGISel(X86TargetMachine &tm, CodeGenOpt::Level OptLevel)
      : SelectionDAGISel(tm, OptLevel),
        X86Lowering(*tm.getTargetLowering()),
        Subtarget(&tm.getSubtarget<X86Subtarget>()),
        OptForSize(false) {}

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    virtual void EmitFunctionEntryCode();

    virtual bool IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const;

    virtual void PreprocessISelDAG();

    inline bool immSext8(SDNode *N) const {
      return isInt<8>(cast<ConstantSDNode>(N)->getSExtValue());
    }

    // i64immSExt32 predicate - True if the 64-bit immediate fits in a 32-bit
    // sign extended field.
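    // For example, 0xFFFFFFFF80000000 (i.e. -2^31) fits, but
    // 0x0000000080000000 does not.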
    inline bool i64immSExt32(SDNode *N) const {
      uint64_t v = cast<ConstantSDNode>(N)->getZExtValue();
      return (int64_t)v == (int32_t)v;
    }

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDNode *N);
    SDNode *SelectGather(SDNode *N, unsigned Opc);
    SDNode *SelectAtomic64(SDNode *Node, unsigned Opc);
    SDNode *SelectAtomicLoadArith(SDNode *Node, EVT NVT);

    bool FoldOffsetIntoAddress(uint64_t Offset, X86ISelAddressMode &AM);
    bool MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM);
    bool MatchWrapper(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddress(SDValue N, X86ISelAddressMode &AM);
    bool MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                 unsigned Depth);
    bool MatchAddressBase(SDValue N, X86ISelAddressMode &AM);
    bool SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
                    SDValue &Scale, SDValue &Index, SDValue &Disp,
                    SDValue &Segment);
    bool SelectLEAAddr(SDValue N, SDValue &Base,
                       SDValue &Scale, SDValue &Index, SDValue &Disp,
                       SDValue &Segment);
    bool SelectTLSADDRAddr(SDValue N, SDValue &Base,
                           SDValue &Scale, SDValue &Index, SDValue &Disp,
                           SDValue &Segment);
    bool SelectScalarSSELoad(SDNode *Root, SDValue N,
                             SDValue &Base, SDValue &Scale,
                             SDValue &Index, SDValue &Disp,
                             SDValue &Segment,
                             SDValue &NodeWithChain);

    bool TryFoldLoad(SDNode *P, SDValue N,
                     SDValue &Base, SDValue &Scale,
                     SDValue &Index, SDValue &Disp,
                     SDValue &Segment);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDValue &Op,
                                              char ConstraintCode,
                                              std::vector<SDValue> &OutOps);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

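    /// getAddressOperands - Flatten the matched X86ISelAddressMode into the
    /// five operands (base, scale, index, displacement, segment) that an x86
    /// memory reference carries on a MachineInstr.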
    inline void getAddressOperands(X86ISelAddressMode &AM, SDValue &Base,
                                   SDValue &Scale, SDValue &Index,
                                   SDValue &Disp, SDValue &Segment) {
      Base  = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base_FrameIndex, TLI.getPointerTy()) :
        AM.Base_Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, DebugLoc(),
                                              MVT::i32, AM.Disp,
                                              AM.SymbolFlags);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32,
                                             AM.Align, AM.Disp, AM.SymbolFlags);
      else if (AM.ES) {
        assert(!AM.Disp && "Non-zero displacement is ignored with ES.");
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32, AM.SymbolFlags);
      } else if (AM.JT != -1) {
        assert(!AM.Disp && "Non-zero displacement is ignored with JT.");
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32, AM.SymbolFlags);
      } else if (AM.BlockAddr)
        Disp = CurDAG->getTargetBlockAddress(AM.BlockAddr, MVT::i32, AM.Disp,
                                             AM.SymbolFlags);
      else
        Disp = CurDAG->getTargetConstant(AM.Disp, MVT::i32);

      if (AM.Segment.getNode())
        Segment = AM.Segment;
      else
        Segment = CurDAG->getRegister(0, MVT::i32);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDValue getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDValue getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - Return an SDNode that returns the value of
    /// the global base register. Output instructions required to
    /// initialize the global base register, if necessary.
    ///
    SDNode *getGlobalBaseReg();

    /// getTargetMachine - Return a reference to the TargetMachine, casted
    /// to the target-specific type.
    const X86TargetMachine &getTargetMachine() const {
      return static_cast<const X86TargetMachine &>(TM);
    }

    /// getInstrInfo - Return a reference to the TargetInstrInfo, casted
    /// to the target-specific type.
    const X86InstrInfo *getInstrInfo() const {
      return getTargetMachine().getInstrInfo();
    }
  };
}


bool
X86DAGToDAGISel::IsProfitableToFold(SDValue N, SDNode *U, SDNode *Root) const {
  if (OptLevel == CodeGenOpt::None) return false;

  if (!N.hasOneUse())
    return false;

  if (N.getOpcode() != ISD::LOAD)
    return true;

  // If N is a load, do additional profitability checks.
  if (U == Root) {
    switch (U->getOpcode()) {
    default: break;
    case X86ISD::ADD:
    case X86ISD::SUB:
    case X86ISD::AND:
    case X86ISD::XOR:
    case X86ISD::OR:
    case ISD::ADD:
    case ISD::ADDC:
    case ISD::ADDE:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR: {
      SDValue Op1 = U->getOperand(1);

      // If the other operand is an 8-bit immediate we should fold the
      // immediate instead. This reduces code size.
      // e.g.
      // movl 4(%esp), %eax
      // addl $4, %eax
      // vs.
      // movl $4, %eax
      // addl 4(%esp), %eax
      // The former is 2 bytes shorter. In the case where the increment is 1,
      // the saving can be 4 bytes (by using incl %eax).
      if (ConstantSDNode *Imm = dyn_cast<ConstantSDNode>(Op1))
        if (Imm->getAPIntValue().isSignedIntN(8))
          return false;

      // If the other operand is a TLS address, we should fold it instead.
      // This produces
      // movl %gs:0, %eax
      // leal i@NTPOFF(%eax), %eax
      // instead of
      // movl $i@NTPOFF, %eax
      // addl %gs:0, %eax
      // if the block also has an access to a second TLS address this will save
      // a load.
      // FIXME: This is probably also true for non TLS addresses.
      if (Op1.getOpcode() == X86ISD::Wrapper) {
        SDValue Val = Op1.getOperand(0);
        if (Val.getOpcode() == ISD::TargetGlobalTLSAddress)
          return false;
      }
    }
    }
  }

  return true;
}

/// MoveBelowOrigChain - Replace the original chain operand of the call with
/// the load's chain operand and move the load below the call's chain operand.
static void MoveBelowOrigChain(SelectionDAG *CurDAG, SDValue Load,
                               SDValue Call, SDValue OrigChain) {
  SmallVector<SDValue, 8> Ops;
  SDValue Chain = OrigChain.getOperand(0);
  if (Chain.getNode() == Load.getNode())
    Ops.push_back(Load.getOperand(0));
  else {
    assert(Chain.getOpcode() == ISD::TokenFactor &&
           "Unexpected chain operand");
    for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i)
      if (Chain.getOperand(i).getNode() == Load.getNode())
        Ops.push_back(Load.getOperand(0));
      else
        Ops.push_back(Chain.getOperand(i));
    SDValue NewChain =
      CurDAG->getNode(ISD::TokenFactor, Load.getDebugLoc(),
                      MVT::Other, &Ops[0], Ops.size());
    Ops.clear();
    Ops.push_back(NewChain);
  }
  for (unsigned i = 1, e = OrigChain.getNumOperands(); i != e; ++i)
    Ops.push_back(OrigChain.getOperand(i));
  CurDAG->UpdateNodeOperands(OrigChain.getNode(), &Ops[0], Ops.size());
  CurDAG->UpdateNodeOperands(Load.getNode(), Call.getOperand(0),
                             Load.getOperand(1), Load.getOperand(2));

  unsigned NumOps = Call.getNode()->getNumOperands();
  Ops.clear();
  Ops.push_back(SDValue(Load.getNode(), 1));
  for (unsigned i = 1, e = NumOps; i != e; ++i)
    Ops.push_back(Call.getOperand(i));
  CurDAG->UpdateNodeOperands(Call.getNode(), &Ops[0], NumOps);
}

/// isCalleeLoad - Return true if call address is a load and it can be
/// moved below CALLSEQ_START and the chains leading up to the call.
/// Return the CALLSEQ_START by reference as a second output.
/// In the case of a tail call, there isn't a callseq node between the call
/// chain and the load.
static bool isCalleeLoad(SDValue Callee, SDValue &Chain, bool HasCallSeq) {
  // The transformation is somewhat dangerous if the call's chain was glued to
  // the call. After MoveBelowOrigChain the load is moved between the call and
  // the chain; this can create a cycle if the load is not folded. So it is
  // *really* important that we are sure the load will be folded.
  if (Callee.getNode() == Chain.getNode() || !Callee.hasOneUse())
    return false;
  LoadSDNode *LD = dyn_cast<LoadSDNode>(Callee.getNode());
  if (!LD ||
      LD->isVolatile() ||
      LD->getAddressingMode() != ISD::UNINDEXED ||
      LD->getExtensionType() != ISD::NON_EXTLOAD)
    return false;

  // Now let's find the callseq_start.
  while (HasCallSeq && Chain.getOpcode() != ISD::CALLSEQ_START) {
    if (!Chain.hasOneUse())
      return false;
    Chain = Chain.getOperand(0);
  }

  if (!Chain.getNumOperands())
    return false;
  // Since we are not checking for AA here, conservatively abort if the chain
  // writes to memory. It's not safe to move the callee (a load) across a store.
  if (isa<MemSDNode>(Chain.getNode()) &&
      cast<MemSDNode>(Chain.getNode())->writeMem())
    return false;
  if (Chain.getOperand(0).getNode() == Callee.getNode())
    return true;
  if (Chain.getOperand(0).getOpcode() == ISD::TokenFactor &&
      Callee.getValue(1).isOperandOf(Chain.getOperand(0).getNode()) &&
      Callee.getValue(1).hasOneUse())
    return true;
  return false;
}

void X86DAGToDAGISel::PreprocessISelDAG() {
  // OptForSize is used in pattern predicates that isel is matching.
  OptForSize = MF->getFunction()->getAttributes().
    hasAttribute(AttributeSet::FunctionIndex, Attribute::OptimizeForSize);

  for (SelectionDAG::allnodes_iterator I = CurDAG->allnodes_begin(),
       E = CurDAG->allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.

    if (OptLevel != CodeGenOpt::None &&
        // Only do this when the target doesn't favor register-indirect
        // calls.
        ((N->getOpcode() == X86ISD::CALL && !Subtarget->callRegIndirect()) ||
         (N->getOpcode() == X86ISD::TC_RETURN &&
          // Only do this if the load can be folded into TC_RETURN.
          (Subtarget->is64Bit() ||
           getTargetMachine().getRelocationModel() != Reloc::PIC_)))) {
      /// Also try moving call address load from outside callseq_start to just
      /// before the call to allow it to be folded.
      ///
      ///     [Load chain]
      ///         ^
      ///         |
      ///       [Load]
      ///       ^    ^
      ///       |    |
      ///      /      \--
      ///     /          |
      ///[CALLSEQ_START] |
      ///     ^          |
      ///     |          |
      /// [LOAD/C2Reg]   |
      ///     |          |
      ///      \        /
      ///       \      /
      ///       [CALL]
      bool HasCallSeq = N->getOpcode() == X86ISD::CALL;
      SDValue Chain = N->getOperand(0);
      SDValue Load  = N->getOperand(1);
      if (!isCalleeLoad(Load, Chain, HasCallSeq))
        continue;
      MoveBelowOrigChain(CurDAG, Load, SDValue(N, 0), Chain);
      ++NumLoadMoved;
      continue;
    }

    // Lower fpround and fpextend nodes that target the FP stack to a store and
    // a load through the stack.  This is a gross hack.  We would like to simply
    // mark these as being illegal, but when we do that, legalize produces these
    // when it expands calls, then expands these in the same legalize pass.  We
    // would like dag combine to be able to hack on these between the call
    // expansion and the node legalization.  As such this pass basically does
    // "really late" legalization of these inline with the X86 isel pass.
    // FIXME: This should only happen when not compiled with -O0.
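    // Concretely, e.g. an f80 -> f32 FP_ROUND below becomes a truncating store
    // of the value into an f32 stack temporary followed by a reload of that
    // slot.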
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    EVT SrcVT = N->getOperand(0).getValueType();
    EVT DstVT = N->getValueType(0);

    // If any of the sources are vectors, no fp stack involved.
    if (SrcVT.isVector() || DstVT.isVector())
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    if (!SrcIsSSE && !DstIsSSE) {
      // If this is an FPStack extension, it is a noop.
      if (N->getOpcode() == ISD::FP_EXTEND)
        continue;
      // If this is a value-preserving FPStack truncation, it is a noop.
      if (N->getConstantOperandVal(1))
        continue;
    }

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore.  SSE can fold direct loads into other
    // operations.  Based on this, decide what we want to do.
    EVT MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDValue MemTmp = CurDAG->CreateStackTemporary(MemVT);
    DebugLoc dl = N->getDebugLoc();

    // FIXME: optimize the case where the src/dest is a load or store?
    SDValue Store = CurDAG->getTruncStore(CurDAG->getEntryNode(), dl,
                                          N->getOperand(0),
                                          MemTmp, MachinePointerInfo(), MemVT,
                                          false, false, 0);
    SDValue Result = CurDAG->getExtLoad(ISD::EXTLOAD, dl, DstVT, Store, MemTmp,
                                        MachinePointerInfo(),
                                        MemVT, false, false, 0);

538 // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
539 // extload we created. This will cause general havok on the dag because
540 // anything below the conversion could be folded into other existing nodes.
541 // To avoid invalidating 'I', back it up to the convert node.
    --I;
    CurDAG->ReplaceAllUsesOfValueWith(SDValue(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    CurDAG->DeleteNode(N);
  }
}


/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing()) {
    unsigned CallOp =
      Subtarget->is64Bit() ? X86::CALL64pcrel32 : X86::CALLpcrel32;
    BuildMI(BB, DebugLoc(),
            TII->get(CallOp)).addExternalSymbol("__main");
  }
}

void X86DAGToDAGISel::EmitFunctionEntryCode() {
  // If this is main, emit special code for main.
  if (const Function *Fn = MF->getFunction())
    if (Fn->hasExternalLinkage() && Fn->getName() == "main")
      EmitSpecialCodeForMain(MF->begin(), MF->getFrameInfo());
}

static bool isDispSafeForFrameIndex(int64_t Val) {
  // On 64-bit platforms, we can run into an issue where a frame index
  // includes a displacement that, when added to the explicit displacement,
  // will overflow the displacement field. Assuming that the frame index
  // displacement fits into a 31-bit integer (which is only slightly more
  // aggressive than the current fundamental assumption that it fits into
  // a 32-bit integer), a 31-bit disp should always be safe.
  return isInt<31>(Val);
}

bool X86DAGToDAGISel::FoldOffsetIntoAddress(uint64_t Offset,
                                            X86ISelAddressMode &AM) {
  int64_t Val = AM.Disp + Offset;
  CodeModel::Model M = TM.getCodeModel();
  if (Subtarget->is64Bit()) {
    if (!X86::isOffsetSuitableForCodeModel(Val, M,
                                           AM.hasSymbolicDisplacement()))
      return true;
    // In addition to the checks required for a register base, check that
    // we do not try to use an unsafe Disp with a frame index.
    if (AM.BaseType == X86ISelAddressMode::FrameIndexBase &&
        !isDispSafeForFrameIndex(Val))
      return true;
  }
  AM.Disp = Val;
  return false;

}

bool X86DAGToDAGISel::MatchLoadInAddress(LoadSDNode *N, X86ISelAddressMode &AM){
  SDValue Address = N->getOperand(1);

  // load gs:0 -> GS segment register.
  // load fs:0 -> FS segment register.
  //
  // This optimization is valid because the GNU TLS model defines that
  // gs:0 (or fs:0 on X86-64) contains its own address.
  // For more information see http://people.redhat.com/drepper/tls.pdf
  if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Address))
    if (C->getSExtValue() == 0 && AM.Segment.getNode() == 0 &&
        Subtarget->isTargetLinux())
      switch (N->getPointerInfo().getAddrSpace()) {
      case 256:
        AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
        return false;
      case 257:
        AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
        return false;
      }

  return true;
}

/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode.  These wrap things that will resolve down into a
/// symbol reference.  If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
  // If the addressing mode already has a symbol as the displacement, we can
  // never match another symbol.
  if (AM.hasSymbolicDisplacement())
    return true;

  SDValue N0 = N.getOperand(0);
  CodeModel::Model M = TM.getCodeModel();

  // Handle X86-64 rip-relative addresses.  We check this before checking direct
  // folding because RIP is preferable to non-RIP accesses.
  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
      // they cannot be folded into immediate fields.
      // FIXME: This can be improved for kernel and other models?
      (M == CodeModel::Small || M == CodeModel::Kernel)) {
    // Base and index reg must be 0 in order to use %rip as base.
    if (AM.hasBaseOrIndexReg())
      return true;
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.GV = G->getGlobal();
      AM.SymbolFlags = G->getTargetFlags();
      if (FoldOffsetIntoAddress(G->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.SymbolFlags = CP->getTargetFlags();
      if (FoldOffsetIntoAddress(CP->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      X86ISelAddressMode Backup = AM;
      AM.BlockAddr = BA->getBlockAddress();
      AM.SymbolFlags = BA->getTargetFlags();
      if (FoldOffsetIntoAddress(BA->getOffset(), AM)) {
        AM = Backup;
        return true;
      }
    } else
      llvm_unreachable("Unhandled symbol reference node.");

    if (N.getOpcode() == X86ISD::WrapperRIP)
      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
    return false;
  }

  // Handle the case when globals fit in our immediate field: This is true for
  // X86-32 always and X86-64 when in -mcmodel=small mode.  In 64-bit
  // mode, this only applies to a non-RIP-relative computation.
  if (!Subtarget->is64Bit() ||
      M == CodeModel::Small || M == CodeModel::Kernel) {
    assert(N.getOpcode() != X86ISD::WrapperRIP &&
           "RIP-relative addressing already handled");
    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
      AM.GV = G->getGlobal();
      AM.Disp += G->getOffset();
      AM.SymbolFlags = G->getTargetFlags();
    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
      AM.CP = CP->getConstVal();
      AM.Align = CP->getAlignment();
      AM.Disp += CP->getOffset();
      AM.SymbolFlags = CP->getTargetFlags();
    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
      AM.ES = S->getSymbol();
      AM.SymbolFlags = S->getTargetFlags();
    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
      AM.JT = J->getIndex();
      AM.SymbolFlags = J->getTargetFlags();
    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
      AM.BlockAddr = BA->getBlockAddress();
      AM.Disp += BA->getOffset();
      AM.SymbolFlags = BA->getTargetFlags();
    } else
      llvm_unreachable("Unhandled symbol reference node.");
    return false;
  }

  return true;
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM) {
  if (MatchAddressRecursively(N, AM, 0))
    return true;

  // Post-processing: Convert lea(,%reg,2) to lea(%reg,%reg), which has
  // a smaller encoding and avoids a scaled-index.
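  // (With no base register, the SIB encoding requires a 32-bit displacement,
  // so lea (,%reg,2) carries four extra bytes compared to the equivalent
  // lea (%reg,%reg).)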
  if (AM.Scale == 2 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0) {
    AM.Base_Reg = AM.IndexReg;
    AM.Scale = 1;
  }

  // Post-processing: Convert foo to foo(%rip), even in non-PIC mode,
  // because it has a smaller encoding.
  // TODO: Which other code models can use this?
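  // (A 64-bit absolute address needs a SIB byte in addition to its 32-bit
  // displacement, whereas the RIP-relative form does not.)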
  if (TM.getCodeModel() == CodeModel::Small &&
      Subtarget->is64Bit() &&
      AM.Scale == 1 &&
      AM.BaseType == X86ISelAddressMode::RegBase &&
      AM.Base_Reg.getNode() == 0 &&
      AM.IndexReg.getNode() == 0 &&
      AM.SymbolFlags == X86II::MO_NO_FLAG &&
      AM.hasSymbolicDisplacement())
    AM.Base_Reg = CurDAG->getRegister(X86::RIP, MVT::i64);

  return false;
}

// Insert a node into the DAG at least before the Pos node's position. This
// will reposition the node as needed, and will assign it a node ID that is <=
// the Pos node's ID. Note that this does *not* preserve the uniqueness of node
// IDs! The selection DAG must no longer depend on their uniqueness when this
// is used.
static void InsertDAGNode(SelectionDAG &DAG, SDValue Pos, SDValue N) {
  if (N.getNode()->getNodeId() == -1 ||
      N.getNode()->getNodeId() > Pos.getNode()->getNodeId()) {
    DAG.RepositionNode(Pos.getNode(), N.getNode());
    N.getNode()->setNodeId(Pos.getNode()->getNodeId());
  }
}

// Transform "(X >> (8-C1)) & C2" to "((X >> 8) & 0xff)" if safe.  This
// allows us to convert the shift and AND into an h-register extract and
// a scaled index.  Returns false if the simplification is performed.
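// For example, "(x >> 5) & (0xff << 3)" becomes "((x >> 8) & 0xff) << 3": the
// masked byte can then come straight from an h-register (e.g. %ah) and the
// remaining shift-by-3 folds into the address as a scale of 8.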
static bool FoldMaskAndShiftToExtract(SelectionDAG &DAG, SDValue N,
                                      uint64_t Mask,
                                      SDValue Shift, SDValue X,
                                      X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)) ||
      !Shift.hasOneUse())
    return true;

  int ScaleLog = 8 - Shift.getConstantOperandVal(1);
  if (ScaleLog <= 0 || ScaleLog >= 4 ||
      Mask != (0xffu << ScaleLog))
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue Eight = DAG.getConstant(8, MVT::i8);
  SDValue NewMask = DAG.getConstant(0xff, VT);
  SDValue Srl = DAG.getNode(ISD::SRL, DL, VT, X, Eight);
  SDValue And = DAG.getNode(ISD::AND, DL, VT, Srl, NewMask);
  SDValue ShlCount = DAG.getConstant(ScaleLog, MVT::i8);
  SDValue Shl = DAG.getNode(ISD::SHL, DL, VT, And, ShlCount);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, Eight);
  InsertDAGNode(DAG, N, Srl);
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, And);
  InsertDAGNode(DAG, N, ShlCount);
  InsertDAGNode(DAG, N, Shl);
  DAG.ReplaceAllUsesWith(N, Shl);
  AM.IndexReg = And;
  AM.Scale = (1 << ScaleLog);
  return false;
}

// Transforms "(X << C1) & C2" to "(X & (C2>>C1)) << C1" if safe and if this
// allows us to fold the shift into this addressing mode. Returns false if the
// transform succeeded.
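// For example, "(x << 2) & 0x3fc" becomes "(x & 0xff) << 2", and the trailing
// shift is then absorbed into the addressing mode as a scale of 4.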
static bool FoldMaskedShiftToScaledMask(SelectionDAG &DAG, SDValue N,
                                        uint64_t Mask,
                                        SDValue Shift, SDValue X,
                                        X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SHL ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  // Not likely to be profitable if either the AND or SHIFT node has more
  // than one use (unless all uses are for address computation). Besides,
  // isel mechanism requires their node ids to be reused.
  if (!N.hasOneUse() || !Shift.hasOneUse())
    return true;

  // Verify that the shift amount is something we can fold.
  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  if (ShiftAmt != 1 && ShiftAmt != 2 && ShiftAmt != 3)
    return true;

  EVT VT = N.getValueType();
  DebugLoc DL = N.getDebugLoc();
  SDValue NewMask = DAG.getConstant(Mask >> ShiftAmt, VT);
  SDValue NewAnd = DAG.getNode(ISD::AND, DL, VT, X, NewMask);
  SDValue NewShift = DAG.getNode(ISD::SHL, DL, VT, NewAnd, Shift.getOperand(1));

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewMask);
  InsertDAGNode(DAG, N, NewAnd);
  InsertDAGNode(DAG, N, NewShift);
  DAG.ReplaceAllUsesWith(N, NewShift);

  AM.Scale = 1 << ShiftAmt;
  AM.IndexReg = NewAnd;
  return false;
}

// Implement some heroics to detect shifts of masked values where the mask can
// be replaced by extending the shift and undoing that in the addressing mode
// scale. Patterns such as (shl (srl x, c1), c2) are canonicalized into (and
// (srl x, SHIFT), MASK) by DAGCombines that don't know the shl can be done in
// the addressing mode. This results in code such as:
//
//   int f(short *y, int *lookup_table) {
//     ...
//     return *y + lookup_table[*y >> 11];
//   }
//
// Turning into:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $11, %ecx
//   addl (%rsi,%rcx,4), %eax
//
// Instead of:
//   movzwl (%rdi), %eax
//   movl %eax, %ecx
//   shrl $9, %ecx
//   andl $124, %rcx
//   addl (%rsi,%rcx), %eax
//
// Note that this function assumes the mask is provided as a mask *after* the
// value is shifted. The input chain may or may not match that, but computing
// such a mask is trivial.
static bool FoldMaskAndShiftToScale(SelectionDAG &DAG, SDValue N,
                                    uint64_t Mask,
                                    SDValue Shift, SDValue X,
                                    X86ISelAddressMode &AM) {
  if (Shift.getOpcode() != ISD::SRL || !Shift.hasOneUse() ||
      !isa<ConstantSDNode>(Shift.getOperand(1)))
    return true;

  unsigned ShiftAmt = Shift.getConstantOperandVal(1);
  unsigned MaskLZ = CountLeadingZeros_64(Mask);
  unsigned MaskTZ = CountTrailingZeros_64(Mask);

  // The amount of shift we're trying to fit into the addressing mode is taken
  // from the trailing zeros of the mask.
  unsigned AMShiftAmt = MaskTZ;

  // There is nothing we can do here unless the mask is removing some bits.
  // Also, the addressing mode can only represent shifts of 1, 2, or 3 bits.
  if (AMShiftAmt <= 0 || AMShiftAmt > 3) return true;

  // We also need to ensure that mask is a continuous run of bits.
  if (CountTrailingOnes_64(Mask >> MaskTZ) + MaskTZ + MaskLZ != 64) return true;

  // Scale the leading zero count down based on the actual size of the value.
  // Also scale it down based on the size of the shift.
  MaskLZ -= (64 - X.getValueSizeInBits()) + ShiftAmt;

  // The final check is to ensure that any masked out high bits of X are
  // already known to be zero. Otherwise, the mask has a semantic impact
  // other than masking out a couple of low bits. Unfortunately, because of
  // the mask, zero extensions will be removed from operands in some cases.
  // This code works extra hard to look through extensions because we can
  // replace them with zero extensions cheaply if necessary.
  bool ReplacingAnyExtend = false;
  if (X.getOpcode() == ISD::ANY_EXTEND) {
    unsigned ExtendBits =
      X.getValueSizeInBits() - X.getOperand(0).getValueSizeInBits();
    // Assume that we'll replace the any-extend with a zero-extend, and
    // narrow the search to the extended value.
    X = X.getOperand(0);
    MaskLZ = ExtendBits > MaskLZ ? 0 : MaskLZ - ExtendBits;
    ReplacingAnyExtend = true;
  }
  APInt MaskedHighBits = APInt::getHighBitsSet(X.getValueSizeInBits(),
                                               MaskLZ);
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(X, KnownZero, KnownOne);
  if (MaskedHighBits != KnownZero) return true;

  // We've identified a pattern that can be transformed into a single shift
  // and an addressing mode. Make it so.
  EVT VT = N.getValueType();
  if (ReplacingAnyExtend) {
    assert(X.getValueType() != VT);
    // We looked through an ANY_EXTEND node, insert a ZERO_EXTEND.
    SDValue NewX = DAG.getNode(ISD::ZERO_EXTEND, X.getDebugLoc(), VT, X);
    InsertDAGNode(DAG, N, NewX);
    X = NewX;
  }
  DebugLoc DL = N.getDebugLoc();
  SDValue NewSRLAmt = DAG.getConstant(ShiftAmt + AMShiftAmt, MVT::i8);
  SDValue NewSRL = DAG.getNode(ISD::SRL, DL, VT, X, NewSRLAmt);
  SDValue NewSHLAmt = DAG.getConstant(AMShiftAmt, MVT::i8);
  SDValue NewSHL = DAG.getNode(ISD::SHL, DL, VT, NewSRL, NewSHLAmt);

  // Insert the new nodes into the topological ordering. We must do this in
  // a valid topological ordering as nothing is going to go back and re-sort
  // these nodes. We continually insert before 'N' in sequence as this is
  // essentially a pre-flattened and pre-sorted sequence of nodes. There is no
  // hierarchy left to express.
  InsertDAGNode(DAG, N, NewSRLAmt);
  InsertDAGNode(DAG, N, NewSRL);
  InsertDAGNode(DAG, N, NewSHLAmt);
  InsertDAGNode(DAG, N, NewSHL);
  DAG.ReplaceAllUsesWith(N, NewSHL);

  AM.Scale = 1 << AMShiftAmt;
  AM.IndexReg = NewSRL;
  return false;
}

bool X86DAGToDAGISel::MatchAddressRecursively(SDValue N, X86ISelAddressMode &AM,
                                              unsigned Depth) {
  DebugLoc dl = N.getDebugLoc();
  DEBUG({
      dbgs() << "MatchAddress: ";
      AM.dump();
    });
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM);

  // If this is already a %rip relative address, we can only merge immediates
  // into it.  Instead of handling this in every case, we handle it here.
  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRelative()) {
    // FIXME: JumpTable and ExternalSymbol address currently don't like
    // displacements.  It isn't very important, but this should be fixed for
    // consistency.
    if (!AM.ES && AM.JT != -1) return true;

    if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N))
      if (!FoldOffsetIntoAddress(Cst->getSExtValue(), AM))
        return false;
    return true;
  }

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue();
    if (!FoldOffsetIntoAddress(Val, AM))
      return false;
    break;
  }

  case X86ISD::Wrapper:
  case X86ISD::WrapperRIP:
    if (!MatchWrapper(N, AM))
      return false;
    break;

  case ISD::LOAD:
    if (!MatchLoadInAddress(cast<LoadSDNode>(N), AM))
      return false;
    break;

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base_Reg.getNode() == 0 &&
        (!Subtarget->is64Bit() || isDispSafeForFrameIndex(AM.Disp))) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base_FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode
          *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) {
      unsigned Val = CN->getZExtValue();
      // Note that we handle x<<1 as (,x,2) rather than (x,x) here so
      // that the base operand remains free for further matching. If
      // the base doesn't end up getting used, a post-processing step
      // in MatchAddress turns (,x,2) into (x,x), which is cheaper.
Chris Lattnerff87f05e2007-12-08 07:22:58 +00001028 if (Val == 1 || Val == 2 || Val == 3) {
1029 AM.Scale = 1 << Val;
Gabor Greiff304a7a2008-08-28 21:40:38 +00001030 SDValue ShVal = N.getNode()->getOperand(0);
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001031
Chris Lattnerff87f05e2007-12-08 07:22:58 +00001032 // Okay, we know that we have a scale by now. However, if the scaled
1033 // value is an add of something and a constant, we can fold the
1034 // constant into the disp field here.
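        // For example, (shl (add X, 5), 2) gives IndexReg = X, Scale = 4 and
        // Disp = 5 << 2 = 20.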
Chris Lattner46c01a32011-02-13 22:25:43 +00001035 if (CurDAG->isBaseWithConstantOffset(ShVal)) {
Gabor Greiff304a7a2008-08-28 21:40:38 +00001036 AM.IndexReg = ShVal.getNode()->getOperand(0);
Chris Lattnerff87f05e2007-12-08 07:22:58 +00001037 ConstantSDNode *AddVal =
Gabor Greiff304a7a2008-08-28 21:40:38 +00001038 cast<ConstantSDNode>(ShVal.getNode()->getOperand(1));
Richard Smith228e6d42012-08-24 23:29:28 +00001039 uint64_t Disp = (uint64_t)AddVal->getSExtValue() << Val;
Eli Friedmanef67e7d2011-07-13 20:44:23 +00001040 if (!FoldOffsetIntoAddress(Disp, AM))
1041 return false;
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001042 }
Eli Friedmanef67e7d2011-07-13 20:44:23 +00001043
1044 AM.IndexReg = ShVal;
Chris Lattnerff87f05e2007-12-08 07:22:58 +00001045 return false;
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001046 }
Chris Lattnerff87f05e2007-12-08 07:22:58 +00001047 }
Jakub Staszak43fafaf2013-01-04 23:01:26 +00001048 break;
Evan Chengc9fab312005-12-08 02:01:35 +00001049
Chandler Carruth3dbcda82012-01-11 09:35:02 +00001050 case ISD::SRL: {
1051 // Scale must not be used already.
1052 if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
1053
1054 SDValue And = N.getOperand(0);
1055 if (And.getOpcode() != ISD::AND) break;
1056 SDValue X = And.getOperand(0);
1057
1058 // We only handle up to 64-bit values here as those are what matter for
1059 // addressing mode optimizations.
1060 if (X.getValueSizeInBits() > 64) break;
1061
1062 // The mask used for the transform is expected to be post-shift, but we
1063 // found the shift first so just apply the shift to the mask before passing
1064 // it down.
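    // For example, with (srl (and X, 0xff0), 4) the post-shift mask is
    // 0xff0 >> 4 == 0xff.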
1065 if (!isa<ConstantSDNode>(N.getOperand(1)) ||
1066 !isa<ConstantSDNode>(And.getOperand(1)))
1067 break;
1068 uint64_t Mask = And.getConstantOperandVal(1) >> N.getConstantOperandVal(1);
1069
Chandler Carruth55b2cde2012-01-11 08:41:08 +00001070 // Try to fold the mask and shift into the scale, and return false if we
1071 // succeed.
Chandler Carruth3dbcda82012-01-11 09:35:02 +00001072 if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, N, X, AM))
Chandler Carruth55b2cde2012-01-11 08:41:08 +00001073 return false;
1074 break;
Chandler Carruth3dbcda82012-01-11 09:35:02 +00001075 }
Chandler Carruth55b2cde2012-01-11 08:41:08 +00001076
Dan Gohmanbf474952007-10-22 20:22:24 +00001077 case ISD::SMUL_LOHI:
1078 case ISD::UMUL_LOHI:
1079 // A mul_lohi where we need the low part can be folded as a plain multiply.
Gabor Greifabfdf922008-08-26 22:36:50 +00001080 if (N.getResNo() != 0) break;
Dan Gohmanbf474952007-10-22 20:22:24 +00001081 // FALL THROUGH
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001082 case ISD::MUL:
Evan Chenga84a3182009-03-30 21:36:47 +00001083 case X86ISD::MUL_IMM:
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001084 // X*[3,5,9] -> X+X*[2,4,8]
Dan Gohmanf14b77e2008-11-05 04:14:16 +00001085 if (AM.BaseType == X86ISelAddressMode::RegBase &&
Dan Gohman0fd54fb2010-04-29 23:30:41 +00001086 AM.Base_Reg.getNode() == 0 &&
Chris Lattnerfea81da2009-06-27 04:16:01 +00001087 AM.IndexReg.getNode() == 0) {
Gabor Greif81d6a382008-08-31 15:37:04 +00001088 if (ConstantSDNode
1089 *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
Dan Gohmaneffb8942008-09-12 16:56:44 +00001090 if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
1091 CN->getZExtValue() == 9) {
1092 AM.Scale = unsigned(CN->getZExtValue())-1;
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001093
Gabor Greiff304a7a2008-08-28 21:40:38 +00001094 SDValue MulVal = N.getNode()->getOperand(0);
Dan Gohman2ce6f2a2008-07-27 21:46:04 +00001095 SDValue Reg;
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001096
1097 // Okay, we know that we have a scale by now. However, if the scaled
1098 // value is an add of something and a constant, we can fold the
1099 // constant into the disp field here.
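          // For example, (mul (add X, 4), 5) becomes Base = Index = X,
          // Scale = 4 and Disp = 4 * 5 = 20, i.e. X + X*4 + 20.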
Gabor Greiff304a7a2008-08-28 21:40:38 +00001100 if (MulVal.getNode()->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
1101 isa<ConstantSDNode>(MulVal.getNode()->getOperand(1))) {
1102 Reg = MulVal.getNode()->getOperand(0);
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001103 ConstantSDNode *AddVal =
Gabor Greiff304a7a2008-08-28 21:40:38 +00001104 cast<ConstantSDNode>(MulVal.getNode()->getOperand(1));
Eli Friedmanef67e7d2011-07-13 20:44:23 +00001105 uint64_t Disp = AddVal->getSExtValue() * CN->getZExtValue();
1106 if (FoldOffsetIntoAddress(Disp, AM))
Gabor Greiff304a7a2008-08-28 21:40:38 +00001107 Reg = N.getNode()->getOperand(0);
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001108 } else {
Gabor Greiff304a7a2008-08-28 21:40:38 +00001109 Reg = N.getNode()->getOperand(0);
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001110 }
1111
Dan Gohman0fd54fb2010-04-29 23:30:41 +00001112 AM.IndexReg = AM.Base_Reg = Reg;
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001113 return false;
1114 }
Chris Lattnerfe8c5302007-02-04 20:18:17 +00001115 }
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001116 break;
1117
Dan Gohmanfaf75c82009-05-11 18:02:53 +00001118 case ISD::SUB: {
1119    // Given A-B, if A can be completely folded into the address, leaving
1120    // the index field unused, use -B as the index.
1121    // This is a win if A has multiple parts that can be folded into
1122 // the address. Also, this saves a mov if the base register has
1123 // other uses, since it avoids a two-address sub instruction, however
1124 // it costs an additional mov if the index register has other uses.
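    // For example, with (GlobalAddr + 8) - B the LHS folds entirely into the
    // displacement, so Neg = 0 - B can serve as the index with Scale = 1,
    // provided the cost heuristic below deems the transform profitable.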
1125
Dan Gohman99ba4da2010-06-18 01:24:29 +00001126 // Add an artificial use to this node so that we can keep track of
1127 // it if it gets CSE'd with a different node.
1128 HandleSDNode Handle(N);
1129
Dan Gohmanfaf75c82009-05-11 18:02:53 +00001130 // Test if the LHS of the sub can be folded.
1131 X86ISelAddressMode Backup = AM;
Dan Gohman99ba4da2010-06-18 01:24:29 +00001132 if (MatchAddressRecursively(N.getNode()->getOperand(0), AM, Depth+1)) {
Dan Gohmanfaf75c82009-05-11 18:02:53 +00001133 AM = Backup;
1134 break;
1135 }
1136 // Test if the index field is free for use.
Chris Lattnerfea81da2009-06-27 04:16:01 +00001137 if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
Dan Gohmanfaf75c82009-05-11 18:02:53 +00001138 AM = Backup;
1139 break;
1140 }
Evan Cheng68333f52010-03-17 23:58:35 +00001141
Dan Gohmanfaf75c82009-05-11 18:02:53 +00001142 int Cost = 0;
Dan Gohman99ba4da2010-06-18 01:24:29 +00001143 SDValue RHS = Handle.getValue().getNode()->getOperand(1);
Dan Gohmanfaf75c82009-05-11 18:02:53 +00001144 // If the RHS involves a register with multiple uses, this
1145 // transformation incurs an extra mov, due to the neg instruction
1146 // clobbering its operand.
1147 if (!RHS.getNode()->hasOneUse() ||
1148 RHS.getNode()->getOpcode() == ISD::CopyFromReg ||
1149 RHS.getNode()->getOpcode() == ISD::TRUNCATE ||
1150 RHS.getNode()->getOpcode() == ISD::ANY_EXTEND ||
1151 (RHS.getNode()->getOpcode() == ISD::ZERO_EXTEND &&
Owen Anderson9f944592009-08-11 20:47:22 +00001152 RHS.getNode()->getOperand(0).getValueType() == MVT::i32))
Dan Gohmanfaf75c82009-05-11 18:02:53 +00001153 ++Cost;
1154 // If the base is a register with multiple uses, this
1155 // transformation may save a mov.
1156 if ((AM.BaseType == X86ISelAddressMode::RegBase &&
Dan Gohman0fd54fb2010-04-29 23:30:41 +00001157 AM.Base_Reg.getNode() &&
1158 !AM.Base_Reg.getNode()->hasOneUse()) ||
Dan Gohmanfaf75c82009-05-11 18:02:53 +00001159 AM.BaseType == X86ISelAddressMode::FrameIndexBase)
1160 --Cost;
1161 // If the folded LHS was interesting, this transformation saves
1162 // address arithmetic.
1163 if ((AM.hasSymbolicDisplacement() && !Backup.hasSymbolicDisplacement()) +
1164 ((AM.Disp != 0) && (Backup.Disp == 0)) +
1165 (AM.Segment.getNode() && !Backup.Segment.getNode()) >= 2)
1166 --Cost;
1167 // If it doesn't look like it may be an overall win, don't do it.
1168 if (Cost >= 0) {
1169 AM = Backup;
1170 break;
1171 }
1172
1173 // Ok, the transformation is legal and appears profitable. Go for it.
1174 SDValue Zero = CurDAG->getConstant(0, N.getValueType());
1175 SDValue Neg = CurDAG->getNode(ISD::SUB, dl, N.getValueType(), Zero, RHS);
1176 AM.IndexReg = Neg;
1177 AM.Scale = 1;
1178
1179 // Insert the new nodes into the topological ordering.
Chandler Carruth3eacfb82012-01-11 11:04:36 +00001180 InsertDAGNode(*CurDAG, N, Zero);
1181 InsertDAGNode(*CurDAG, N, Neg);
Dan Gohmanfaf75c82009-05-11 18:02:53 +00001182 return false;
1183 }
1184
Evan Chengbf38a5e2009-01-17 07:09:27 +00001185 case ISD::ADD: {
Dan Gohman99ba4da2010-06-18 01:24:29 +00001186 // Add an artificial use to this node so that we can keep track of
1187 // it if it gets CSE'd with a different node.
1188 HandleSDNode Handle(N);
Dan Gohman99ba4da2010-06-18 01:24:29 +00001189
Evan Chengbf38a5e2009-01-17 07:09:27 +00001190 X86ISelAddressMode Backup = AM;
Chris Lattner35a2e652011-01-16 08:48:11 +00001191 if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
1192 !MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
Dan Gohman99ba4da2010-06-18 01:24:29 +00001193 return false;
1194 AM = Backup;
Chad Rosier24c19d22012-08-01 18:39:17 +00001195
Evan Cheng68333f52010-03-17 23:58:35 +00001196 // Try again after commuting the operands.
Chris Lattner35a2e652011-01-16 08:48:11 +00001197 if (!MatchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1)&&
1198 !MatchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth+1))
Dan Gohman99ba4da2010-06-18 01:24:29 +00001199 return false;
Evan Chengbf38a5e2009-01-17 07:09:27 +00001200 AM = Backup;
Dan Gohmana1d92422009-03-13 02:25:09 +00001201
1202 // If we couldn't fold both operands into the address at the same time,
1203 // see if we can just put each operand into a register and fold at least
1204 // the add.
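    // For example, a plain (add X, Y) then matches as Base = X, Index = Y,
    // Scale = 1.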
1205 if (AM.BaseType == X86ISelAddressMode::RegBase &&
Dan Gohman0fd54fb2010-04-29 23:30:41 +00001206 !AM.Base_Reg.getNode() &&
Chris Lattnerfea81da2009-06-27 04:16:01 +00001207 !AM.IndexReg.getNode()) {
Chris Lattner35a2e652011-01-16 08:48:11 +00001208 N = Handle.getValue();
1209 AM.Base_Reg = N.getOperand(0);
1210 AM.IndexReg = N.getOperand(1);
Dan Gohmana1d92422009-03-13 02:25:09 +00001211 AM.Scale = 1;
1212 return false;
1213 }
Chris Lattner35a2e652011-01-16 08:48:11 +00001214 N = Handle.getValue();
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001215 break;
Evan Chengbf38a5e2009-01-17 07:09:27 +00001216 }
Evan Cheng734e1e22006-05-30 06:59:36 +00001217
Chris Lattnerfe8c5302007-02-04 20:18:17 +00001218 case ISD::OR:
Sylvestre Ledru91ce36c2012-09-27 10:14:43 +00001219 // Handle "X | C" as "X + C" iff X is known to have C bits clear.
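    // For example, in (or (shl X, 4), 7) the low four bits of the shl are known
    // zero, so the OR behaves like an ADD and the 7 can go into the displacement.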
Chris Lattner46c01a32011-02-13 22:25:43 +00001220 if (CurDAG->isBaseWithConstantOffset(N)) {
Chris Lattnerff87f05e2007-12-08 07:22:58 +00001221 X86ISelAddressMode Backup = AM;
Chris Lattner84776782010-04-20 23:18:40 +00001222 ConstantSDNode *CN = cast<ConstantSDNode>(N.getOperand(1));
Evan Cheng68333f52010-03-17 23:58:35 +00001223
Chris Lattnerff87f05e2007-12-08 07:22:58 +00001224 // Start with the LHS as an addr mode.
Dan Gohman99ba4da2010-06-18 01:24:29 +00001225 if (!MatchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
Eli Friedmanef67e7d2011-07-13 20:44:23 +00001226 !FoldOffsetIntoAddress(CN->getSExtValue(), AM))
Chris Lattnerff87f05e2007-12-08 07:22:58 +00001227 return false;
Chris Lattnerff87f05e2007-12-08 07:22:58 +00001228 AM = Backup;
Evan Cheng734e1e22006-05-30 06:59:36 +00001229 }
1230 break;
Chad Rosier24c19d22012-08-01 18:39:17 +00001231
Evan Cheng827d30d2007-12-13 00:43:27 +00001232 case ISD::AND: {
Dan Gohman57d6bd32009-04-13 16:09:41 +00001233 // Perform some heroic transforms on an and of a constant-count shift
1234 // with a constant to enable use of the scaled offset field.
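    // Roughly, a pattern such as (and (shl X, 2), 0x3fc) can be rewritten as
    // (shl (and X, 0xff), 2), letting the shift become Scale = 4; the helpers
    // called below attempt this and related folds.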
1235
Evan Cheng827d30d2007-12-13 00:43:27 +00001236 // Scale must not be used already.
Gabor Greiff304a7a2008-08-28 21:40:38 +00001237 if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
Evan Chenga20a7732008-02-07 08:53:49 +00001238
Chandler Carruthaa01e662012-01-11 09:35:00 +00001239 SDValue Shift = N.getOperand(0);
1240 if (Shift.getOpcode() != ISD::SRL && Shift.getOpcode() != ISD::SHL) break;
Dan Gohman57d6bd32009-04-13 16:09:41 +00001241 SDValue X = Shift.getOperand(0);
Chandler Carruthaa01e662012-01-11 09:35:00 +00001242
1243 // We only handle up to 64-bit values here as those are what matter for
1244 // addressing mode optimizations.
1245 if (X.getValueSizeInBits() > 64) break;
1246
Chandler Carruthb0049f42012-01-11 09:35:04 +00001247 if (!isa<ConstantSDNode>(N.getOperand(1)))
1248 break;
1249 uint64_t Mask = N.getConstantOperandVal(1);
Evan Cheng827d30d2007-12-13 00:43:27 +00001250
Chandler Carruth51d30762012-01-11 08:48:20 +00001251 // Try to fold the mask and shift into an extract and scale.
Chandler Carruthb0049f42012-01-11 09:35:04 +00001252 if (!FoldMaskAndShiftToExtract(*CurDAG, N, Mask, Shift, X, AM))
Chandler Carruth51d30762012-01-11 08:48:20 +00001253 return false;
Dan Gohman57d6bd32009-04-13 16:09:41 +00001254
Chandler Carruth51d30762012-01-11 08:48:20 +00001255 // Try to fold the mask and shift directly into the scale.
Chandler Carruthb0049f42012-01-11 09:35:04 +00001256 if (!FoldMaskAndShiftToScale(*CurDAG, N, Mask, Shift, X, AM))
Chandler Carruth55b2cde2012-01-11 08:41:08 +00001257 return false;
1258
Chandler Carruthaa01e662012-01-11 09:35:00 +00001259 // Try to swap the mask and shift to place shifts which can be done as
1260 // a scale on the outside of the mask.
Chandler Carruthb0049f42012-01-11 09:35:04 +00001261 if (!FoldMaskedShiftToScaledMask(*CurDAG, N, Mask, Shift, X, AM))
Chandler Carruthaa01e662012-01-11 09:35:00 +00001262 return false;
1263 break;
Evan Cheng827d30d2007-12-13 00:43:27 +00001264 }
Evan Cheng734e1e22006-05-30 06:59:36 +00001265 }
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001266
Rafael Espindola92773792009-03-31 16:16:57 +00001267 return MatchAddressBase(N, AM);
Dan Gohmanccb36112007-08-13 20:03:06 +00001268}
1269
1270/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
1271/// specified addressing mode without any further recursion.
Rafael Espindola92773792009-03-31 16:16:57 +00001272bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001273 // Is the base register already occupied?
Dan Gohman0fd54fb2010-04-29 23:30:41 +00001274 if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base_Reg.getNode()) {
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001275 // If so, check to see if the scale index register is set.
Chris Lattnerfea81da2009-06-27 04:16:01 +00001276 if (AM.IndexReg.getNode() == 0) {
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001277 AM.IndexReg = N;
1278 AM.Scale = 1;
1279 return false;
1280 }
1281
1282 // Otherwise, we cannot select it.
1283 return true;
1284 }
1285
1286 // Default, generate it as a register.
1287 AM.BaseType = X86ISelAddressMode::RegBase;
Dan Gohman0fd54fb2010-04-29 23:30:41 +00001288 AM.Base_Reg = N;
Chris Lattner3f0f71b2005-11-19 02:11:08 +00001289 return false;
1290}
1291
Evan Chengc9fab312005-12-08 02:01:35 +00001292/// SelectAddr - Returns true if it is able to pattern match an addressing mode.
1293/// It returns, by reference, the operands which make up the maximal addressing
1294/// mode it can match.
Chris Lattnerd58d7c12010-09-21 22:07:31 +00001295///
1296/// Parent is the parent node of the addr operand that is being matched. It
1297/// is always a load, store, atomic node, or null. It is only null when
1298/// checking memory operands for inline asm nodes.
1299bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
Dan Gohman2ce6f2a2008-07-27 21:46:04 +00001300 SDValue &Scale, SDValue &Index,
Rafael Espindola3b2df102009-04-08 21:14:34 +00001301 SDValue &Disp, SDValue &Segment) {
Evan Chengc9fab312005-12-08 02:01:35 +00001302 X86ISelAddressMode AM;
Chad Rosier24c19d22012-08-01 18:39:17 +00001303
Chris Lattner8a236b62010-09-22 04:39:11 +00001304 if (Parent &&
1305    // This list of opcodes covers all the nodes that have an "addr:$ptr" operand
1306 // that are not a MemSDNode, and thus don't have proper addrspace info.
Chris Lattner8a236b62010-09-22 04:39:11 +00001307 Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
Eric Christopherc1b3e072010-09-22 20:42:08 +00001308 Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
Michael Liao97bf3632012-10-15 22:39:43 +00001309 Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
1310 Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
1311 Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
Chris Lattner8a236b62010-09-22 04:39:11 +00001312 unsigned AddrSpace =
1313 cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
1314 // AddrSpace 256 -> GS, 257 -> FS.
1315 if (AddrSpace == 256)
1316 AM.Segment = CurDAG->getRegister(X86::GS, MVT::i16);
1317 if (AddrSpace == 257)
1318 AM.Segment = CurDAG->getRegister(X86::FS, MVT::i16);
1319 }
Chad Rosier24c19d22012-08-01 18:39:17 +00001320
Evan Cheng3dfd04e2009-12-18 01:59:21 +00001321 if (MatchAddress(N, AM))
Evan Chengbc7a0f442006-01-11 06:09:51 +00001322 return false;
Evan Chengc9fab312005-12-08 02:01:35 +00001323
Owen Anderson53aa7a92009-08-10 22:56:29 +00001324 EVT VT = N.getValueType();
Evan Chengbc7a0f442006-01-11 06:09:51 +00001325 if (AM.BaseType == X86ISelAddressMode::RegBase) {
Dan Gohman0fd54fb2010-04-29 23:30:41 +00001326 if (!AM.Base_Reg.getNode())
1327 AM.Base_Reg = CurDAG->getRegister(0, VT);
Evan Chengc9fab312005-12-08 02:01:35 +00001328 }
Evan Chengbc7a0f442006-01-11 06:09:51 +00001329
Gabor Greiff304a7a2008-08-28 21:40:38 +00001330 if (!AM.IndexReg.getNode())
Evan Cheng11b0a5d2006-09-08 06:48:29 +00001331 AM.IndexReg = CurDAG->getRegister(0, VT);
Evan Chengbc7a0f442006-01-11 06:09:51 +00001332
Rafael Espindola3b2df102009-04-08 21:14:34 +00001333 getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
Evan Chengbc7a0f442006-01-11 06:09:51 +00001334 return true;
Evan Chengc9fab312005-12-08 02:01:35 +00001335}
1336
Chris Lattner398195e2006-10-07 21:55:32 +00001337/// SelectScalarSSELoad - Match a scalar SSE load. In particular, we want to
1338/// match a load whose top elements are either undef or zeros. The load flavor
1339/// is derived from the type of N, which is either v4f32 or v2f64.
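/// For example, (v4f32 (scalar_to_vector (load addr))) leaves the upper
/// elements undef, while (X86ISD::VZEXT_MOVL (scalar_to_vector (load addr)))
/// requires them to be zero; both forms are handled below.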
Chris Lattner3f482152010-02-17 06:07:47 +00001340///
1341/// We also return:
Chris Lattner18a32ce2010-02-21 03:17:59 +00001342/// PatternNodeWithChain: this is the matched node that has a chain input and
1343/// output.
Chris Lattnerbd6e1932010-03-01 22:51:11 +00001344bool X86DAGToDAGISel::SelectScalarSSELoad(SDNode *Root,
Dan Gohman2ce6f2a2008-07-27 21:46:04 +00001345 SDValue N, SDValue &Base,
1346 SDValue &Scale, SDValue &Index,
Rafael Espindola3b2df102009-04-08 21:14:34 +00001347 SDValue &Disp, SDValue &Segment,
Chris Lattner18a32ce2010-02-21 03:17:59 +00001348 SDValue &PatternNodeWithChain) {
Chris Lattner398195e2006-10-07 21:55:32 +00001349 if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
Chris Lattner18a32ce2010-02-21 03:17:59 +00001350 PatternNodeWithChain = N.getOperand(0);
1351 if (ISD::isNON_EXTLoad(PatternNodeWithChain.getNode()) &&
1352 PatternNodeWithChain.hasOneUse() &&
Chris Lattner3c29aff2010-02-21 04:53:34 +00001353 IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
Dan Gohman21cea8a2010-04-17 15:26:15 +00001354 IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
Chris Lattner18a32ce2010-02-21 03:17:59 +00001355 LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
Chris Lattnerd58d7c12010-09-21 22:07:31 +00001356 if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
Chris Lattner398195e2006-10-07 21:55:32 +00001357 return false;
1358 return true;
1359 }
1360 }
Chris Lattnerd5fcfaa2006-10-11 22:09:58 +00001361
1362 // Also handle the case where we explicitly require zeros in the top
Chris Lattner398195e2006-10-07 21:55:32 +00001363 // elements. This is a vector shuffle from the zero vector.
Gabor Greiff304a7a2008-08-28 21:40:38 +00001364 if (N.getOpcode() == X86ISD::VZEXT_MOVL && N.getNode()->hasOneUse() &&
Chris Lattner5728bdd2007-11-25 00:24:49 +00001365 // Check to see if the top elements are all zeros (or bitcast of zeros).
Chad Rosier24c19d22012-08-01 18:39:17 +00001366 N.getOperand(0).getOpcode() == ISD::SCALAR_TO_VECTOR &&
Gabor Greiff304a7a2008-08-28 21:40:38 +00001367 N.getOperand(0).getNode()->hasOneUse() &&
1368 ISD::isNON_EXTLoad(N.getOperand(0).getOperand(0).getNode()) &&
Chris Lattnerafac7dad2010-02-16 22:35:06 +00001369 N.getOperand(0).getOperand(0).hasOneUse() &&
1370 IsProfitableToFold(N.getOperand(0), N.getNode(), Root) &&
Dan Gohman21cea8a2010-04-17 15:26:15 +00001371 IsLegalToFold(N.getOperand(0), N.getNode(), Root, OptLevel)) {
Evan Cheng78af38c2008-05-08 00:57:18 +00001372 // Okay, this is a zero extending load. Fold it.
1373 LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(0).getOperand(0));
Chris Lattnerd58d7c12010-09-21 22:07:31 +00001374 if (!SelectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp, Segment))
Evan Cheng78af38c2008-05-08 00:57:18 +00001375 return false;
Chris Lattner18a32ce2010-02-21 03:17:59 +00001376 PatternNodeWithChain = SDValue(LD, 0);
Evan Cheng78af38c2008-05-08 00:57:18 +00001377 return true;
Chris Lattnerd5fcfaa2006-10-11 22:09:58 +00001378 }
Chris Lattner398195e2006-10-07 21:55:32 +00001379 return false;
1380}
1381
1382
Evan Cheng77d86ff2006-02-25 10:09:08 +00001383/// SelectLEAAddr - Calls SelectAddr and determines if the maximal addressing
1384/// mode it matches can be cost-effectively emitted as an LEA instruction.
Chris Lattner0e023ea2010-09-21 20:31:19 +00001385bool X86DAGToDAGISel::SelectLEAAddr(SDValue N,
Dan Gohman2ce6f2a2008-07-27 21:46:04 +00001386 SDValue &Base, SDValue &Scale,
Chris Lattnerf4693072010-07-08 23:46:44 +00001387 SDValue &Index, SDValue &Disp,
1388 SDValue &Segment) {
Evan Cheng77d86ff2006-02-25 10:09:08 +00001389 X86ISelAddressMode AM;
Rafael Espindolabb834f02009-04-10 10:09:34 +00001390
1391 // Set AM.Segment to prevent MatchAddress from using one. LEA doesn't support
1392 // segments.
1393 SDValue Copy = AM.Segment;
Owen Anderson9f944592009-08-11 20:47:22 +00001394 SDValue T = CurDAG->getRegister(0, MVT::i32);
Rafael Espindolabb834f02009-04-10 10:09:34 +00001395 AM.Segment = T;
Evan Cheng77d86ff2006-02-25 10:09:08 +00001396 if (MatchAddress(N, AM))
1397 return false;
Rafael Espindolabb834f02009-04-10 10:09:34 +00001398 assert (T == AM.Segment);
1399 AM.Segment = Copy;
Rafael Espindola3b2df102009-04-08 21:14:34 +00001400
Owen Anderson53aa7a92009-08-10 22:56:29 +00001401 EVT VT = N.getValueType();
Evan Cheng77d86ff2006-02-25 10:09:08 +00001402 unsigned Complexity = 0;
1403 if (AM.BaseType == X86ISelAddressMode::RegBase)
Dan Gohman0fd54fb2010-04-29 23:30:41 +00001404 if (AM.Base_Reg.getNode())
Evan Cheng77d86ff2006-02-25 10:09:08 +00001405 Complexity = 1;
1406 else
Dan Gohman0fd54fb2010-04-29 23:30:41 +00001407 AM.Base_Reg = CurDAG->getRegister(0, VT);
Evan Cheng77d86ff2006-02-25 10:09:08 +00001408 else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
1409 Complexity = 4;
1410
Gabor Greiff304a7a2008-08-28 21:40:38 +00001411 if (AM.IndexReg.getNode())
Evan Cheng77d86ff2006-02-25 10:09:08 +00001412 Complexity++;
1413 else
Evan Cheng11b0a5d2006-09-08 06:48:29 +00001414 AM.IndexReg = CurDAG->getRegister(0, VT);
Evan Cheng77d86ff2006-02-25 10:09:08 +00001415
Chris Lattner3e1d9172007-03-20 06:08:29 +00001416 // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
1417 // a simple shift.
1418 if (AM.Scale > 1)
Evan Cheng990c3602006-02-28 21:13:57 +00001419 Complexity++;
Evan Cheng77d86ff2006-02-25 10:09:08 +00001420
1421 // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
1422  // to a LEA. This is determined with some experimentation but is by no means
1423  // optimal (especially for code size considerations). LEA is nice because of
1424 // its three-address nature. Tweak the cost function again when we can run
1425 // convertToThreeAddress() at register allocation time.
Dan Gohman4e3e3de2009-02-07 00:43:41 +00001426 if (AM.hasSymbolicDisplacement()) {
Evan Cheng11b0a5d2006-09-08 06:48:29 +00001427 // For X86-64, we should always use lea to materialize RIP relative
1428 // addresses.
Evan Cheng47e181c2006-12-05 22:03:40 +00001429 if (Subtarget->is64Bit())
Evan Cheng11b0a5d2006-09-08 06:48:29 +00001430 Complexity = 4;
1431 else
1432 Complexity += 2;
1433 }
Evan Cheng77d86ff2006-02-25 10:09:08 +00001434
Dan Gohman0fd54fb2010-04-29 23:30:41 +00001435 if (AM.Disp && (AM.Base_Reg.getNode() || AM.IndexReg.getNode()))
Evan Cheng77d86ff2006-02-25 10:09:08 +00001436 Complexity++;
1437
Chris Lattner4d10f1a2009-07-11 22:50:33 +00001438 // If it isn't worth using an LEA, reject it.
Chris Lattner48cee9b2009-07-11 23:07:30 +00001439 if (Complexity <= 2)
Chris Lattner4d10f1a2009-07-11 22:50:33 +00001440 return false;
Chad Rosier24c19d22012-08-01 18:39:17 +00001441
Chris Lattner4d10f1a2009-07-11 22:50:33 +00001442 getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
1443 return true;
Evan Cheng77d86ff2006-02-25 10:09:08 +00001444}
1445
Chris Lattner7d2b0492009-06-20 20:38:48 +00001446/// SelectTLSADDRAddr - This is only run on TargetGlobalTLSAddress nodes.
Chris Lattner0e023ea2010-09-21 20:31:19 +00001447bool X86DAGToDAGISel::SelectTLSADDRAddr(SDValue N, SDValue &Base,
Chris Lattner7d2b0492009-06-20 20:38:48 +00001448 SDValue &Scale, SDValue &Index,
Chris Lattnerf4693072010-07-08 23:46:44 +00001449 SDValue &Disp, SDValue &Segment) {
Chris Lattner7d2b0492009-06-20 20:38:48 +00001450 assert(N.getOpcode() == ISD::TargetGlobalTLSAddress);
1451 const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(N);
Chad Rosier24c19d22012-08-01 18:39:17 +00001452
Chris Lattner7d2b0492009-06-20 20:38:48 +00001453 X86ISelAddressMode AM;
1454 AM.GV = GA->getGlobal();
1455 AM.Disp += GA->getOffset();
Dan Gohman0fd54fb2010-04-29 23:30:41 +00001456 AM.Base_Reg = CurDAG->getRegister(0, N.getValueType());
Chris Lattner899abc42009-06-26 21:18:37 +00001457 AM.SymbolFlags = GA->getTargetFlags();
1458
Owen Anderson9f944592009-08-11 20:47:22 +00001459 if (N.getValueType() == MVT::i32) {
Chris Lattner7d2b0492009-06-20 20:38:48 +00001460 AM.Scale = 1;
Owen Anderson9f944592009-08-11 20:47:22 +00001461 AM.IndexReg = CurDAG->getRegister(X86::EBX, MVT::i32);
Chris Lattner7d2b0492009-06-20 20:38:48 +00001462 } else {
Owen Anderson9f944592009-08-11 20:47:22 +00001463 AM.IndexReg = CurDAG->getRegister(0, MVT::i64);
Chris Lattner7d2b0492009-06-20 20:38:48 +00001464 }
Chad Rosier24c19d22012-08-01 18:39:17 +00001465
Chris Lattner7d2b0492009-06-20 20:38:48 +00001466 getAddressOperands(AM, Base, Scale, Index, Disp, Segment);
1467 return true;
1468}
1469
1470
Dan Gohmanea6f91f2010-01-05 01:24:18 +00001471bool X86DAGToDAGISel::TryFoldLoad(SDNode *P, SDValue N,
Dan Gohman2ce6f2a2008-07-27 21:46:04 +00001472 SDValue &Base, SDValue &Scale,
Rafael Espindola3b2df102009-04-08 21:14:34 +00001473 SDValue &Index, SDValue &Disp,
1474 SDValue &Segment) {
Chris Lattnerdd030702010-03-02 22:20:06 +00001475 if (!ISD::isNON_EXTLoad(N.getNode()) ||
1476 !IsProfitableToFold(N, P, P) ||
Dan Gohman21cea8a2010-04-17 15:26:15 +00001477 !IsLegalToFold(N, P, P, OptLevel))
Chris Lattnerdd030702010-03-02 22:20:06 +00001478 return false;
Chad Rosier24c19d22012-08-01 18:39:17 +00001479
Chris Lattnerd58d7c12010-09-21 22:07:31 +00001480 return SelectAddr(N.getNode(),
1481 N.getOperand(1), Base, Scale, Index, Disp, Segment);
Evan Cheng10d27902006-01-06 20:36:21 +00001482}
1483
Dan Gohman24300732008-09-23 18:22:58 +00001484/// getGlobalBaseReg - Return an SDNode that returns the value of
1485/// the global base register. Output instructions required to
1486/// initialize the global base register, if necessary.
Evan Cheng5588de92006-02-18 00:15:05 +00001487///
Evan Cheng61413a32006-08-26 05:34:46 +00001488SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
Dan Gohman4751bb92009-06-03 20:20:00 +00001489 unsigned GlobalBaseReg = getInstrInfo()->getGlobalBaseReg(MF);
Gabor Greiff304a7a2008-08-28 21:40:38 +00001490 return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).getNode();
Evan Cheng5588de92006-02-18 00:15:05 +00001491}
1492
Dale Johannesen867d5492008-10-02 18:53:47 +00001493SDNode *X86DAGToDAGISel::SelectAtomic64(SDNode *Node, unsigned Opc) {
1494 SDValue Chain = Node->getOperand(0);
1495 SDValue In1 = Node->getOperand(1);
1496 SDValue In2L = Node->getOperand(2);
1497 SDValue In2H = Node->getOperand(3);
Michael Liao83725392012-09-19 19:36:58 +00001498
Rafael Espindola3b2df102009-04-08 21:14:34 +00001499 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
Chris Lattnerd58d7c12010-09-21 22:07:31 +00001500 if (!SelectAddr(Node, In1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
Dale Johannesen867d5492008-10-02 18:53:47 +00001501 return NULL;
Dan Gohman48b185d2009-09-25 20:36:54 +00001502 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1503 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
1504 const SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, In2L, In2H, Chain};
1505 SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
1506 MVT::i32, MVT::i32, MVT::Other, Ops,
1507 array_lengthof(Ops));
1508 cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp + 1);
1509 return ResNode;
Dale Johannesen867d5492008-10-02 18:53:47 +00001510}
Christopher Lambb372aba2007-08-10 21:48:46 +00001511
Michael Liao83725392012-09-19 19:36:58 +00001512/// Atomic opcode table
1513///
Eric Christophereb47a2a2011-05-17 07:47:55 +00001514enum AtomicOpc {
Michael Liao83725392012-09-19 19:36:58 +00001515 ADD,
1516 SUB,
1517 INC,
1518 DEC,
Eric Christopherabfe3132011-05-17 07:50:41 +00001519 OR,
Eric Christophera1d9e292011-05-17 08:10:18 +00001520 AND,
1521 XOR,
Eric Christopherabfe3132011-05-17 07:50:41 +00001522 AtomicOpcEnd
Eric Christophereb47a2a2011-05-17 07:47:55 +00001523};
1524
1525enum AtomicSz {
1526 ConstantI8,
1527 I8,
1528 SextConstantI16,
1529 ConstantI16,
1530 I16,
1531 SextConstantI32,
1532 ConstantI32,
1533 I32,
1534 SextConstantI64,
1535 ConstantI64,
Eric Christopherabfe3132011-05-17 07:50:41 +00001536 I64,
1537 AtomicSzEnd
Eric Christophereb47a2a2011-05-17 07:47:55 +00001538};
1539
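// The table below is indexed as AtomicOpcTbl[Op][Sz]; for example,
// AtomicOpcTbl[OR][ConstantI32] is X86::LOCK_OR32mi. Zero entries mark
// combinations that do not exist (INC and DEC take no immediate operand).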
Craig Topper2dac9622012-03-09 07:45:21 +00001540static const uint16_t AtomicOpcTbl[AtomicOpcEnd][AtomicSzEnd] = {
Eric Christopher2a9dbbb2011-05-11 21:44:58 +00001541 {
Michael Liao83725392012-09-19 19:36:58 +00001542 X86::LOCK_ADD8mi,
1543 X86::LOCK_ADD8mr,
1544 X86::LOCK_ADD16mi8,
1545 X86::LOCK_ADD16mi,
1546 X86::LOCK_ADD16mr,
1547 X86::LOCK_ADD32mi8,
1548 X86::LOCK_ADD32mi,
1549 X86::LOCK_ADD32mr,
1550 X86::LOCK_ADD64mi8,
1551 X86::LOCK_ADD64mi32,
1552 X86::LOCK_ADD64mr,
1553 },
1554 {
1555 X86::LOCK_SUB8mi,
1556 X86::LOCK_SUB8mr,
1557 X86::LOCK_SUB16mi8,
1558 X86::LOCK_SUB16mi,
1559 X86::LOCK_SUB16mr,
1560 X86::LOCK_SUB32mi8,
1561 X86::LOCK_SUB32mi,
1562 X86::LOCK_SUB32mr,
1563 X86::LOCK_SUB64mi8,
1564 X86::LOCK_SUB64mi32,
1565 X86::LOCK_SUB64mr,
1566 },
1567 {
1568 0,
1569 X86::LOCK_INC8m,
1570 0,
1571 0,
1572 X86::LOCK_INC16m,
1573 0,
1574 0,
1575 X86::LOCK_INC32m,
1576 0,
1577 0,
1578 X86::LOCK_INC64m,
1579 },
1580 {
1581 0,
1582 X86::LOCK_DEC8m,
1583 0,
1584 0,
1585 X86::LOCK_DEC16m,
1586 0,
1587 0,
1588 X86::LOCK_DEC32m,
1589 0,
1590 0,
1591 X86::LOCK_DEC64m,
1592 },
1593 {
Eric Christopher2a9dbbb2011-05-11 21:44:58 +00001594 X86::LOCK_OR8mi,
1595 X86::LOCK_OR8mr,
1596 X86::LOCK_OR16mi8,
1597 X86::LOCK_OR16mi,
1598 X86::LOCK_OR16mr,
1599 X86::LOCK_OR32mi8,
1600 X86::LOCK_OR32mi,
1601 X86::LOCK_OR32mr,
1602 X86::LOCK_OR64mi8,
1603 X86::LOCK_OR64mi32,
Michael Liao83725392012-09-19 19:36:58 +00001604 X86::LOCK_OR64mr,
Eric Christophera1d9e292011-05-17 08:10:18 +00001605 },
1606 {
1607 X86::LOCK_AND8mi,
1608 X86::LOCK_AND8mr,
1609 X86::LOCK_AND16mi8,
1610 X86::LOCK_AND16mi,
1611 X86::LOCK_AND16mr,
1612 X86::LOCK_AND32mi8,
1613 X86::LOCK_AND32mi,
1614 X86::LOCK_AND32mr,
1615 X86::LOCK_AND64mi8,
1616 X86::LOCK_AND64mi32,
Michael Liao83725392012-09-19 19:36:58 +00001617 X86::LOCK_AND64mr,
Eric Christophera1d9e292011-05-17 08:10:18 +00001618 },
1619 {
1620 X86::LOCK_XOR8mi,
1621 X86::LOCK_XOR8mr,
1622 X86::LOCK_XOR16mi8,
1623 X86::LOCK_XOR16mi,
1624 X86::LOCK_XOR16mr,
1625 X86::LOCK_XOR32mi8,
1626 X86::LOCK_XOR32mi,
1627 X86::LOCK_XOR32mr,
1628 X86::LOCK_XOR64mi8,
1629 X86::LOCK_XOR64mi32,
Michael Liao83725392012-09-19 19:36:58 +00001630 X86::LOCK_XOR64mr,
Eric Christopher2a9dbbb2011-05-11 21:44:58 +00001631 }
1632};
1633
Michael Liao83725392012-09-19 19:36:58 +00001634// Return the target constant operand for atomic-load-op and do simple
1635// translations, such as from atomic-load-add to lock-sub. The return value is
1636// one of the following 3 cases:
1637// + target-constant, the operand could be supported as a target constant.
1638// + empty, the operand is not needed any more with the new op selected.
1639// + non-empty, otherwise.
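// For example, (atomic-load-add ptr, -1) is rewritten with Op = DEC and an
// empty SDValue is returned, since INC/DEC take no extra operand.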
1640static SDValue getAtomicLoadArithTargetConstant(SelectionDAG *CurDAG,
1641 DebugLoc dl,
1642 enum AtomicOpc &Op, EVT NVT,
1643 SDValue Val) {
1644 if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Val)) {
1645 int64_t CNVal = CN->getSExtValue();
1646 // Quit if not 32-bit imm.
1647 if ((int32_t)CNVal != CNVal)
1648 return Val;
1649 // For atomic-load-add, we could do some optimizations.
1650 if (Op == ADD) {
1651 // Translate to INC/DEC if ADD by 1 or -1.
1652 if ((CNVal == 1) || (CNVal == -1)) {
1653 Op = (CNVal == 1) ? INC : DEC;
1654 // No more constant operand after being translated into INC/DEC.
1655 return SDValue();
1656 }
1657 // Translate to SUB if ADD by negative value.
1658 if (CNVal < 0) {
1659 Op = SUB;
1660 CNVal = -CNVal;
1661 }
1662 }
1663 return CurDAG->getTargetConstant(CNVal, NVT);
1664 }
1665
1666 // If the value operand is single-used, try to optimize it.
1667 if (Op == ADD && Val.hasOneUse()) {
1668 // Translate (atomic-load-add ptr (sub 0 x)) back to (lock-sub x).
1669 if (Val.getOpcode() == ISD::SUB && X86::isZeroNode(Val.getOperand(0))) {
1670 Op = SUB;
1671 return Val.getOperand(1);
1672 }
1673 // A special case for i16, which needs truncating as, in most cases, it's
1674 // promoted to i32. We will translate
1675 // (atomic-load-add (truncate (sub 0 x))) to (lock-sub (EXTRACT_SUBREG x))
1676 if (Val.getOpcode() == ISD::TRUNCATE && NVT == MVT::i16 &&
1677 Val.getOperand(0).getOpcode() == ISD::SUB &&
1678 X86::isZeroNode(Val.getOperand(0).getOperand(0))) {
1679 Op = SUB;
1680 Val = Val.getOperand(0);
1681 return CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl, NVT,
1682 Val.getOperand(1));
1683 }
1684 }
1685
1686 return Val;
1687}
1688
Eric Christophera1d9e292011-05-17 08:10:18 +00001689SDNode *X86DAGToDAGISel::SelectAtomicLoadArith(SDNode *Node, EVT NVT) {
Eric Christopher4a34e612011-05-10 23:57:45 +00001690 if (Node->hasAnyUseOfValue(0))
1691 return 0;
Chad Rosier24c19d22012-08-01 18:39:17 +00001692
Michael Liao83725392012-09-19 19:36:58 +00001693 DebugLoc dl = Node->getDebugLoc();
1694
Eric Christopher56a42eb2011-05-17 08:16:14 +00001695 // Optimize common patterns for __sync_or_and_fetch and similar arith
1696 // operations where the result is not used. This allows us to use the "lock"
1697 // version of the arithmetic instruction.
Eric Christopher4a34e612011-05-10 23:57:45 +00001698 SDValue Chain = Node->getOperand(0);
1699 SDValue Ptr = Node->getOperand(1);
1700 SDValue Val = Node->getOperand(2);
1701 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
1702 if (!SelectAddr(Node, Ptr, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4))
1703 return 0;
1704
Eric Christophera1d9e292011-05-17 08:10:18 +00001705  // Which row of the opcode table to index.
1706 enum AtomicOpc Op;
1707 switch (Node->getOpcode()) {
Michael Liao83725392012-09-19 19:36:58 +00001708 default:
1709 return 0;
Eric Christophera1d9e292011-05-17 08:10:18 +00001710 case ISD::ATOMIC_LOAD_OR:
1711 Op = OR;
1712 break;
1713 case ISD::ATOMIC_LOAD_AND:
1714 Op = AND;
1715 break;
1716 case ISD::ATOMIC_LOAD_XOR:
1717 Op = XOR;
1718 break;
Michael Liao83725392012-09-19 19:36:58 +00001719 case ISD::ATOMIC_LOAD_ADD:
1720 Op = ADD;
1721 break;
Eric Christophera1d9e292011-05-17 08:10:18 +00001722 }
Michael Liao83725392012-09-19 19:36:58 +00001723
1724 Val = getAtomicLoadArithTargetConstant(CurDAG, dl, Op, NVT, Val);
1725 bool isUnOp = !Val.getNode();
1726 bool isCN = Val.getNode() && (Val.getOpcode() == ISD::TargetConstant);
Chad Rosier24c19d22012-08-01 18:39:17 +00001727
Eric Christopher4a34e612011-05-10 23:57:45 +00001728 unsigned Opc = 0;
1729 switch (NVT.getSimpleVT().SimpleTy) {
1730 default: return 0;
1731 case MVT::i8:
1732 if (isCN)
Eric Christophereb47a2a2011-05-17 07:47:55 +00001733 Opc = AtomicOpcTbl[Op][ConstantI8];
Eric Christopher4a34e612011-05-10 23:57:45 +00001734 else
Eric Christophereb47a2a2011-05-17 07:47:55 +00001735 Opc = AtomicOpcTbl[Op][I8];
Eric Christopher4a34e612011-05-10 23:57:45 +00001736 break;
1737 case MVT::i16:
1738 if (isCN) {
1739 if (immSext8(Val.getNode()))
Eric Christophereb47a2a2011-05-17 07:47:55 +00001740 Opc = AtomicOpcTbl[Op][SextConstantI16];
Eric Christopher4a34e612011-05-10 23:57:45 +00001741 else
Eric Christophereb47a2a2011-05-17 07:47:55 +00001742 Opc = AtomicOpcTbl[Op][ConstantI16];
Eric Christopher4a34e612011-05-10 23:57:45 +00001743 } else
Eric Christophereb47a2a2011-05-17 07:47:55 +00001744 Opc = AtomicOpcTbl[Op][I16];
Eric Christopher4a34e612011-05-10 23:57:45 +00001745 break;
1746 case MVT::i32:
1747 if (isCN) {
1748 if (immSext8(Val.getNode()))
Eric Christophereb47a2a2011-05-17 07:47:55 +00001749 Opc = AtomicOpcTbl[Op][SextConstantI32];
Eric Christopher4a34e612011-05-10 23:57:45 +00001750 else
Eric Christophereb47a2a2011-05-17 07:47:55 +00001751 Opc = AtomicOpcTbl[Op][ConstantI32];
Eric Christopher4a34e612011-05-10 23:57:45 +00001752 } else
Eric Christophereb47a2a2011-05-17 07:47:55 +00001753 Opc = AtomicOpcTbl[Op][I32];
Eric Christopher4a34e612011-05-10 23:57:45 +00001754 break;
1755 case MVT::i64:
Eric Christopherc93217372011-06-30 00:48:30 +00001756 Opc = AtomicOpcTbl[Op][I64];
Eric Christopher4a34e612011-05-10 23:57:45 +00001757 if (isCN) {
1758 if (immSext8(Val.getNode()))
Eric Christophereb47a2a2011-05-17 07:47:55 +00001759 Opc = AtomicOpcTbl[Op][SextConstantI64];
Eric Christopher4a34e612011-05-10 23:57:45 +00001760 else if (i64immSExt32(Val.getNode()))
Eric Christophereb47a2a2011-05-17 07:47:55 +00001761 Opc = AtomicOpcTbl[Op][ConstantI64];
Eric Christopherc93217372011-06-30 00:48:30 +00001762 }
Eric Christopher4a34e612011-05-10 23:57:45 +00001763 break;
1764 }
Chad Rosier24c19d22012-08-01 18:39:17 +00001765
Eric Christopherc93217372011-06-30 00:48:30 +00001766 assert(Opc != 0 && "Invalid arith lock transform!");
1767
Michael Liao83725392012-09-19 19:36:58 +00001768 SDValue Ret;
Eric Christopher4a34e612011-05-10 23:57:45 +00001769 SDValue Undef = SDValue(CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF,
1770 dl, NVT), 0);
1771 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
1772 MemOp[0] = cast<MemSDNode>(Node)->getMemOperand();
Michael Liao83725392012-09-19 19:36:58 +00001773 if (isUnOp) {
1774 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Chain };
1775 Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops,
1776 array_lengthof(Ops)), 0);
1777 } else {
1778 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Val, Chain };
1779 Ret = SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Other, Ops,
1780 array_lengthof(Ops)), 0);
1781 }
Eric Christopher4a34e612011-05-10 23:57:45 +00001782 cast<MachineSDNode>(Ret)->setMemRefs(MemOp, MemOp + 1);
1783 SDValue RetVals[] = { Undef, Ret };
1784 return CurDAG->getMergeValues(RetVals, 2, dl).getNode();
1785}
1786
Dan Gohman7d9dffb2009-10-09 20:35:19 +00001787/// HasNoSignedComparisonUses - Test whether the given X86ISD::CMP node has
1788/// any uses which require the SF or OF bits to be accurate.
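/// For example, an unsigned SETA user reads only CF and ZF and is accepted
/// below, while a signed SETG user would read SF and OF and falls into the
/// conservative default case.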
1789static bool HasNoSignedComparisonUses(SDNode *N) {
1790 // Examine each user of the node.
1791 for (SDNode::use_iterator UI = N->use_begin(),
1792 UE = N->use_end(); UI != UE; ++UI) {
1793 // Only examine CopyToReg uses.
1794 if (UI->getOpcode() != ISD::CopyToReg)
1795 return false;
1796 // Only examine CopyToReg uses that copy to EFLAGS.
1797 if (cast<RegisterSDNode>(UI->getOperand(1))->getReg() !=
1798 X86::EFLAGS)
1799 return false;
1800 // Examine each user of the CopyToReg use.
1801 for (SDNode::use_iterator FlagUI = UI->use_begin(),
1802 FlagUE = UI->use_end(); FlagUI != FlagUE; ++FlagUI) {
1803 // Only examine the Flag result.
1804 if (FlagUI.getUse().getResNo() != 1) continue;
1805 // Anything unusual: assume conservatively.
1806 if (!FlagUI->isMachineOpcode()) return false;
1807 // Examine the opcode of the user.
1808 switch (FlagUI->getMachineOpcode()) {
1809 // These comparisons don't treat the most significant bit specially.
1810 case X86::SETAr: case X86::SETAEr: case X86::SETBr: case X86::SETBEr:
1811 case X86::SETEr: case X86::SETNEr: case X86::SETPr: case X86::SETNPr:
1812 case X86::SETAm: case X86::SETAEm: case X86::SETBm: case X86::SETBEm:
1813 case X86::SETEm: case X86::SETNEm: case X86::SETPm: case X86::SETNPm:
Chris Lattner2b0a7a22010-02-11 19:25:55 +00001814 case X86::JA_4: case X86::JAE_4: case X86::JB_4: case X86::JBE_4:
1815 case X86::JE_4: case X86::JNE_4: case X86::JP_4: case X86::JNP_4:
Dan Gohman7d9dffb2009-10-09 20:35:19 +00001816 case X86::CMOVA16rr: case X86::CMOVA16rm:
1817 case X86::CMOVA32rr: case X86::CMOVA32rm:
1818 case X86::CMOVA64rr: case X86::CMOVA64rm:
1819 case X86::CMOVAE16rr: case X86::CMOVAE16rm:
1820 case X86::CMOVAE32rr: case X86::CMOVAE32rm:
1821 case X86::CMOVAE64rr: case X86::CMOVAE64rm:
1822 case X86::CMOVB16rr: case X86::CMOVB16rm:
1823 case X86::CMOVB32rr: case X86::CMOVB32rm:
1824 case X86::CMOVB64rr: case X86::CMOVB64rm:
Chris Lattner1a1c6002010-10-05 23:00:14 +00001825 case X86::CMOVBE16rr: case X86::CMOVBE16rm:
1826 case X86::CMOVBE32rr: case X86::CMOVBE32rm:
1827 case X86::CMOVBE64rr: case X86::CMOVBE64rm:
Dan Gohman7d9dffb2009-10-09 20:35:19 +00001828 case X86::CMOVE16rr: case X86::CMOVE16rm:
1829 case X86::CMOVE32rr: case X86::CMOVE32rm:
1830 case X86::CMOVE64rr: case X86::CMOVE64rm:
1831 case X86::CMOVNE16rr: case X86::CMOVNE16rm:
1832 case X86::CMOVNE32rr: case X86::CMOVNE32rm:
1833 case X86::CMOVNE64rr: case X86::CMOVNE64rm:
1834 case X86::CMOVNP16rr: case X86::CMOVNP16rm:
1835 case X86::CMOVNP32rr: case X86::CMOVNP32rm:
1836 case X86::CMOVNP64rr: case X86::CMOVNP64rm:
1837 case X86::CMOVP16rr: case X86::CMOVP16rm:
1838 case X86::CMOVP32rr: case X86::CMOVP32rm:
1839 case X86::CMOVP64rr: case X86::CMOVP64rm:
1840 continue;
1841 // Anything else: assume conservatively.
1842 default: return false;
1843 }
1844 }
1845 }
1846 return true;
1847}
1848
Joel Jones68d59e82012-03-29 05:45:48 +00001849/// isLoadIncOrDecStore - Check whether or not the chain ending in StoreNode
1850/// is suitable for the {load; increment or decrement; store} read-modify-write
1851/// transformation.
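/// For example, (store (X86ISD::INC (load p)), p) can later be emitted as a
/// single 'inc' instruction with a memory operand (see getFusedLdStOpcode).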
Chad Rosier24c19d22012-08-01 18:39:17 +00001852static bool isLoadIncOrDecStore(StoreSDNode *StoreNode, unsigned Opc,
Evan Cheng3e869f02012-04-12 19:14:21 +00001853 SDValue StoredVal, SelectionDAG *CurDAG,
1854 LoadSDNode* &LoadNode, SDValue &InputChain) {
Joel Jones68d59e82012-03-29 05:45:48 +00001855
1856  // Is the stored value the result of a DEC or INC?
1857 if (!(Opc == X86ISD::DEC || Opc == X86ISD::INC)) return false;
1858
Joel Jones68d59e82012-03-29 05:45:48 +00001859 // is the stored value result 0 of the load?
1860 if (StoredVal.getResNo() != 0) return false;
1861
1862 // are there other uses of the loaded value than the inc or dec?
1863 if (!StoredVal.getNode()->hasNUsesOfValue(1, 0)) return false;
1864
Joel Jones68d59e82012-03-29 05:45:48 +00001865 // is the store non-extending and non-indexed?
Evan Cheng3e869f02012-04-12 19:14:21 +00001866 if (!ISD::isNormalStore(StoreNode) || StoreNode->isNonTemporal())
Joel Jones68d59e82012-03-29 05:45:48 +00001867 return false;
1868
Evan Cheng3e869f02012-04-12 19:14:21 +00001869 SDValue Load = StoredVal->getOperand(0);
1870 // Is the stored value a non-extending and non-indexed load?
1871 if (!ISD::isNormalLoad(Load.getNode())) return false;
1872
1873 // Return LoadNode by reference.
1874 LoadNode = cast<LoadSDNode>(Load);
1875 // is the size of the value one that we can handle? (i.e. 64, 32, 16, or 8)
Chad Rosier24c19d22012-08-01 18:39:17 +00001876 EVT LdVT = LoadNode->getMemoryVT();
1877 if (LdVT != MVT::i64 && LdVT != MVT::i32 && LdVT != MVT::i16 &&
Evan Cheng3e869f02012-04-12 19:14:21 +00001878 LdVT != MVT::i8)
1879 return false;
1880
1881 // Is store the only read of the loaded value?
1882 if (!Load.hasOneUse())
1883 return false;
Chad Rosier24c19d22012-08-01 18:39:17 +00001884
Evan Cheng3e869f02012-04-12 19:14:21 +00001885 // Is the address of the store the same as the load?
1886 if (LoadNode->getBasePtr() != StoreNode->getBasePtr() ||
1887 LoadNode->getOffset() != StoreNode->getOffset())
1888 return false;
1889
1890 // Check if the chain is produced by the load or is a TokenFactor with
1891 // the load output chain as an operand. Return InputChain by reference.
1892 SDValue Chain = StoreNode->getChain();
1893
1894 bool ChainCheck = false;
1895 if (Chain == Load.getValue(1)) {
1896 ChainCheck = true;
1897 InputChain = LoadNode->getChain();
1898 } else if (Chain.getOpcode() == ISD::TokenFactor) {
1899 SmallVector<SDValue, 4> ChainOps;
1900 for (unsigned i = 0, e = Chain.getNumOperands(); i != e; ++i) {
1901 SDValue Op = Chain.getOperand(i);
1902 if (Op == Load.getValue(1)) {
1903 ChainCheck = true;
1904 continue;
1905 }
Evan Cheng58a95f02012-05-16 01:54:27 +00001906
1907 // Make sure using Op as part of the chain would not cause a cycle here.
1908 // In theory, we could check whether the chain node is a predecessor of
1909 // the load. But that can be very expensive. Instead visit the uses and
1910 // make sure they all have smaller node id than the load.
1911 int LoadId = LoadNode->getNodeId();
1912 for (SDNode::use_iterator UI = Op.getNode()->use_begin(),
1913 UE = UI->use_end(); UI != UE; ++UI) {
1914 if (UI.getUse().getResNo() != 0)
1915 continue;
1916 if (UI->getNodeId() > LoadId)
1917 return false;
1918 }
1919
Evan Cheng3e869f02012-04-12 19:14:21 +00001920 ChainOps.push_back(Op);
1921 }
1922
1923 if (ChainCheck)
1924 // Make a new TokenFactor with all the other input chains except
1925 // for the load.
1926 InputChain = CurDAG->getNode(ISD::TokenFactor, Chain.getDebugLoc(),
1927 MVT::Other, &ChainOps[0], ChainOps.size());
1928 }
1929 if (!ChainCheck)
Joel Jones68d59e82012-03-29 05:45:48 +00001930 return false;
1931
1932 return true;
1933}
1934
Benjamin Kramer8619c372012-03-29 12:37:26 +00001935/// getFusedLdStOpcode - Get the appropriate X86 opcode for an in memory
1936/// increment or decrement. Opc should be X86ISD::DEC or X86ISD::INC.
Joel Jones68d59e82012-03-29 05:45:48 +00001937static unsigned getFusedLdStOpcode(EVT &LdVT, unsigned Opc) {
1938 if (Opc == X86ISD::DEC) {
1939 if (LdVT == MVT::i64) return X86::DEC64m;
1940 if (LdVT == MVT::i32) return X86::DEC32m;
1941 if (LdVT == MVT::i16) return X86::DEC16m;
1942 if (LdVT == MVT::i8) return X86::DEC8m;
Benjamin Kramer8619c372012-03-29 12:37:26 +00001943 } else {
1944 assert(Opc == X86ISD::INC && "unrecognized opcode");
Joel Jones68d59e82012-03-29 05:45:48 +00001945 if (LdVT == MVT::i64) return X86::INC64m;
1946 if (LdVT == MVT::i32) return X86::INC32m;
1947 if (LdVT == MVT::i16) return X86::INC16m;
1948 if (LdVT == MVT::i8) return X86::INC8m;
Joel Jones68d59e82012-03-29 05:45:48 +00001949 }
Benjamin Kramer8619c372012-03-29 12:37:26 +00001950 llvm_unreachable("unrecognized size for LdVT");
Joel Jones68d59e82012-03-29 05:45:48 +00001951}
1952
Manman Rena0982042012-06-26 19:47:59 +00001953/// SelectGather - Customized ISel for GATHER operations.
1954///
1955SDNode *X86DAGToDAGISel::SelectGather(SDNode *Node, unsigned Opc) {
1956 // Operands of Gather: VSrc, Base, VIdx, VMask, Scale
1957 SDValue Chain = Node->getOperand(0);
1958 SDValue VSrc = Node->getOperand(2);
1959 SDValue Base = Node->getOperand(3);
1960 SDValue VIdx = Node->getOperand(4);
1961 SDValue VMask = Node->getOperand(5);
1962 ConstantSDNode *Scale = dyn_cast<ConstantSDNode>(Node->getOperand(6));
Craig Topperfbb954f72012-07-01 02:17:08 +00001963 if (!Scale)
1964 return 0;
Manman Rena0982042012-06-26 19:47:59 +00001965
Craig Topperf7755df2012-07-12 06:52:41 +00001966 SDVTList VTs = CurDAG->getVTList(VSrc.getValueType(), VSrc.getValueType(),
1967 MVT::Other);
1968
Manman Rena0982042012-06-26 19:47:59 +00001969 // Memory Operands: Base, Scale, Index, Disp, Segment
1970 SDValue Disp = CurDAG->getTargetConstant(0, MVT::i32);
1971 SDValue Segment = CurDAG->getRegister(0, MVT::i32);
1972 const SDValue Ops[] = { VSrc, Base, getI8Imm(Scale->getSExtValue()), VIdx,
1973 Disp, Segment, VMask, Chain};
1974 SDNode *ResNode = CurDAG->getMachineNode(Opc, Node->getDebugLoc(),
Craig Topperf7755df2012-07-12 06:52:41 +00001975 VTs, Ops, array_lengthof(Ops));
1976 // Node has 2 outputs: VDst and MVT::Other.
1977 // ResNode has 3 outputs: VDst, VMask_wb, and MVT::Other.
1978 // We replace VDst of Node with VDst of ResNode, and Other of Node with Other
1979 // of ResNode.
1980 ReplaceUses(SDValue(Node, 0), SDValue(ResNode, 0));
1981 ReplaceUses(SDValue(Node, 1), SDValue(ResNode, 2));
Manman Rena0982042012-06-26 19:47:59 +00001982 return ResNode;
1983}
1984
Dan Gohmanea6f91f2010-01-05 01:24:18 +00001985SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
Owen Anderson53aa7a92009-08-10 22:56:29 +00001986 EVT NVT = Node->getValueType(0);
Evan Cheng10d27902006-01-06 20:36:21 +00001987 unsigned Opc, MOpc;
1988 unsigned Opcode = Node->getOpcode();
Dale Johannesen14f2d9d2009-02-03 21:48:12 +00001989 DebugLoc dl = Node->getDebugLoc();
Chad Rosier24c19d22012-08-01 18:39:17 +00001990
Chris Lattnerf98f1242010-03-02 06:34:30 +00001991 DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << '\n');
Evan Chengd49cc362006-02-10 22:24:32 +00001992
Dan Gohman17059682008-07-17 19:10:17 +00001993 if (Node->isMachineOpcode()) {
Chris Lattnerf98f1242010-03-02 06:34:30 +00001994 DEBUG(dbgs() << "== "; Node->dump(CurDAG); dbgs() << '\n');
Evan Chengbd1c5a82006-08-11 09:08:15 +00001995 return NULL; // Already selected.
Evan Cheng6dc90ca2006-02-09 00:37:58 +00001996 }
Evan Cheng2ae799a2006-01-11 22:15:18 +00001997
Evan Cheng10d27902006-01-06 20:36:21 +00001998 switch (Opcode) {
Dan Gohman757eee82009-08-02 16:10:52 +00001999 default: break;
Manman Rena0982042012-06-26 19:47:59 +00002000 case ISD::INTRINSIC_W_CHAIN: {
2001 unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
2002 switch (IntNo) {
2003 default: break;
2004 case Intrinsic::x86_avx2_gather_d_pd:
Manman Rena0982042012-06-26 19:47:59 +00002005 case Intrinsic::x86_avx2_gather_d_pd_256:
Manman Rena0982042012-06-26 19:47:59 +00002006 case Intrinsic::x86_avx2_gather_q_pd:
Manman Rena0982042012-06-26 19:47:59 +00002007 case Intrinsic::x86_avx2_gather_q_pd_256:
Manman Rena0982042012-06-26 19:47:59 +00002008 case Intrinsic::x86_avx2_gather_d_ps:
Manman Rena0982042012-06-26 19:47:59 +00002009 case Intrinsic::x86_avx2_gather_d_ps_256:
Manman Rena0982042012-06-26 19:47:59 +00002010 case Intrinsic::x86_avx2_gather_q_ps:
Manman Rena0982042012-06-26 19:47:59 +00002011 case Intrinsic::x86_avx2_gather_q_ps_256:
Manman Ren98a5bf22012-06-29 00:54:20 +00002012 case Intrinsic::x86_avx2_gather_d_q:
Manman Ren98a5bf22012-06-29 00:54:20 +00002013 case Intrinsic::x86_avx2_gather_d_q_256:
Manman Ren98a5bf22012-06-29 00:54:20 +00002014 case Intrinsic::x86_avx2_gather_q_q:
Manman Ren98a5bf22012-06-29 00:54:20 +00002015 case Intrinsic::x86_avx2_gather_q_q_256:
Manman Ren98a5bf22012-06-29 00:54:20 +00002016 case Intrinsic::x86_avx2_gather_d_d:
Manman Ren98a5bf22012-06-29 00:54:20 +00002017 case Intrinsic::x86_avx2_gather_d_d_256:
Manman Ren98a5bf22012-06-29 00:54:20 +00002018 case Intrinsic::x86_avx2_gather_q_d:
Craig Topperdef044b2012-07-01 02:05:52 +00002019 case Intrinsic::x86_avx2_gather_q_d_256: {
2020 unsigned Opc;
2021 switch (IntNo) {
Craig Topper3af251d2012-07-01 02:55:34 +00002022 default: llvm_unreachable("Impossible intrinsic");
Craig Topperdef044b2012-07-01 02:05:52 +00002023 case Intrinsic::x86_avx2_gather_d_pd: Opc = X86::VGATHERDPDrm; break;
2024 case Intrinsic::x86_avx2_gather_d_pd_256: Opc = X86::VGATHERDPDYrm; break;
2025 case Intrinsic::x86_avx2_gather_q_pd: Opc = X86::VGATHERQPDrm; break;
2026 case Intrinsic::x86_avx2_gather_q_pd_256: Opc = X86::VGATHERQPDYrm; break;
2027 case Intrinsic::x86_avx2_gather_d_ps: Opc = X86::VGATHERDPSrm; break;
2028 case Intrinsic::x86_avx2_gather_d_ps_256: Opc = X86::VGATHERDPSYrm; break;
2029 case Intrinsic::x86_avx2_gather_q_ps: Opc = X86::VGATHERQPSrm; break;
2030 case Intrinsic::x86_avx2_gather_q_ps_256: Opc = X86::VGATHERQPSYrm; break;
2031 case Intrinsic::x86_avx2_gather_d_q: Opc = X86::VPGATHERDQrm; break;
2032 case Intrinsic::x86_avx2_gather_d_q_256: Opc = X86::VPGATHERDQYrm; break;
2033 case Intrinsic::x86_avx2_gather_q_q: Opc = X86::VPGATHERQQrm; break;
2034 case Intrinsic::x86_avx2_gather_q_q_256: Opc = X86::VPGATHERQQYrm; break;
2035 case Intrinsic::x86_avx2_gather_d_d: Opc = X86::VPGATHERDDrm; break;
2036 case Intrinsic::x86_avx2_gather_d_d_256: Opc = X86::VPGATHERDDYrm; break;
2037 case Intrinsic::x86_avx2_gather_q_d: Opc = X86::VPGATHERQDrm; break;
2038 case Intrinsic::x86_avx2_gather_q_d_256: Opc = X86::VPGATHERQDYrm; break;
2039 }
Craig Topperfbb954f72012-07-01 02:17:08 +00002040 SDNode *RetVal = SelectGather(Node, Opc);
2041 if (RetVal)
Craig Topperf7755df2012-07-12 06:52:41 +00002042 // We already called ReplaceUses inside SelectGather.
2043 return NULL;
Craig Toppere15e5f72012-07-01 02:18:18 +00002044 break;
Craig Topperdef044b2012-07-01 02:05:52 +00002045 }
Manman Rena0982042012-06-26 19:47:59 +00002046 }
2047 break;
2048 }
Dan Gohman757eee82009-08-02 16:10:52 +00002049 case X86ISD::GlobalBaseReg:
2050 return getGlobalBaseReg();
Evan Chenge0ed6ec2006-02-23 20:41:18 +00002051
Craig Topper3af251d2012-07-01 02:55:34 +00002052
Dan Gohman757eee82009-08-02 16:10:52 +00002053 case X86ISD::ATOMOR64_DAG:
Dan Gohman757eee82009-08-02 16:10:52 +00002054 case X86ISD::ATOMXOR64_DAG:
Dan Gohman757eee82009-08-02 16:10:52 +00002055 case X86ISD::ATOMADD64_DAG:
Dan Gohman757eee82009-08-02 16:10:52 +00002056 case X86ISD::ATOMSUB64_DAG:
Dan Gohman757eee82009-08-02 16:10:52 +00002057 case X86ISD::ATOMNAND64_DAG:
Dan Gohman757eee82009-08-02 16:10:52 +00002058 case X86ISD::ATOMAND64_DAG:
Michael Liaode51caf2012-09-25 18:08:13 +00002059 case X86ISD::ATOMMAX64_DAG:
2060 case X86ISD::ATOMMIN64_DAG:
2061 case X86ISD::ATOMUMAX64_DAG:
2062 case X86ISD::ATOMUMIN64_DAG:
Craig Topper3af251d2012-07-01 02:55:34 +00002063 case X86ISD::ATOMSWAP64_DAG: {
2064 unsigned Opc;
2065 switch (Opcode) {
Craig Topper22cb0c52012-08-11 17:44:14 +00002066 default: llvm_unreachable("Impossible opcode");
Craig Topper3af251d2012-07-01 02:55:34 +00002067 case X86ISD::ATOMOR64_DAG: Opc = X86::ATOMOR6432; break;
2068 case X86ISD::ATOMXOR64_DAG: Opc = X86::ATOMXOR6432; break;
2069 case X86ISD::ATOMADD64_DAG: Opc = X86::ATOMADD6432; break;
2070 case X86ISD::ATOMSUB64_DAG: Opc = X86::ATOMSUB6432; break;
2071 case X86ISD::ATOMNAND64_DAG: Opc = X86::ATOMNAND6432; break;
2072 case X86ISD::ATOMAND64_DAG: Opc = X86::ATOMAND6432; break;
Michael Liaode51caf2012-09-25 18:08:13 +00002073 case X86ISD::ATOMMAX64_DAG: Opc = X86::ATOMMAX6432; break;
2074 case X86ISD::ATOMMIN64_DAG: Opc = X86::ATOMMIN6432; break;
2075 case X86ISD::ATOMUMAX64_DAG: Opc = X86::ATOMUMAX6432; break;
2076 case X86ISD::ATOMUMIN64_DAG: Opc = X86::ATOMUMIN6432; break;
Craig Topper3af251d2012-07-01 02:55:34 +00002077 case X86ISD::ATOMSWAP64_DAG: Opc = X86::ATOMSWAP6432; break;
2078 }
2079 SDNode *RetVal = SelectAtomic64(Node, Opc);
2080 if (RetVal)
2081 return RetVal;
2082 break;
2083 }
Dale Johannesen867d5492008-10-02 18:53:47 +00002084
Eric Christophera1d9e292011-05-17 08:10:18 +00002085 case ISD::ATOMIC_LOAD_XOR:
2086 case ISD::ATOMIC_LOAD_AND:
Michael Liao83725392012-09-19 19:36:58 +00002087 case ISD::ATOMIC_LOAD_OR:
2088 case ISD::ATOMIC_LOAD_ADD: {
Eric Christophera1d9e292011-05-17 08:10:18 +00002089 SDNode *RetVal = SelectAtomicLoadArith(Node, NVT);
Eric Christopher4a34e612011-05-10 23:57:45 +00002090 if (RetVal)
2091 return RetVal;
2092 break;
2093 }
Benjamin Kramer4c816242011-04-22 15:30:40 +00002094 case ISD::AND:
2095 case ISD::OR:
2096 case ISD::XOR: {
2097 // For operations of the form (x << C1) op C2, check if we can use a smaller
2098 // encoding for C2 by transforming it into (x op (C2>>C1)) << C1.
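    // For example, ((x << 8) | 0x1200) becomes ((x | 0x12) << 8): 0x12 fits
    // in a sign-extended 8-bit immediate while 0x1200 does not.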
2099 SDValue N0 = Node->getOperand(0);
2100 SDValue N1 = Node->getOperand(1);
2101
2102 if (N0->getOpcode() != ISD::SHL || !N0->hasOneUse())
2103 break;
2104
2105 // i8 is unshrinkable, i16 should be promoted to i32.
2106 if (NVT != MVT::i32 && NVT != MVT::i64)
2107 break;
2108
2109 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N1);
2110 ConstantSDNode *ShlCst = dyn_cast<ConstantSDNode>(N0->getOperand(1));
2111 if (!Cst || !ShlCst)
2112 break;
2113
2114 int64_t Val = Cst->getSExtValue();
2115 uint64_t ShlVal = ShlCst->getZExtValue();
2116
2117 // Make sure that we don't change the operation by removing bits.
2118 // This only matters for OR and XOR, AND is unaffected.
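    // (AND is unaffected because the low ShlVal bits of the shifted value are
    // already zero, so dropping them from the constant cannot change the
    // result.)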
Richard Smith228e6d42012-08-24 23:29:28 +00002119 uint64_t RemovedBitsMask = (1ULL << ShlVal) - 1;
2120 if (Opcode != ISD::AND && (Val & RemovedBitsMask) != 0)
Benjamin Kramer4c816242011-04-22 15:30:40 +00002121 break;
2122
Craig Topper22cb0c52012-08-11 17:44:14 +00002123 unsigned ShlOp, Op;
Benjamin Kramer4c816242011-04-22 15:30:40 +00002124 EVT CstVT = NVT;
2125
2126 // Check the minimum bitwidth for the new constant.
2127 // TODO: AND32ri is the same as AND64ri32 with zext imm.
2128 // TODO: MOV32ri+OR64r is cheaper than MOV64ri64+OR64rr
2129 // TODO: Using 16 and 8 bit operations is also possible for or32 & xor32.
2130 if (!isInt<8>(Val) && isInt<8>(Val >> ShlVal))
2131 CstVT = MVT::i8;
2132 else if (!isInt<32>(Val) && isInt<32>(Val >> ShlVal))
2133 CstVT = MVT::i32;
2134
2135 // Bail if there is no smaller encoding.
2136 if (NVT == CstVT)
2137 break;
2138
2139 switch (NVT.getSimpleVT().SimpleTy) {
2140 default: llvm_unreachable("Unsupported VT!");
2141 case MVT::i32:
2142 assert(CstVT == MVT::i8);
2143 ShlOp = X86::SHL32ri;
2144
2145 switch (Opcode) {
Craig Topper22cb0c52012-08-11 17:44:14 +00002146 default: llvm_unreachable("Impossible opcode");
Benjamin Kramer4c816242011-04-22 15:30:40 +00002147 case ISD::AND: Op = X86::AND32ri8; break;
2148 case ISD::OR: Op = X86::OR32ri8; break;
2149 case ISD::XOR: Op = X86::XOR32ri8; break;
2150 }
2151 break;
2152 case MVT::i64:
2153 assert(CstVT == MVT::i8 || CstVT == MVT::i32);
2154 ShlOp = X86::SHL64ri;
2155
2156 switch (Opcode) {
Craig Topper22cb0c52012-08-11 17:44:14 +00002157 default: llvm_unreachable("Impossible opcode");
Benjamin Kramer4c816242011-04-22 15:30:40 +00002158 case ISD::AND: Op = CstVT==MVT::i8? X86::AND64ri8 : X86::AND64ri32; break;
2159 case ISD::OR: Op = CstVT==MVT::i8? X86::OR64ri8 : X86::OR64ri32; break;
2160 case ISD::XOR: Op = CstVT==MVT::i8? X86::XOR64ri8 : X86::XOR64ri32; break;
2161 }
2162 break;
2163 }
2164
2165 // Emit the smaller op and the shift.
2166 SDValue NewCst = CurDAG->getTargetConstant(Val >> ShlVal, CstVT);
2167 SDNode *New = CurDAG->getMachineNode(Op, dl, NVT, N0->getOperand(0),NewCst);
2168 return CurDAG->SelectNodeTo(Node, ShlOp, NVT, SDValue(New, 0),
2169 getI8Imm(ShlVal));
Benjamin Kramer4c816242011-04-22 15:30:40 +00002170 }
Chris Lattner364bb0a2010-12-05 07:30:36 +00002171 case X86ISD::UMUL: {
2172 SDValue N0 = Node->getOperand(0);
2173 SDValue N1 = Node->getOperand(1);
Chad Rosier24c19d22012-08-01 18:39:17 +00002174
Ted Kremenekb5241b22011-01-14 22:34:13 +00002175 unsigned LoReg;
Chris Lattner364bb0a2010-12-05 07:30:36 +00002176 switch (NVT.getSimpleVT().SimpleTy) {
2177 default: llvm_unreachable("Unsupported VT!");
Ted Kremenekb5241b22011-01-14 22:34:13 +00002178 case MVT::i8: LoReg = X86::AL; Opc = X86::MUL8r; break;
2179 case MVT::i16: LoReg = X86::AX; Opc = X86::MUL16r; break;
2180 case MVT::i32: LoReg = X86::EAX; Opc = X86::MUL32r; break;
2181 case MVT::i64: LoReg = X86::RAX; Opc = X86::MUL64r; break;
Chris Lattner364bb0a2010-12-05 07:30:36 +00002182 }
Chad Rosier24c19d22012-08-01 18:39:17 +00002183
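    // The multiply implicitly reads its first operand from AL/AX/EAX/RAX, so
    // copy N0 into that register and glue the copy to the multiply node.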
Chris Lattner364bb0a2010-12-05 07:30:36 +00002184 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, LoReg,
2185 N0, SDValue()).getValue(1);
Chad Rosier24c19d22012-08-01 18:39:17 +00002186
Chris Lattner364bb0a2010-12-05 07:30:36 +00002187 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::i32);
2188 SDValue Ops[] = {N1, InFlag};
2189 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops, 2);
Chad Rosier24c19d22012-08-01 18:39:17 +00002190
Chris Lattner364bb0a2010-12-05 07:30:36 +00002191 ReplaceUses(SDValue(Node, 0), SDValue(CNode, 0));
2192 ReplaceUses(SDValue(Node, 1), SDValue(CNode, 1));
2193 ReplaceUses(SDValue(Node, 2), SDValue(CNode, 2));
2194 return NULL;
2195 }
Chad Rosier24c19d22012-08-01 18:39:17 +00002196
Dan Gohman757eee82009-08-02 16:10:52 +00002197 case ISD::SMUL_LOHI:
2198 case ISD::UMUL_LOHI: {
2199 SDValue N0 = Node->getOperand(0);
2200 SDValue N1 = Node->getOperand(1);
2201
2202 bool isSigned = Opcode == ISD::SMUL_LOHI;
Michael Liaof9f7b552012-09-26 08:22:37 +00002203 bool hasBMI2 = Subtarget->hasBMI2();
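    // With BMI2, MULX takes one source implicitly in EDX/RDX, writes the low
    // and high halves of the product to explicit destination registers, and
    // leaves EFLAGS untouched.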
Bill Wendlingfe3bdb42009-08-07 21:33:25 +00002204 if (!isSigned) {
Owen Anderson9f944592009-08-11 20:47:22 +00002205 switch (NVT.getSimpleVT().SimpleTy) {
Dan Gohman757eee82009-08-02 16:10:52 +00002206 default: llvm_unreachable("Unsupported VT!");
Owen Anderson9f944592009-08-11 20:47:22 +00002207 case MVT::i8: Opc = X86::MUL8r; MOpc = X86::MUL8m; break;
2208 case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
Michael Liaof9f7b552012-09-26 08:22:37 +00002209 case MVT::i32: Opc = hasBMI2 ? X86::MULX32rr : X86::MUL32r;
2210 MOpc = hasBMI2 ? X86::MULX32rm : X86::MUL32m; break;
2211 case MVT::i64: Opc = hasBMI2 ? X86::MULX64rr : X86::MUL64r;
2212 MOpc = hasBMI2 ? X86::MULX64rm : X86::MUL64m; break;
Dan Gohman757eee82009-08-02 16:10:52 +00002213 }
Bill Wendlingfe3bdb42009-08-07 21:33:25 +00002214 } else {
Owen Anderson9f944592009-08-11 20:47:22 +00002215 switch (NVT.getSimpleVT().SimpleTy) {
Dan Gohman757eee82009-08-02 16:10:52 +00002216 default: llvm_unreachable("Unsupported VT!");
Owen Anderson9f944592009-08-11 20:47:22 +00002217 case MVT::i8: Opc = X86::IMUL8r; MOpc = X86::IMUL8m; break;
2218 case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
2219 case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
2220 case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
Dan Gohman757eee82009-08-02 16:10:52 +00002221 }
Bill Wendlingfe3bdb42009-08-07 21:33:25 +00002222 }
Dan Gohman757eee82009-08-02 16:10:52 +00002223
Michael Liaof9f7b552012-09-26 08:22:37 +00002224 unsigned SrcReg, LoReg, HiReg;
2225 switch (Opc) {
2226 default: llvm_unreachable("Unknown MUL opcode!");
2227 case X86::IMUL8r:
2228 case X86::MUL8r:
2229 SrcReg = LoReg = X86::AL; HiReg = X86::AH;
2230 break;
2231 case X86::IMUL16r:
2232 case X86::MUL16r:
2233 SrcReg = LoReg = X86::AX; HiReg = X86::DX;
2234 break;
2235 case X86::IMUL32r:
2236 case X86::MUL32r:
2237 SrcReg = LoReg = X86::EAX; HiReg = X86::EDX;
2238 break;
2239 case X86::IMUL64r:
2240 case X86::MUL64r:
2241 SrcReg = LoReg = X86::RAX; HiReg = X86::RDX;
2242 break;
2243 case X86::MULX32rr:
2244 SrcReg = X86::EDX; LoReg = HiReg = 0;
2245 break;
2246 case X86::MULX64rr:
2247 SrcReg = X86::RDX; LoReg = HiReg = 0;
2248 break;
Dan Gohman757eee82009-08-02 16:10:52 +00002249 }
2250
2251 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
Dan Gohmanea6f91f2010-01-05 01:24:18 +00002252 bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
Bill Wendlingfe3bdb42009-08-07 21:33:25 +00002253 // Multiply is commutative.
Dan Gohman757eee82009-08-02 16:10:52 +00002254 if (!foldedLoad) {
Dan Gohmanea6f91f2010-01-05 01:24:18 +00002255 foldedLoad = TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
Dan Gohman757eee82009-08-02 16:10:52 +00002256 if (foldedLoad)
2257 std::swap(N0, N1);
2258 }
2259
Michael Liaof9f7b552012-09-26 08:22:37 +00002260 SDValue InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, SrcReg,
Craig Toppera4fd6d62012-05-23 05:44:51 +00002261 N0, SDValue()).getValue(1);
Michael Liaof9f7b552012-09-26 08:22:37 +00002262 SDValue ResHi, ResLo;
Dan Gohman757eee82009-08-02 16:10:52 +00002263
2264 if (foldedLoad) {
Michael Liaof9f7b552012-09-26 08:22:37 +00002265 SDValue Chain;
Dan Gohman757eee82009-08-02 16:10:52 +00002266 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
2267 InFlag };
Michael Liaof9f7b552012-09-26 08:22:37 +00002268 if (MOpc == X86::MULX32rm || MOpc == X86::MULX64rm) {
2269 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Other, MVT::Glue);
2270 SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops,
2271 array_lengthof(Ops));
2272 ResHi = SDValue(CNode, 0);
2273 ResLo = SDValue(CNode, 1);
2274 Chain = SDValue(CNode, 2);
2275 InFlag = SDValue(CNode, 3);
2276 } else {
2277 SDVTList VTs = CurDAG->getVTList(MVT::Other, MVT::Glue);
2278 SDNode *CNode = CurDAG->getMachineNode(MOpc, dl, VTs, Ops,
2279 array_lengthof(Ops));
2280 Chain = SDValue(CNode, 0);
2281 InFlag = SDValue(CNode, 1);
2282 }
Chris Lattner364bb0a2010-12-05 07:30:36 +00002283
Dan Gohman757eee82009-08-02 16:10:52 +00002284 // Update the chain.
Michael Liaof9f7b552012-09-26 08:22:37 +00002285 ReplaceUses(N1.getValue(1), Chain);
Dan Gohman757eee82009-08-02 16:10:52 +00002286 } else {
Michael Liaof9f7b552012-09-26 08:22:37 +00002287 SDValue Ops[] = { N1, InFlag };
2288 if (Opc == X86::MULX32rr || Opc == X86::MULX64rr) {
2289 SDVTList VTs = CurDAG->getVTList(NVT, NVT, MVT::Glue);
2290 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops,
2291 array_lengthof(Ops));
2292 ResHi = SDValue(CNode, 0);
2293 ResLo = SDValue(CNode, 1);
2294 InFlag = SDValue(CNode, 2);
2295 } else {
2296 SDVTList VTs = CurDAG->getVTList(MVT::Glue);
2297 SDNode *CNode = CurDAG->getMachineNode(Opc, dl, VTs, Ops,
2298 array_lengthof(Ops));
2299 InFlag = SDValue(CNode, 0);
2300 }
Dan Gohman757eee82009-08-02 16:10:52 +00002301 }
2302
Jakob Stoklund Olesend7d0d4e2010-06-26 00:39:23 +00002303 // Prevent use of AH in a REX instruction by referencing AX instead.
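    // (In 64-bit mode an instruction carrying a REX prefix cannot encode AH,
    // BH, CH or DH.)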
2304 if (HiReg == X86::AH && Subtarget->is64Bit() &&
2305 !SDValue(Node, 1).use_empty()) {
2306 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2307 X86::AX, MVT::i16, InFlag);
2308 InFlag = Result.getValue(2);
2309 // Get the low part if needed. Don't use getCopyFromReg for aliasing
2310 // registers.
2311 if (!SDValue(Node, 0).use_empty())
2312 ReplaceUses(SDValue(Node, 0),
2313 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2314
2315 // Shift AX down 8 bits.
2316 Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
2317 Result,
2318 CurDAG->getTargetConstant(8, MVT::i8)), 0);
2319 // Then truncate it down to i8.
2320 ReplaceUses(SDValue(Node, 1),
2321 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2322 }
Dan Gohman757eee82009-08-02 16:10:52 +00002323 // Copy the low half of the result, if it is needed.
Dan Gohmanea6f91f2010-01-05 01:24:18 +00002324 if (!SDValue(Node, 0).use_empty()) {
Michael Liaof9f7b552012-09-26 08:22:37 +00002325 if (ResLo.getNode() == 0) {
2326 assert(LoReg && "Register for low half is not defined!");
2327 ResLo = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, LoReg, NVT,
2328 InFlag);
2329 InFlag = ResLo.getValue(2);
2330 }
2331 ReplaceUses(SDValue(Node, 0), ResLo);
2332 DEBUG(dbgs() << "=> "; ResLo.getNode()->dump(CurDAG); dbgs() << '\n');
Dan Gohman757eee82009-08-02 16:10:52 +00002333 }
2334 // Copy the high half of the result, if it is needed.
Dan Gohmanea6f91f2010-01-05 01:24:18 +00002335 if (!SDValue(Node, 1).use_empty()) {
Michael Liaof9f7b552012-09-26 08:22:37 +00002336 if (ResHi.getNode() == 0) {
2337 assert(HiReg && "Register for high half is not defined!");
2338 ResHi = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl, HiReg, NVT,
2339 InFlag);
2340 InFlag = ResHi.getValue(2);
2341 }
2342 ReplaceUses(SDValue(Node, 1), ResHi);
2343 DEBUG(dbgs() << "=> "; ResHi.getNode()->dump(CurDAG); dbgs() << '\n');
Dan Gohman757eee82009-08-02 16:10:52 +00002344 }
Chad Rosier24c19d22012-08-01 18:39:17 +00002345
Dan Gohman757eee82009-08-02 16:10:52 +00002346 return NULL;
2347 }
2348
2349 case ISD::SDIVREM:
2350 case ISD::UDIVREM: {
2351 SDValue N0 = Node->getOperand(0);
2352 SDValue N1 = Node->getOperand(1);
2353
2354 bool isSigned = Opcode == ISD::SDIVREM;
Bill Wendlingfe3bdb42009-08-07 21:33:25 +00002355 if (!isSigned) {
Owen Anderson9f944592009-08-11 20:47:22 +00002356 switch (NVT.getSimpleVT().SimpleTy) {
Dan Gohman757eee82009-08-02 16:10:52 +00002357 default: llvm_unreachable("Unsupported VT!");
Owen Anderson9f944592009-08-11 20:47:22 +00002358 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
2359 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
2360 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
2361 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
Dan Gohman757eee82009-08-02 16:10:52 +00002362 }
Bill Wendlingfe3bdb42009-08-07 21:33:25 +00002363 } else {
Owen Anderson9f944592009-08-11 20:47:22 +00002364 switch (NVT.getSimpleVT().SimpleTy) {
Dan Gohman757eee82009-08-02 16:10:52 +00002365 default: llvm_unreachable("Unsupported VT!");
Owen Anderson9f944592009-08-11 20:47:22 +00002366 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
2367 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
2368 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
2369 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
Dan Gohman757eee82009-08-02 16:10:52 +00002370 }
Bill Wendlingfe3bdb42009-08-07 21:33:25 +00002371 }
Dan Gohman757eee82009-08-02 16:10:52 +00002372
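    // Hardware DIV/IDIV take the dividend in the HiReg:LoReg pair (AX for
    // the i8 case) and produce the quotient in LoReg and the remainder in
    // HiReg.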
Chris Lattner518b0372009-12-23 01:45:04 +00002373 unsigned LoReg, HiReg, ClrReg;
Dan Gohman757eee82009-08-02 16:10:52 +00002374 unsigned ClrOpcode, SExtOpcode;
Owen Anderson9f944592009-08-11 20:47:22 +00002375 switch (NVT.getSimpleVT().SimpleTy) {
Dan Gohman757eee82009-08-02 16:10:52 +00002376 default: llvm_unreachable("Unsupported VT!");
Owen Anderson9f944592009-08-11 20:47:22 +00002377 case MVT::i8:
Chris Lattner518b0372009-12-23 01:45:04 +00002378 LoReg = X86::AL; ClrReg = HiReg = X86::AH;
Dan Gohman757eee82009-08-02 16:10:52 +00002379 ClrOpcode = 0;
2380 SExtOpcode = X86::CBW;
2381 break;
Owen Anderson9f944592009-08-11 20:47:22 +00002382 case MVT::i16:
Dan Gohman757eee82009-08-02 16:10:52 +00002383 LoReg = X86::AX; HiReg = X86::DX;
Dan Gohmanc1195802010-01-12 04:42:54 +00002384 ClrOpcode = X86::MOV16r0; ClrReg = X86::DX;
Dan Gohman757eee82009-08-02 16:10:52 +00002385 SExtOpcode = X86::CWD;
2386 break;
Owen Anderson9f944592009-08-11 20:47:22 +00002387 case MVT::i32:
Chris Lattner518b0372009-12-23 01:45:04 +00002388 LoReg = X86::EAX; ClrReg = HiReg = X86::EDX;
Dan Gohman757eee82009-08-02 16:10:52 +00002389 ClrOpcode = X86::MOV32r0;
2390 SExtOpcode = X86::CDQ;
2391 break;
Owen Anderson9f944592009-08-11 20:47:22 +00002392 case MVT::i64:
Chris Lattner518b0372009-12-23 01:45:04 +00002393 LoReg = X86::RAX; ClrReg = HiReg = X86::RDX;
Dan Gohmanc1195802010-01-12 04:42:54 +00002394 ClrOpcode = X86::MOV64r0;
Dan Gohman757eee82009-08-02 16:10:52 +00002395 SExtOpcode = X86::CQO;
Evan Chenge62288f2009-07-30 08:33:02 +00002396 break;
2397 }
2398
Dan Gohman757eee82009-08-02 16:10:52 +00002399 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4;
Dan Gohmanea6f91f2010-01-05 01:24:18 +00002400 bool foldedLoad = TryFoldLoad(Node, N1, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4);
Dan Gohman757eee82009-08-02 16:10:52 +00002401 bool signBitIsZero = CurDAG->SignBitIsZero(N0);
Dan Gohmana1603612007-10-08 18:33:35 +00002402
Dan Gohman757eee82009-08-02 16:10:52 +00002403 SDValue InFlag;
Owen Anderson9f944592009-08-11 20:47:22 +00002404 if (NVT == MVT::i8 && (!isSigned || signBitIsZero)) {
Dan Gohman757eee82009-08-02 16:10:52 +00002405 // Special case for div8, just use a move with zero extension to AX to
2406 // clear the upper 8 bits (AH).
2407 SDValue Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, Move, Chain;
Dan Gohmanea6f91f2010-01-05 01:24:18 +00002408 if (TryFoldLoad(Node, N0, Tmp0, Tmp1, Tmp2, Tmp3, Tmp4)) {
Dan Gohman757eee82009-08-02 16:10:52 +00002409 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N0.getOperand(0) };
2410 Move =
Stuart Hastings91f1d242011-05-20 19:04:40 +00002411 SDValue(CurDAG->getMachineNode(X86::MOVZX32rm8, dl, MVT::i32,
Dan Gohman32f71d72009-09-25 18:54:59 +00002412 MVT::Other, Ops,
2413 array_lengthof(Ops)), 0);
Dan Gohman757eee82009-08-02 16:10:52 +00002414 Chain = Move.getValue(1);
2415 ReplaceUses(N0.getValue(1), Chain);
Evan Cheng10d27902006-01-06 20:36:21 +00002416 } else {
Dan Gohman757eee82009-08-02 16:10:52 +00002417 Move =
Stuart Hastings91f1d242011-05-20 19:04:40 +00002418 SDValue(CurDAG->getMachineNode(X86::MOVZX32rr8, dl, MVT::i32, N0),0);
Dan Gohman757eee82009-08-02 16:10:52 +00002419 Chain = CurDAG->getEntryNode();
2420 }
Stuart Hastings91f1d242011-05-20 19:04:40 +00002421 Chain = CurDAG->getCopyToReg(Chain, dl, X86::EAX, Move, SDValue());
Dan Gohman757eee82009-08-02 16:10:52 +00002422 InFlag = Chain.getValue(1);
2423 } else {
2424 InFlag =
2425 CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl,
2426 LoReg, N0, SDValue()).getValue(1);
2427 if (isSigned && !signBitIsZero) {
2428 // Sign extend the low part into the high part.
Evan Chengd1b82d82006-02-09 07:17:49 +00002429 InFlag =
Chris Lattner3e5fbd72010-12-21 02:38:05 +00002430 SDValue(CurDAG->getMachineNode(SExtOpcode, dl, MVT::Glue, InFlag),0);
Dan Gohman757eee82009-08-02 16:10:52 +00002431 } else {
2432 // Zero out the high part, effectively zero extending the input.
Dan Gohmanc1195802010-01-12 04:42:54 +00002433 SDValue ClrNode =
2434 SDValue(CurDAG->getMachineNode(ClrOpcode, dl, NVT), 0);
Chris Lattner518b0372009-12-23 01:45:04 +00002435 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), dl, ClrReg,
Dan Gohman757eee82009-08-02 16:10:52 +00002436 ClrNode, InFlag).getValue(1);
Dan Gohmana1603612007-10-08 18:33:35 +00002437 }
Evan Cheng92e27972006-01-06 23:19:29 +00002438 }
Dan Gohmana1603612007-10-08 18:33:35 +00002439
Dan Gohman757eee82009-08-02 16:10:52 +00002440 if (foldedLoad) {
2441 SDValue Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, Tmp4, N1.getOperand(0),
2442 InFlag };
2443 SDNode *CNode =
Chris Lattner3e5fbd72010-12-21 02:38:05 +00002444 CurDAG->getMachineNode(MOpc, dl, MVT::Other, MVT::Glue, Ops,
Dan Gohman32f71d72009-09-25 18:54:59 +00002445 array_lengthof(Ops));
Dan Gohman757eee82009-08-02 16:10:52 +00002446 InFlag = SDValue(CNode, 1);
2447 // Update the chain.
2448 ReplaceUses(N1.getValue(1), SDValue(CNode, 0));
2449 } else {
2450 InFlag =
Chris Lattner3e5fbd72010-12-21 02:38:05 +00002451 SDValue(CurDAG->getMachineNode(Opc, dl, MVT::Glue, N1, InFlag), 0);
Dan Gohman757eee82009-08-02 16:10:52 +00002452 }
Evan Cheng92e27972006-01-06 23:19:29 +00002453
Jakob Stoklund Olesend7d0d4e2010-06-26 00:39:23 +00002454 // Prevent use of AH in a REX instruction by referencing AX instead.
2455 // Shift it down 8 bits.
2456 if (HiReg == X86::AH && Subtarget->is64Bit() &&
2457 !SDValue(Node, 1).use_empty()) {
2458 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2459 X86::AX, MVT::i16, InFlag);
2460 InFlag = Result.getValue(2);
2461
2462 // If we also need AL (the quotient), get it by extracting a subreg from
2463 // Result. The fast register allocator does not like multiple CopyFromReg
2464 // nodes using aliasing registers.
2465 if (!SDValue(Node, 0).use_empty())
2466 ReplaceUses(SDValue(Node, 0),
2467 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2468
2469 // Shift AX right by 8 bits instead of using AH.
2470 Result = SDValue(CurDAG->getMachineNode(X86::SHR16ri, dl, MVT::i16,
2471 Result,
2472 CurDAG->getTargetConstant(8, MVT::i8)),
2473 0);
2474 ReplaceUses(SDValue(Node, 1),
2475 CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl, MVT::i8, Result));
2476 }
Dan Gohman757eee82009-08-02 16:10:52 +00002477 // Copy the division (low) result, if it is needed.
Dan Gohmanea6f91f2010-01-05 01:24:18 +00002478 if (!SDValue(Node, 0).use_empty()) {
Dan Gohman757eee82009-08-02 16:10:52 +00002479 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2480 LoReg, NVT, InFlag);
2481 InFlag = Result.getValue(2);
Dan Gohmanea6f91f2010-01-05 01:24:18 +00002482 ReplaceUses(SDValue(Node, 0), Result);
Chris Lattnerf98f1242010-03-02 06:34:30 +00002483 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
Dan Gohman757eee82009-08-02 16:10:52 +00002484 }
2485 // Copy the remainder (high) result, if it is needed.
Dan Gohmanea6f91f2010-01-05 01:24:18 +00002486 if (!SDValue(Node, 1).use_empty()) {
Jakob Stoklund Olesend7d0d4e2010-06-26 00:39:23 +00002487 SDValue Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
2488 HiReg, NVT, InFlag);
2489 InFlag = Result.getValue(2);
Dan Gohmanea6f91f2010-01-05 01:24:18 +00002490 ReplaceUses(SDValue(Node, 1), Result);
Chris Lattnerf98f1242010-03-02 06:34:30 +00002491 DEBUG(dbgs() << "=> "; Result.getNode()->dump(CurDAG); dbgs() << '\n');
Dan Gohman757eee82009-08-02 16:10:52 +00002492 }
Dan Gohman757eee82009-08-02 16:10:52 +00002493 return NULL;
2494 }
2495
Manman Ren1be131b2012-08-08 00:51:41 +00002496 case X86ISD::CMP:
2497 case X86ISD::SUB: {
2498 // Sometimes a SUB is used to perform comparison.
2499 if (Opcode == X86ISD::SUB && Node->hasAnyUseOfValue(0))
2500 // This node is not a CMP.
2501 break;
Dan Gohmanac33a902009-08-19 18:16:17 +00002502 SDValue N0 = Node->getOperand(0);
2503 SDValue N1 = Node->getOperand(1);
2504
2505 // Look for (X86cmp (and $op, $imm), 0) and see if we can convert it to
2506 // use a smaller encoding.
Eli Friedman39d0f572010-08-04 22:40:58 +00002507 if (N0.getOpcode() == ISD::TRUNCATE && N0.hasOneUse() &&
2508 HasNoSignedComparisonUses(Node))
Evan Cheng050df1b2010-04-28 08:30:49 +00002509 // Look past the truncate if CMP is the only use of it.
2510 N0 = N0.getOperand(0);
Dan Gohman198b7ff2011-11-03 21:49:52 +00002511 if ((N0.getNode()->getOpcode() == ISD::AND ||
2512 (N0.getResNo() == 0 && N0.getNode()->getOpcode() == X86ISD::AND)) &&
2513 N0.getNode()->hasOneUse() &&
Dan Gohmanac33a902009-08-19 18:16:17 +00002514 N0.getValueType() != MVT::i8 &&
2515 X86::isZeroNode(N1)) {
2516 ConstantSDNode *C = dyn_cast<ConstantSDNode>(N0.getNode()->getOperand(1));
2517 if (!C) break;
2518
2519 // For example, convert "testl %eax, $8" to "testb %al, $8"
Dan Gohman7d9dffb2009-10-09 20:35:19 +00002520 if ((C->getZExtValue() & ~UINT64_C(0xff)) == 0 &&
2521 (!(C->getZExtValue() & 0x80) ||
2522 HasNoSignedComparisonUses(Node))) {
Dan Gohmanac33a902009-08-19 18:16:17 +00002523 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i8);
2524 SDValue Reg = N0.getNode()->getOperand(0);
2525
2526 // On x86-32, only the ABCD registers have 8-bit subregisters.
2527 if (!Subtarget->is64Bit()) {
Craig Toppercc830f82012-02-22 07:28:11 +00002528 const TargetRegisterClass *TRC;
Dan Gohmanac33a902009-08-19 18:16:17 +00002529 switch (N0.getValueType().getSimpleVT().SimpleTy) {
2530 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2531 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
2532 default: llvm_unreachable("Unsupported TEST operand type!");
2533 }
2534 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
Dan Gohman32f71d72009-09-25 18:54:59 +00002535 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
2536 Reg.getValueType(), Reg, RC), 0);
Dan Gohmanac33a902009-08-19 18:16:17 +00002537 }
2538
2539 // Extract the l-register.
Jakob Stoklund Olesen9340ea52010-05-24 14:48:17 +00002540 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
Dan Gohmanac33a902009-08-19 18:16:17 +00002541 MVT::i8, Reg);
2542
2543 // Emit a testb.
Manman Ren511c6d02012-09-28 18:53:24 +00002544 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri, dl, MVT::i32,
2545 Subreg, Imm);
2546 // Replace SUB|CMP with TEST. Since SUB has two outputs while TEST has
2547 // only one, do not call ReplaceAllUsesWith.
2548 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2549 SDValue(NewNode, 0));
2550 return NULL;
Dan Gohmanac33a902009-08-19 18:16:17 +00002551 }
2552
2553 // For example, "testl %eax, $2048" to "testb %ah, $8".
Dan Gohman7d9dffb2009-10-09 20:35:19 +00002554 if ((C->getZExtValue() & ~UINT64_C(0xff00)) == 0 &&
2555 (!(C->getZExtValue() & 0x8000) ||
2556 HasNoSignedComparisonUses(Node))) {
Dan Gohmanac33a902009-08-19 18:16:17 +00002557 // Shift the immediate right by 8 bits.
2558 SDValue ShiftedImm = CurDAG->getTargetConstant(C->getZExtValue() >> 8,
2559 MVT::i8);
2560 SDValue Reg = N0.getNode()->getOperand(0);
2561
2562 // Put the value in an ABCD register.
Craig Toppercc830f82012-02-22 07:28:11 +00002563 const TargetRegisterClass *TRC;
Dan Gohmanac33a902009-08-19 18:16:17 +00002564 switch (N0.getValueType().getSimpleVT().SimpleTy) {
2565 case MVT::i64: TRC = &X86::GR64_ABCDRegClass; break;
2566 case MVT::i32: TRC = &X86::GR32_ABCDRegClass; break;
2567 case MVT::i16: TRC = &X86::GR16_ABCDRegClass; break;
2568 default: llvm_unreachable("Unsupported TEST operand type!");
2569 }
2570 SDValue RC = CurDAG->getTargetConstant(TRC->getID(), MVT::i32);
Dan Gohman32f71d72009-09-25 18:54:59 +00002571 Reg = SDValue(CurDAG->getMachineNode(X86::COPY_TO_REGCLASS, dl,
2572 Reg.getValueType(), Reg, RC), 0);
Dan Gohmanac33a902009-08-19 18:16:17 +00002573
2574 // Extract the h-register.
Jakob Stoklund Olesen9340ea52010-05-24 14:48:17 +00002575 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
Dan Gohmanac33a902009-08-19 18:16:17 +00002576 MVT::i8, Reg);
2577
Jakob Stoklund Olesen729abd32011-10-08 18:28:28 +00002578 // Emit a testb. The EXTRACT_SUBREG becomes a COPY that can only
2579 // target GR8_NOREX registers, so make sure the register class is
2580 // forced.
Manman Ren511c6d02012-09-28 18:53:24 +00002581 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST8ri_NOREX, dl,
2582 MVT::i32, Subreg, ShiftedImm);
2583 // Replace SUB|CMP with TEST. Since SUB has two outputs while TEST has
2584 // only one, do not call ReplaceAllUsesWith.
2585 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2586 SDValue(NewNode, 0));
2587 return NULL;
Dan Gohmanac33a902009-08-19 18:16:17 +00002588 }
2589
2590 // For example, "testl %eax, $32776" to "testw %ax, $32776".
2591 if ((C->getZExtValue() & ~UINT64_C(0xffff)) == 0 &&
Dan Gohman7d9dffb2009-10-09 20:35:19 +00002592 N0.getValueType() != MVT::i16 &&
2593 (!(C->getZExtValue() & 0x8000) ||
2594 HasNoSignedComparisonUses(Node))) {
Dan Gohmanac33a902009-08-19 18:16:17 +00002595 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i16);
2596 SDValue Reg = N0.getNode()->getOperand(0);
2597
2598 // Extract the 16-bit subregister.
Jakob Stoklund Olesen9340ea52010-05-24 14:48:17 +00002599 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
Dan Gohmanac33a902009-08-19 18:16:17 +00002600 MVT::i16, Reg);
2601
2602 // Emit a testw.
Manman Ren511c6d02012-09-28 18:53:24 +00002603 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST16ri, dl, MVT::i32,
2604 Subreg, Imm);
2605 // Replace SUB|CMP with TEST. Since SUB has two outputs while TEST has
2606 // only one, do not call ReplaceAllUsesWith.
2607 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2608 SDValue(NewNode, 0));
2609 return NULL;
Dan Gohmanac33a902009-08-19 18:16:17 +00002610 }
2611
2612 // For example, "testq %rax, $268468232" to "testl %eax, $268468232".
2613 if ((C->getZExtValue() & ~UINT64_C(0xffffffff)) == 0 &&
Dan Gohman7d9dffb2009-10-09 20:35:19 +00002614 N0.getValueType() == MVT::i64 &&
2615 (!(C->getZExtValue() & 0x80000000) ||
2616 HasNoSignedComparisonUses(Node))) {
Dan Gohmanac33a902009-08-19 18:16:17 +00002617 SDValue Imm = CurDAG->getTargetConstant(C->getZExtValue(), MVT::i32);
2618 SDValue Reg = N0.getNode()->getOperand(0);
2619
2620 // Extract the 32-bit subregister.
Jakob Stoklund Olesen9340ea52010-05-24 14:48:17 +00002621 SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
Dan Gohmanac33a902009-08-19 18:16:17 +00002622 MVT::i32, Reg);
2623
2624 // Emit a testl.
Manman Ren511c6d02012-09-28 18:53:24 +00002625 SDNode *NewNode = CurDAG->getMachineNode(X86::TEST32ri, dl, MVT::i32,
2626 Subreg, Imm);
2627 // Replace SUB|CMP with TEST. Since SUB has two outputs while TEST has
2628 // only one, do not call ReplaceAllUsesWith.
2629 ReplaceUses(SDValue(Node, (Opcode == X86ISD::SUB ? 1 : 0)),
2630 SDValue(NewNode, 0));
2631 return NULL;
Dan Gohmanac33a902009-08-19 18:16:17 +00002632 }
2633 }
2634 break;
2635 }
Pete Cooper7c7ba1b2011-11-15 21:57:53 +00002636 case ISD::STORE: {
Joel Jones68d59e82012-03-29 05:45:48 +00002637 // Change a chain of {load; incr or dec; store} of the same value into
2638 // a simple increment or decrement through memory of that value, if the
2639 // uses of the modified value and its address are suitable.
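    // For example, (store (add (load addr), 1), addr) becomes a single
    // increment through memory when the load and store use the same address
    // and the intermediate values have no other uses.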
Pete Cooper48784ed2011-11-16 19:03:23 +00002640 // The DEC64m tablegen pattern is currently not able to match the case where
Chad Rosier24c19d22012-08-01 18:39:17 +00002641 // the EFLAGS on the original DEC are used. (This also applies to
Joel Jones68d59e82012-03-29 05:45:48 +00002642 // {INC,DEC}X{64,32,16,8}.)
2643 // We'll need to improve tablegen to allow flags to be transferred from a
Pete Cooper48784ed2011-11-16 19:03:23 +00002644 // node in the pattern to the result node, probably with a new keyword.
2645 // For example, we currently have this:
2646 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2647 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2648 // (implicit EFLAGS)]>;
2649 // but we may need something like this:
2650 // def DEC64m : RI<0xFF, MRM1m, (outs), (ins i64mem:$dst), "dec{q}\t$dst",
2651 // [(store (add (loadi64 addr:$dst), -1), addr:$dst),
2652 // (transferrable EFLAGS)]>;
Joel Jones68d59e82012-03-29 05:45:48 +00002653
Pete Cooper7c7ba1b2011-11-15 21:57:53 +00002654 StoreSDNode *StoreNode = cast<StoreSDNode>(Node);
Pete Cooper7c7ba1b2011-11-15 21:57:53 +00002655 SDValue StoredVal = StoreNode->getOperand(1);
Joel Jones68d59e82012-03-29 05:45:48 +00002656 unsigned Opc = StoredVal->getOpcode();
Pete Cooper7c7ba1b2011-11-15 21:57:53 +00002657
Evan Cheng3e869f02012-04-12 19:14:21 +00002658 LoadSDNode *LoadNode = 0;
2659 SDValue InputChain;
2660 if (!isLoadIncOrDecStore(StoreNode, Opc, StoredVal, CurDAG,
2661 LoadNode, InputChain))
2662 break;
Pete Cooper7c7ba1b2011-11-15 21:57:53 +00002663
2664 SDValue Base, Scale, Index, Disp, Segment;
2665 if (!SelectAddr(LoadNode, LoadNode->getBasePtr(),
2666 Base, Scale, Index, Disp, Segment))
2667 break;
2668
2669 MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(2);
2670 MemOp[0] = StoreNode->getMemOperand();
2671 MemOp[1] = LoadNode->getMemOperand();
2672 const SDValue Ops[] = { Base, Scale, Index, Disp, Segment, InputChain };
Chad Rosier24c19d22012-08-01 18:39:17 +00002673 EVT LdVT = LoadNode->getMemoryVT();
Joel Jones68d59e82012-03-29 05:45:48 +00002674 unsigned newOpc = getFusedLdStOpcode(LdVT, Opc);
2675 MachineSDNode *Result = CurDAG->getMachineNode(newOpc,
Pete Cooper7c7ba1b2011-11-15 21:57:53 +00002676 Node->getDebugLoc(),
2677 MVT::i32, MVT::Other, Ops,
2678 array_lengthof(Ops));
2679 Result->setMemRefs(MemOp, MemOp + 2);
2680
2681 ReplaceUses(SDValue(StoreNode, 0), SDValue(Result, 1));
2682 ReplaceUses(SDValue(StoredVal.getNode(), 1), SDValue(Result, 0));
2683
2684 return Result;
2685 }
Chris Lattner655e7df2005-11-16 01:54:32 +00002686 }
2687
Dan Gohmanea6f91f2010-01-05 01:24:18 +00002688 SDNode *ResNode = SelectCode(Node);
Evan Chengbd1c5a82006-08-11 09:08:15 +00002689
Chris Lattnerf98f1242010-03-02 06:34:30 +00002690 DEBUG(dbgs() << "=> ";
2691 if (ResNode == NULL || ResNode == Node)
2692 Node->dump(CurDAG);
2693 else
2694 ResNode->dump(CurDAG);
2695 dbgs() << '\n');
Evan Chengbd1c5a82006-08-11 09:08:15 +00002696
2697 return ResNode;
Chris Lattner655e7df2005-11-16 01:54:32 +00002698}
2699
Chris Lattnerba1ed582006-06-08 18:03:49 +00002700bool X86DAGToDAGISel::
Dan Gohman2ce6f2a2008-07-27 21:46:04 +00002701SelectInlineAsmMemoryOperand(const SDValue &Op, char ConstraintCode,
Dan Gohmaneb0cee92008-08-23 02:25:05 +00002702 std::vector<SDValue> &OutOps) {
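  // An x86 memory operand always expands to the five-part address form:
  // base, scale, index, displacement and segment.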
Rafael Espindola3b2df102009-04-08 21:14:34 +00002703 SDValue Op0, Op1, Op2, Op3, Op4;
Chris Lattnerba1ed582006-06-08 18:03:49 +00002704 switch (ConstraintCode) {
2705 case 'o': // offsetable ??
2706 case 'v': // not offsetable ??
2707 default: return true;
2708 case 'm': // memory
Chris Lattnerd58d7c12010-09-21 22:07:31 +00002709 if (!SelectAddr(0, Op, Op0, Op1, Op2, Op3, Op4))
Chris Lattnerba1ed582006-06-08 18:03:49 +00002710 return true;
2711 break;
2712 }
Chad Rosier24c19d22012-08-01 18:39:17 +00002713
Evan Cheng2d487222006-08-26 01:05:16 +00002714 OutOps.push_back(Op0);
2715 OutOps.push_back(Op1);
2716 OutOps.push_back(Op2);
2717 OutOps.push_back(Op3);
Rafael Espindola3b2df102009-04-08 21:14:34 +00002718 OutOps.push_back(Op4);
Chris Lattnerba1ed582006-06-08 18:03:49 +00002719 return false;
2720}
2721
Chad Rosier24c19d22012-08-01 18:39:17 +00002722/// createX86ISelDag - This pass converts a legalized DAG into a
Chris Lattner655e7df2005-11-16 01:54:32 +00002723/// X86-specific DAG, ready for instruction scheduling.
2724///
Bill Wendling026e5d72009-04-29 23:29:43 +00002725FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM,
Craig Topperf6e7e122012-03-27 07:21:54 +00002726 CodeGenOpt::Level OptLevel) {
Bill Wendling084669a2009-04-29 00:15:41 +00002727 return new X86DAGToDAGISel(TM, OptLevel);
Chris Lattner655e7df2005-11-16 01:54:32 +00002728}