//===- X86ISelDAGToDAG.cpp - A DAG pattern matching inst selector for X86 -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a DAG pattern matching instruction selector for X86,
// converting from a legalized DAG to an X86 DAG.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86-isel"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "X86RegisterInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/GlobalValue.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/Support/CFG.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/ADT/Statistic.h"
#include <queue>
#include <set>
using namespace llvm;

STATISTIC(NumFPKill   , "Number of FP_REG_KILL instructions added");
STATISTIC(NumLoadMoved, "Number of loads moved below TokenFactor");


//===----------------------------------------------------------------------===//
//                      Pattern Matcher Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// X86ISelAddressMode - This corresponds to X86AddressMode, but uses
  /// SDOperand's instead of register numbers for the leaves of the matched
  /// tree.
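  /// For example, a reference like "GV+4(%ebx,%esi,8)" would be captured with
  /// Base.Reg = %ebx, IndexReg = %esi, Scale = 8, Disp = 4 and GV pointing at
  /// the global (an illustrative mapping; the register choices are arbitrary).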
  struct X86ISelAddressMode {
    enum {
      RegBase,
      FrameIndexBase
    } BaseType;

    struct {            // This is really a union, discriminated by BaseType!
      SDOperand Reg;
      int FrameIndex;
    } Base;

    bool isRIPRel;      // RIP relative?
    unsigned Scale;
    SDOperand IndexReg;
    unsigned Disp;
    GlobalValue *GV;
    Constant *CP;
    const char *ES;
    int JT;
    unsigned Align;     // CP alignment.

    X86ISelAddressMode()
      : BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0),
        GV(0), CP(0), ES(0), JT(-1), Align(0) {
    }
  };
}

namespace {
  //===--------------------------------------------------------------------===//
  /// ISel - X86 specific code to select X86 machine instructions for
  ///        SelectionDAG operations.
  ///
  class VISIBILITY_HIDDEN X86DAGToDAGISel : public SelectionDAGISel {
    /// ContainsFPCode - Every instruction we select that uses or defines a FP
    /// register should set this to true.
    bool ContainsFPCode;

    /// FastISel - Enable fast(er) instruction selection.
    ///
    bool FastISel;

    /// TM - Keep a reference to X86TargetMachine.
    ///
    X86TargetMachine &TM;

    /// X86Lowering - This object fully describes how to lower LLVM code to an
    /// X86-specific SelectionDAG.
    X86TargetLowering X86Lowering;

    /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
    /// make the right decision when generating code for different targets.
    const X86Subtarget *Subtarget;

    /// GlobalBaseReg - keeps track of the virtual register mapped onto global
    /// base register.
    unsigned GlobalBaseReg;

  public:
    X86DAGToDAGISel(X86TargetMachine &tm, bool fast)
      : SelectionDAGISel(X86Lowering),
        ContainsFPCode(false), FastISel(fast), TM(tm),
        X86Lowering(*TM.getTargetLowering()),
        Subtarget(&TM.getSubtarget<X86Subtarget>()) {}

    virtual bool runOnFunction(Function &Fn) {
      // Make sure we re-emit a set of the global base reg if necessary
      GlobalBaseReg = 0;
      return SelectionDAGISel::runOnFunction(Fn);
    }

    virtual const char *getPassName() const {
      return "X86 DAG->DAG Instruction Selection";
    }

    /// InstructionSelectBasicBlock - This callback is invoked by
    /// SelectionDAGISel when it has created a SelectionDAG for us to codegen.
    virtual void InstructionSelectBasicBlock(SelectionDAG &DAG);

    virtual void EmitFunctionEntryCode(Function &Fn, MachineFunction &MF);

    virtual bool CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const;

// Include the pieces autogenerated from the target description.
#include "X86GenDAGISel.inc"

  private:
    SDNode *Select(SDOperand N);

    bool MatchAddress(SDOperand N, X86ISelAddressMode &AM,
                      bool isRoot = true, unsigned Depth = 0);
    bool MatchAddressBase(SDOperand N, X86ISelAddressMode &AM,
                          bool isRoot, unsigned Depth);
    bool SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base,
                    SDOperand &Scale, SDOperand &Index, SDOperand &Disp);
    bool SelectLEAAddr(SDOperand Op, SDOperand N, SDOperand &Base,
                       SDOperand &Scale, SDOperand &Index, SDOperand &Disp);
    bool SelectScalarSSELoad(SDOperand Op, SDOperand Pred,
                             SDOperand N, SDOperand &Base, SDOperand &Scale,
                             SDOperand &Index, SDOperand &Disp,
                             SDOperand &InChain, SDOperand &OutChain);
    bool TryFoldLoad(SDOperand P, SDOperand N,
                     SDOperand &Base, SDOperand &Scale,
                     SDOperand &Index, SDOperand &Disp);
    void PreprocessForRMW(SelectionDAG &DAG);
    void PreprocessForFPConvert(SelectionDAG &DAG);

    /// SelectInlineAsmMemoryOperand - Implement addressing mode selection for
    /// inline asm expressions.
    virtual bool SelectInlineAsmMemoryOperand(const SDOperand &Op,
                                              char ConstraintCode,
                                              std::vector<SDOperand> &OutOps,
                                              SelectionDAG &DAG);

    void EmitSpecialCodeForMain(MachineBasicBlock *BB, MachineFrameInfo *MFI);

    inline void getAddressOperands(X86ISelAddressMode &AM, SDOperand &Base,
                                   SDOperand &Scale, SDOperand &Index,
                                   SDOperand &Disp) {
      Base = (AM.BaseType == X86ISelAddressMode::FrameIndexBase) ?
        CurDAG->getTargetFrameIndex(AM.Base.FrameIndex, TLI.getPointerTy()) :
        AM.Base.Reg;
      Scale = getI8Imm(AM.Scale);
      Index = AM.IndexReg;
      // These are 32-bit even in 64-bit mode since RIP relative offset
      // is 32-bit.
      if (AM.GV)
        Disp = CurDAG->getTargetGlobalAddress(AM.GV, MVT::i32, AM.Disp);
      else if (AM.CP)
        Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i32, AM.Align, AM.Disp);
      else if (AM.ES)
        Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i32);
      else if (AM.JT != -1)
        Disp = CurDAG->getTargetJumpTable(AM.JT, MVT::i32);
      else
        Disp = getI32Imm(AM.Disp);
    }

    /// getI8Imm - Return a target constant with the specified value, of type
    /// i8.
    inline SDOperand getI8Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i8);
    }

    /// getI16Imm - Return a target constant with the specified value, of type
    /// i16.
    inline SDOperand getI16Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i16);
    }

    /// getI32Imm - Return a target constant with the specified value, of type
    /// i32.
    inline SDOperand getI32Imm(unsigned Imm) {
      return CurDAG->getTargetConstant(Imm, MVT::i32);
    }

    /// getGlobalBaseReg - insert code into the entry mbb to materialize the PIC
    /// base register. Return the virtual register that holds this value.
    SDNode *getGlobalBaseReg();

    /// getTruncate - return an SDNode that implements a subreg based truncate
    /// of the specified operand to the specified value type.
    SDNode *getTruncate(SDOperand N0, MVT::ValueType VT);

#ifndef NDEBUG
    unsigned Indent;
#endif
  };
}

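/// findFlagUse - Return a user of N's flag result (its last value), or null
/// if that flag value has no users.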
static SDNode *findFlagUse(SDNode *N) {
  unsigned FlagResNo = N->getNumValues()-1;
  for (SDNode::use_iterator I = N->use_begin(), E = N->use_end(); I != E; ++I) {
    SDNode *User = *I;
    for (unsigned i = 0, e = User->getNumOperands(); i != e; ++i) {
      SDOperand Op = User->getOperand(i);
      if (Op.Val == N && Op.ResNo == FlagResNo)
        return User;
    }
  }
  return NULL;
}

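/// findNonImmUse - Starting from Use, walk up the operand graph looking for
/// Def.  'found' is set if Def is reachable through any path other than the
/// direct use by ImmedUse (or by Root in the store/cmp case); Skip and the
/// node id ordering are only used to prune the search.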
static void findNonImmUse(SDNode *Use, SDNode* Def, SDNode *ImmedUse,
                          SDNode *Root, SDNode *Skip, bool &found,
                          std::set<SDNode *> &Visited) {
  if (found ||
      Use->getNodeId() > Def->getNodeId() ||
      !Visited.insert(Use).second)
    return;

  for (unsigned i = 0, e = Use->getNumOperands(); !found && i != e; ++i) {
    SDNode *N = Use->getOperand(i).Val;
    if (N == Skip)
      continue;
    if (N == Def) {
      if (Use == ImmedUse)
        continue;  // Immediate use is ok.
      if (Use == Root) {
        assert(Use->getOpcode() == ISD::STORE ||
               Use->getOpcode() == X86ISD::CMP);
        continue;
      }
      found = true;
      break;
    }
    findNonImmUse(N, Def, ImmedUse, Root, Skip, found, Visited);
  }
}

/// isNonImmUse - Start searching from Root up the DAG to check if Def can
/// be reached. Return true if that's the case. However, ignore direct uses
/// by ImmedUse (which would be U in the example illustrated in
/// CanBeFoldedBy) and by Root (which can happen in the store case).
/// FIXME: to be really generic, we should allow direct use by any node
/// that is being folded. But realistically since we only fold loads which
/// have one non-chain use, we only need to watch out for load/op/store
/// and load/op/cmp case where the root (store / cmp) may reach the load via
/// its chain operand.
static inline bool isNonImmUse(SDNode *Root, SDNode *Def, SDNode *ImmedUse,
                               SDNode *Skip = NULL) {
  std::set<SDNode *> Visited;
  bool found = false;
  findNonImmUse(Root, Def, ImmedUse, Root, Skip, found, Visited);
  return found;
}


bool X86DAGToDAGISel::CanBeFoldedBy(SDNode *N, SDNode *U, SDNode *Root) const {
  if (FastISel) return false;

  // If U can somehow reach N through another path then U can't fold N or
  // it will create a cycle. e.g. In the following diagram, U can reach N
  // through X. If N is folded into U, then X is both a predecessor and
  // a successor of U.
  //
  //          [ N ]
  //          ^  ^
  //          |  |
  //         /    \---
  //        /        [X]
  //        |         ^
  //       [U]--------|

  if (isNonImmUse(Root, N, U))
    return false;

  // If U produces a flag, then it gets (even more) interesting. Since it
  // would have been "glued" together with its flag use, we need to check if
  // it might reach N:
  //
  //       [ N ]
  //        ^ ^
  //        | |
  //       [U] \--
  //        ^   [TF]
  //        |    ^
  //        |    |
  //         \  /
  //         [FU]
  //
  // If FU (flag use) indirectly reaches N (the load), and U folds N (call it
  // NU), then TF is a predecessor of FU and a successor of NU. But since
  // NU and FU are flagged together, this effectively creates a cycle.
  bool HasFlagUse = false;
  MVT::ValueType VT = Root->getValueType(Root->getNumValues()-1);
  while ((VT == MVT::Flag && !Root->use_empty())) {
    SDNode *FU = findFlagUse(Root);
    if (FU == NULL)
      break;
    else {
      Root = FU;
      HasFlagUse = true;
    }
    VT = Root->getValueType(Root->getNumValues()-1);
  }

  if (HasFlagUse)
    return !isNonImmUse(Root, N, Root, U);
  return true;
}

/// MoveBelowTokenFactor - Replace TokenFactor operand with load's chain operand
/// and move load below the TokenFactor. Replace store's chain operand with
/// load's chain result.
static void MoveBelowTokenFactor(SelectionDAG &DAG, SDOperand Load,
                                 SDOperand Store, SDOperand TF) {
  std::vector<SDOperand> Ops;
  for (unsigned i = 0, e = TF.Val->getNumOperands(); i != e; ++i)
    if (Load.Val == TF.Val->getOperand(i).Val)
      Ops.push_back(Load.Val->getOperand(0));
    else
      Ops.push_back(TF.Val->getOperand(i));
  DAG.UpdateNodeOperands(TF, &Ops[0], Ops.size());
  DAG.UpdateNodeOperands(Load, TF, Load.getOperand(1), Load.getOperand(2));
  DAG.UpdateNodeOperands(Store, Load.getValue(1), Store.getOperand(1),
                         Store.getOperand(2), Store.getOperand(3));
}

/// PreprocessForRMW - Preprocess the DAG to make instruction selection better.
/// This is only run if not in -fast mode (aka -O0).
/// This allows the instruction selector to pick more read-modify-write
/// instructions. This is a common case:
///
///     [Load chain]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///      /      \-
///     /         |
/// [TokenFactor] [Op]
///     ^          ^
///     |          |
///      \        /
///       \      /
///       [Store]
///
/// The fact that the store's chain operand != load's chain will prevent the
/// (store (op (load))) instruction from being selected. We can transform it to:
///
///     [Load chain]
///         ^
///         |
///    [TokenFactor]
///         ^
///         |
///       [Load]
///       ^    ^
///       |    |
///       |     \-
///       |       |
///       |     [Op]
///       |       ^
///       |       |
///       \      /
///        \    /
///       [Store]
void X86DAGToDAGISel::PreprocessForRMW(SelectionDAG &DAG) {
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
         E = DAG.allnodes_end(); I != E; ++I) {
    if (!ISD::isNON_TRUNCStore(I))
      continue;
    SDOperand Chain = I->getOperand(0);
    if (Chain.Val->getOpcode() != ISD::TokenFactor)
      continue;

    SDOperand N1 = I->getOperand(1);
    SDOperand N2 = I->getOperand(2);
    if (MVT::isFloatingPoint(N1.getValueType()) ||
        MVT::isVector(N1.getValueType()) ||
        !N1.hasOneUse())
      continue;

    bool RModW = false;
    SDOperand Load;
    unsigned Opcode = N1.Val->getOpcode();
    switch (Opcode) {
    case ISD::ADD:
    case ISD::MUL:
    case ISD::AND:
    case ISD::OR:
    case ISD::XOR:
    case ISD::ADDC:
    case ISD::ADDE: {
      SDOperand N10 = N1.getOperand(0);
      SDOperand N11 = N1.getOperand(1);
      if (ISD::isNON_EXTLoad(N10.Val))
        RModW = true;
      else if (ISD::isNON_EXTLoad(N11.Val)) {
        RModW = true;
        std::swap(N10, N11);
      }
      RModW = RModW && N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
        (N10.getOperand(1) == N2) &&
        (N10.Val->getValueType(0) == N1.getValueType());
      if (RModW)
        Load = N10;
      break;
    }
    case ISD::SUB:
    case ISD::SHL:
    case ISD::SRA:
    case ISD::SRL:
    case ISD::ROTL:
    case ISD::ROTR:
    case ISD::SUBC:
    case ISD::SUBE:
    case X86ISD::SHLD:
    case X86ISD::SHRD: {
      SDOperand N10 = N1.getOperand(0);
      if (ISD::isNON_EXTLoad(N10.Val))
        RModW = N10.Val->isOperand(Chain.Val) && N10.hasOneUse() &&
          (N10.getOperand(1) == N2) &&
          (N10.Val->getValueType(0) == N1.getValueType());
      if (RModW)
        Load = N10;
      break;
    }
    }

    if (RModW) {
      MoveBelowTokenFactor(DAG, Load, SDOperand(I, 0), Chain);
      ++NumLoadMoved;
    }
  }
}


/// PreprocessForFPConvert - Walk over the dag lowering fpround and fpextend
/// nodes that target the FP stack to be stores and loads to the stack.  This
/// is a gross hack.  We would like to simply mark these as being illegal, but
/// when we do that, legalize produces these when it expands calls, then
/// expands these in the same legalize pass.  We would like dag combine to be
/// able to hack on these between the call expansion and the node
/// legalization.  As such this pass basically does "really late" legalization
/// of these inline with the X86 isel pass.
void X86DAGToDAGISel::PreprocessForFPConvert(SelectionDAG &DAG) {
  for (SelectionDAG::allnodes_iterator I = DAG.allnodes_begin(),
       E = DAG.allnodes_end(); I != E; ) {
    SDNode *N = I++;  // Preincrement iterator to avoid invalidation issues.
    if (N->getOpcode() != ISD::FP_ROUND && N->getOpcode() != ISD::FP_EXTEND)
      continue;

    // If the source and destination are SSE registers, then this is a legal
    // conversion that should not be lowered.
    MVT::ValueType SrcVT = N->getOperand(0).getValueType();
    MVT::ValueType DstVT = N->getValueType(0);
    bool SrcIsSSE = X86Lowering.isScalarFPTypeInSSEReg(SrcVT);
    bool DstIsSSE = X86Lowering.isScalarFPTypeInSSEReg(DstVT);
    if (SrcIsSSE && DstIsSSE)
      continue;

    // If this is an FPStack extension (but not a truncation), it is a noop.
    if (!SrcIsSSE && !DstIsSSE && N->getOpcode() == ISD::FP_EXTEND)
      continue;

    // Here we could have an FP stack truncation or an FPStack <-> SSE convert.
    // FPStack has extload and truncstore.  SSE can fold direct loads into other
    // operations.  Based on this, decide what we want to do.
    MVT::ValueType MemVT;
    if (N->getOpcode() == ISD::FP_ROUND)
      MemVT = DstVT;  // FP_ROUND must use DstVT, we can't do a 'trunc load'.
    else
      MemVT = SrcIsSSE ? SrcVT : DstVT;

    SDOperand MemTmp = DAG.CreateStackTemporary(MemVT);

    // FIXME: optimize the case where the src/dest is a load or store?
    SDOperand Store = DAG.getTruncStore(DAG.getEntryNode(), N->getOperand(0),
                                        MemTmp, NULL, 0, MemVT);
    SDOperand Result = DAG.getExtLoad(ISD::EXTLOAD, DstVT, Store, MemTmp,
                                      NULL, 0, MemVT);

    // We're about to replace all uses of the FP_ROUND/FP_EXTEND with the
    // extload we created.  This will cause general havok on the dag because
    // anything below the conversion could be folded into other existing nodes.
    // To avoid invalidating 'I', back it up to the convert node.
    --I;
    DAG.ReplaceAllUsesOfValueWith(SDOperand(N, 0), Result);

    // Now that we did that, the node is dead.  Increment the iterator to the
    // next node to process, then delete N.
    ++I;
    DAG.DeleteNode(N);
  }
}

/// InstructionSelectBasicBlock - This callback is invoked by SelectionDAGISel
/// when it has created a SelectionDAG for us to codegen.
void X86DAGToDAGISel::InstructionSelectBasicBlock(SelectionDAG &DAG) {
  DEBUG(BB->dump());
  MachineFunction::iterator FirstMBB = BB;

  if (!FastISel)
    PreprocessForRMW(DAG);

  // FIXME: This should only happen when not -fast.
  PreprocessForFPConvert(DAG);

  // Codegen the basic block.
#ifndef NDEBUG
  DOUT << "===== Instruction selection begins:\n";
  Indent = 0;
#endif
  DAG.setRoot(SelectRoot(DAG.getRoot()));
#ifndef NDEBUG
  DOUT << "===== Instruction selection ends:\n";
#endif

  DAG.RemoveDeadNodes();

  // Emit machine code to BB.
  ScheduleAndEmitDAG(DAG);

  // If we are emitting FP stack code, scan the basic block to determine if this
  // block defines any FP values.  If so, put an FP_REG_KILL instruction before
  // the terminator of the block.

  // Note that FP stack instructions are used in all modes for long double,
  // so we always need to do this check.
  // Also note that it's possible for an FP stack register to be live across
  // an instruction that produces multiple basic blocks (SSE CMOV) so we
  // must check all the generated basic blocks.

  // Scan all of the machine instructions in these MBBs, checking for FP
  // stores.  (RFP32 and RFP64 will not exist in SSE mode, but RFP80 might.)
  MachineFunction::iterator MBBI = FirstMBB;
  do {
    bool ContainsFPCode = false;
    for (MachineBasicBlock::iterator I = MBBI->begin(), E = MBBI->end();
         !ContainsFPCode && I != E; ++I) {
      if (I->getNumOperands() != 0 && I->getOperand(0).isRegister()) {
        const TargetRegisterClass *clas;
        for (unsigned op = 0, e = I->getNumOperands(); op != e; ++op) {
          if (I->getOperand(op).isRegister() && I->getOperand(op).isDef() &&
              MRegisterInfo::isVirtualRegister(I->getOperand(op).getReg()) &&
              ((clas = RegInfo->getRegClass(I->getOperand(0).getReg())) ==
                 X86::RFP32RegisterClass ||
               clas == X86::RFP64RegisterClass ||
               clas == X86::RFP80RegisterClass)) {
            ContainsFPCode = true;
            break;
          }
        }
      }
    }
    // Check PHI nodes in successor blocks.  These PHI's will be lowered to have
    // a copy of the input value in this block.  In SSE mode, we only care about
    // 80-bit values.
    if (!ContainsFPCode) {
      // Final check, check LLVM BB's that are successors to the LLVM BB
      // corresponding to BB for FP PHI nodes.
      const BasicBlock *LLVMBB = BB->getBasicBlock();
      const PHINode *PN;
      for (succ_const_iterator SI = succ_begin(LLVMBB), E = succ_end(LLVMBB);
           !ContainsFPCode && SI != E; ++SI) {
        for (BasicBlock::const_iterator II = SI->begin();
             (PN = dyn_cast<PHINode>(II)); ++II) {
          if (PN->getType()==Type::X86_FP80Ty ||
              (!Subtarget->hasSSE1() && PN->getType()->isFloatingPoint()) ||
              (!Subtarget->hasSSE2() && PN->getType()==Type::DoubleTy)) {
            ContainsFPCode = true;
            break;
          }
        }
      }
    }
    // Finally, if we found any FP code, emit the FP_REG_KILL instruction.
    if (ContainsFPCode) {
      BuildMI(*MBBI, MBBI->getFirstTerminator(),
              TM.getInstrInfo()->get(X86::FP_REG_KILL));
      ++NumFPKill;
    }
  } while (&*(MBBI++) != BB);
}

/// EmitSpecialCodeForMain - Emit any code that needs to be executed only in
/// the main function.
void X86DAGToDAGISel::EmitSpecialCodeForMain(MachineBasicBlock *BB,
                                             MachineFrameInfo *MFI) {
  const TargetInstrInfo *TII = TM.getInstrInfo();
  if (Subtarget->isTargetCygMing())
    BuildMI(BB, TII->get(X86::CALLpcrel32)).addExternalSymbol("__main");
}

void X86DAGToDAGISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
  // If this is main, emit special code for main.
  MachineBasicBlock *BB = MF.begin();
  if (Fn.hasExternalLinkage() && Fn.getName() == "main")
    EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}

/// MatchAddress - Add the specified node to the specified addressing mode,
/// returning true if it cannot be done.  This just pattern matches for the
/// addressing mode.
bool X86DAGToDAGISel::MatchAddress(SDOperand N, X86ISelAddressMode &AM,
                                   bool isRoot, unsigned Depth) {
  // Limit recursion.
  if (Depth > 5)
    return MatchAddressBase(N, AM, isRoot, Depth);

  // RIP relative addressing: %rip + 32-bit displacement!
  if (AM.isRIPRel) {
    if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) {
      int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
      if (isInt32(AM.Disp + Val)) {
        AM.Disp += Val;
        return false;
      }
    }
    return true;
  }

  int id = N.Val->getNodeId();
  bool AlreadySelected = isSelected(id); // Already selected, not yet replaced.

  switch (N.getOpcode()) {
  default: break;
  case ISD::Constant: {
    int64_t Val = cast<ConstantSDNode>(N)->getSignExtended();
    if (isInt32(AM.Disp + Val)) {
      AM.Disp += Val;
      return false;
    }
    break;
  }

  case X86ISD::Wrapper: {
    bool is64Bit = Subtarget->is64Bit();
    // Under X86-64 non-small code model, GV (and friends) are 64-bits.
    if (is64Bit && TM.getCodeModel() != CodeModel::Small)
      break;
    if (AM.GV != 0 || AM.CP != 0 || AM.ES != 0 || AM.JT != -1)
      break;
    // If the value is available in a register and both base and index
    // components have been picked, we can't fit the result in the addressing
    // mode.  Duplicate GlobalAddress or ConstantPool as displacement instead.
    if (!AlreadySelected || (AM.Base.Reg.Val && AM.IndexReg.Val)) {
      bool isStatic = TM.getRelocationModel() == Reloc::Static;
      SDOperand N0 = N.getOperand(0);
      // On Mac OS X x86-64, the lower 4G of the address space is not available.
      bool isAbs32 = !is64Bit ||
        (isStatic && Subtarget->hasLow4GUserSpaceAddress());
      if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
        GlobalValue *GV = G->getGlobal();
        if (isAbs32 || isRoot) {
          AM.GV = GV;
          AM.Disp += G->getOffset();
          AM.isRIPRel = !isAbs32;
          return false;
        }
      } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
        if (isAbs32 || isRoot) {
          AM.CP = CP->getConstVal();
          AM.Align = CP->getAlignment();
          AM.Disp += CP->getOffset();
          AM.isRIPRel = !isAbs32;
          return false;
        }
      } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
        if (isAbs32 || isRoot) {
          AM.ES = S->getSymbol();
          AM.isRIPRel = !isAbs32;
          return false;
        }
      } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
        if (isAbs32 || isRoot) {
          AM.JT = J->getIndex();
          AM.isRIPRel = !isAbs32;
          return false;
        }
      }
    }
    break;
  }

  case ISD::FrameIndex:
    if (AM.BaseType == X86ISelAddressMode::RegBase && AM.Base.Reg.Val == 0) {
      AM.BaseType = X86ISelAddressMode::FrameIndexBase;
      AM.Base.FrameIndex = cast<FrameIndexSDNode>(N)->getIndex();
      return false;
    }
    break;

  case ISD::SHL:
    if (AlreadySelected || AM.IndexReg.Val != 0 || AM.Scale != 1)
      break;

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1))) {
      unsigned Val = CN->getValue();
      if (Val == 1 || Val == 2 || Val == 3) {
        AM.Scale = 1 << Val;
        SDOperand ShVal = N.Val->getOperand(0);

        // Okay, we know that we have a scale by now.  However, if the scaled
        // value is an add of something and a constant, we can fold the
        // constant into the disp field here.
        if (ShVal.Val->getOpcode() == ISD::ADD && ShVal.hasOneUse() &&
            isa<ConstantSDNode>(ShVal.Val->getOperand(1))) {
          AM.IndexReg = ShVal.Val->getOperand(0);
          ConstantSDNode *AddVal =
            cast<ConstantSDNode>(ShVal.Val->getOperand(1));
          uint64_t Disp = AM.Disp + (AddVal->getValue() << Val);
          if (isInt32(Disp))
            AM.Disp = Disp;
          else
            AM.IndexReg = ShVal;
        } else {
          AM.IndexReg = ShVal;
        }
        return false;
      }
      break;
    }

  case ISD::SMUL_LOHI:
  case ISD::UMUL_LOHI:
    // A mul_lohi where we need the low part can be folded as a plain multiply.
    if (N.ResNo != 0) break;
    // FALL THROUGH
  case ISD::MUL:
    // X*[3,5,9] -> X+X*[2,4,8]
    if (!AlreadySelected &&
        AM.BaseType == X86ISelAddressMode::RegBase &&
        AM.Base.Reg.Val == 0 &&
        AM.IndexReg.Val == 0) {
      if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.Val->getOperand(1)))
        if (CN->getValue() == 3 || CN->getValue() == 5 || CN->getValue() == 9) {
          AM.Scale = unsigned(CN->getValue())-1;

          SDOperand MulVal = N.Val->getOperand(0);
          SDOperand Reg;

          // Okay, we know that we have a scale by now.  However, if the scaled
          // value is an add of something and a constant, we can fold the
          // constant into the disp field here.
          if (MulVal.Val->getOpcode() == ISD::ADD && MulVal.hasOneUse() &&
              isa<ConstantSDNode>(MulVal.Val->getOperand(1))) {
            Reg = MulVal.Val->getOperand(0);
            ConstantSDNode *AddVal =
              cast<ConstantSDNode>(MulVal.Val->getOperand(1));
            uint64_t Disp = AM.Disp + AddVal->getValue() * CN->getValue();
            if (isInt32(Disp))
              AM.Disp = Disp;
            else
              Reg = N.Val->getOperand(0);
          } else {
            Reg = N.Val->getOperand(0);
          }

          AM.IndexReg = AM.Base.Reg = Reg;
          return false;
        }
    }
    break;

  case ISD::ADD:
    if (!AlreadySelected) {
      X86ISelAddressMode Backup = AM;
      if (!MatchAddress(N.Val->getOperand(0), AM, false, Depth+1) &&
          !MatchAddress(N.Val->getOperand(1), AM, false, Depth+1))
        return false;
      AM = Backup;
      if (!MatchAddress(N.Val->getOperand(1), AM, false, Depth+1) &&
          !MatchAddress(N.Val->getOperand(0), AM, false, Depth+1))
        return false;
      AM = Backup;
    }
    break;

  case ISD::OR:
    // Handle "X | C" as "X + C" iff X is known to have C bits clear.
    if (AlreadySelected) break;

    if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
      X86ISelAddressMode Backup = AM;
      // Start with the LHS as an addr mode.
      if (!MatchAddress(N.getOperand(0), AM, false) &&
          // Address could not have picked a GV address for the displacement.
          AM.GV == NULL &&
          // On x86-64, the resultant disp must fit in 32-bits.
          isInt32(AM.Disp + CN->getSignExtended()) &&
          // Check to see if the LHS & C is zero.
          CurDAG->MaskedValueIsZero(N.getOperand(0), CN->getValue())) {
        AM.Disp += CN->getValue();
        return false;
      }
      AM = Backup;
    }
    break;

  case ISD::AND: {
    // Handle "(x << C1) & C2" as "(X & (C2>>C1)) << C1" if safe and if this
    // allows us to fold the shift into this addressing mode.
    if (AlreadySelected) break;
    SDOperand Shift = N.getOperand(0);
    if (Shift.getOpcode() != ISD::SHL) break;

    // Scale must not be used already.
    if (AM.IndexReg.Val != 0 || AM.Scale != 1) break;

    ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
    ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
    if (!C1 || !C2) break;

    // Not likely to be profitable if either the AND or SHIFT node has more
    // than one use (unless all uses are for address computation). Besides,
    // isel mechanism requires their node ids to be reused.
    if (!N.hasOneUse() || !Shift.hasOneUse())
      break;

    // Verify that the shift amount is something we can fold.
    unsigned ShiftCst = C1->getValue();
    if (ShiftCst != 1 && ShiftCst != 2 && ShiftCst != 3)
      break;

    // Get the new AND mask, this folds to a constant.
    SDOperand NewANDMask = CurDAG->getNode(ISD::SRL, N.getValueType(),
                                           SDOperand(C2, 0), SDOperand(C1, 0));
    SDOperand NewAND = CurDAG->getNode(ISD::AND, N.getValueType(),
                                       Shift.getOperand(0), NewANDMask);
    NewANDMask.Val->setNodeId(Shift.Val->getNodeId());
    NewAND.Val->setNodeId(N.Val->getNodeId());

    AM.Scale = 1 << ShiftCst;
    AM.IndexReg = NewAND;
    return false;
  }
  }

  return MatchAddressBase(N, AM, isRoot, Depth);
}

/// MatchAddressBase - Helper for MatchAddress. Add the specified node to the
/// specified addressing mode without any further recursion.
bool X86DAGToDAGISel::MatchAddressBase(SDOperand N, X86ISelAddressMode &AM,
                                       bool isRoot, unsigned Depth) {
  // Is the base register already occupied?
  if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.Val) {
    // If so, check to see if the scale index register is set.
    if (AM.IndexReg.Val == 0) {
      AM.IndexReg = N;
      AM.Scale = 1;
      return false;
    }

    // Otherwise, we cannot select it.
    return true;
  }

  // Default, generate it as a register.
  AM.BaseType = X86ISelAddressMode::RegBase;
  AM.Base.Reg = N;
  return false;
}

/// SelectAddr - returns true if it is able to pattern match an addressing
/// mode.  It returns the operands which make up the maximal addressing mode
/// it can match by reference.
bool X86DAGToDAGISel::SelectAddr(SDOperand Op, SDOperand N, SDOperand &Base,
                                 SDOperand &Scale, SDOperand &Index,
                                 SDOperand &Disp) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  MVT::ValueType VT = N.getValueType();
  if (AM.BaseType == X86ISelAddressMode::RegBase) {
    if (!AM.Base.Reg.Val)
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  }

  if (!AM.IndexReg.Val)
    AM.IndexReg = CurDAG->getRegister(0, VT);

  getAddressOperands(AM, Base, Scale, Index, Disp);
  return true;
}

/// isZeroNode - Returns true if Elt is a constant zero or a floating point
/// constant +0.0.
static inline bool isZeroNode(SDOperand Elt) {
  return ((isa<ConstantSDNode>(Elt) &&
           cast<ConstantSDNode>(Elt)->getValue() == 0) ||
          (isa<ConstantFPSDNode>(Elt) &&
           cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}


/// SelectScalarSSELoad - Match a scalar SSE load.  In particular, we want to
/// match a load whose top elements are either undef or zeros.  The load flavor
/// is derived from the type of N, which is either v4f32 or v2f64.
bool X86DAGToDAGISel::SelectScalarSSELoad(SDOperand Op, SDOperand Pred,
                                          SDOperand N, SDOperand &Base,
                                          SDOperand &Scale, SDOperand &Index,
                                          SDOperand &Disp, SDOperand &InChain,
                                          SDOperand &OutChain) {
  if (N.getOpcode() == ISD::SCALAR_TO_VECTOR) {
    InChain = N.getOperand(0).getValue(1);
    if (ISD::isNON_EXTLoad(InChain.Val) &&
        InChain.getValue(0).hasOneUse() &&
        N.hasOneUse() &&
        CanBeFoldedBy(N.Val, Pred.Val, Op.Val)) {
      LoadSDNode *LD = cast<LoadSDNode>(InChain);
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
        return false;
      OutChain = LD->getChain();
      return true;
    }
  }

  // Also handle the case where we explicitly require zeros in the top
  // elements.  This is a vector shuffle from the zero vector.
  if (N.getOpcode() == ISD::VECTOR_SHUFFLE && N.Val->hasOneUse() &&
      // Check to see if the top elements are all zeros (or bitcast of zeros).
      ISD::isBuildVectorAllZeros(N.getOperand(0).Val) &&
      N.getOperand(1).getOpcode() == ISD::SCALAR_TO_VECTOR &&
      N.getOperand(1).Val->hasOneUse() &&
      ISD::isNON_EXTLoad(N.getOperand(1).getOperand(0).Val) &&
      N.getOperand(1).getOperand(0).hasOneUse()) {
    // Check to see if the shuffle mask is 4/L/L/L or 2/L, where L is something
    // from the LHS.
    unsigned VecWidth=MVT::getVectorNumElements(N.getOperand(0).getValueType());
    SDOperand ShufMask = N.getOperand(2);
    assert(ShufMask.getOpcode() == ISD::BUILD_VECTOR && "Invalid shuf mask!");
    if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(ShufMask.getOperand(0))) {
      if (C->getValue() == VecWidth) {
        for (unsigned i = 1; i != VecWidth; ++i) {
          if (ShufMask.getOperand(i).getOpcode() == ISD::UNDEF) {
            // ok.
          } else {
            ConstantSDNode *C = cast<ConstantSDNode>(ShufMask.getOperand(i));
            if (C->getValue() >= VecWidth) return false;
          }
        }
      }

      // Okay, this is a zero extending load.  Fold it.
      LoadSDNode *LD = cast<LoadSDNode>(N.getOperand(1).getOperand(0));
      if (!SelectAddr(Op, LD->getBasePtr(), Base, Scale, Index, Disp))
        return false;
      OutChain = LD->getChain();
      InChain = SDOperand(LD, 1);
      return true;
    }
  }
  return false;
}


/// SelectLEAAddr - it calls SelectAddr and determines if the maximal addressing
/// mode it matches can be cost effectively emitted as an LEA instruction.
bool X86DAGToDAGISel::SelectLEAAddr(SDOperand Op, SDOperand N,
                                    SDOperand &Base, SDOperand &Scale,
                                    SDOperand &Index, SDOperand &Disp) {
  X86ISelAddressMode AM;
  if (MatchAddress(N, AM))
    return false;

  MVT::ValueType VT = N.getValueType();
  unsigned Complexity = 0;
  if (AM.BaseType == X86ISelAddressMode::RegBase)
    if (AM.Base.Reg.Val)
      Complexity = 1;
    else
      AM.Base.Reg = CurDAG->getRegister(0, VT);
  else if (AM.BaseType == X86ISelAddressMode::FrameIndexBase)
    Complexity = 4;

  if (AM.IndexReg.Val)
    Complexity++;
  else
    AM.IndexReg = CurDAG->getRegister(0, VT);

  // Don't match just leal(,%reg,2). It's cheaper to do addl %reg, %reg, or with
  // a simple shift.
  if (AM.Scale > 1)
    Complexity++;

  // FIXME: We are artificially lowering the criteria to turn ADD %reg, $GA
  // to a LEA. This is determined with some experimentation but is by no means
  // optimal (especially for code size consideration). LEA is nice because of
  // its three-address nature. Tweak the cost function again when we can run
  // convertToThreeAddress() at register allocation time.
  if (AM.GV || AM.CP || AM.ES || AM.JT != -1) {
    // For X86-64, we should always use lea to materialize RIP relative
    // addresses.
    if (Subtarget->is64Bit())
      Complexity = 4;
    else
      Complexity += 2;
  }

  if (AM.Disp && (AM.Base.Reg.Val || AM.IndexReg.Val))
    Complexity++;

  if (Complexity > 2) {
    getAddressOperands(AM, Base, Scale, Index, Disp);
    return true;
  }
  return false;
}

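/// TryFoldLoad - Return true if N is a single-use, non-extending load whose
/// address is allowed (per CanBeFoldedBy) to be folded into P; on success the
/// load's address operands are returned via Base/Scale/Index/Disp.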
bool X86DAGToDAGISel::TryFoldLoad(SDOperand P, SDOperand N,
                                  SDOperand &Base, SDOperand &Scale,
                                  SDOperand &Index, SDOperand &Disp) {
  if (ISD::isNON_EXTLoad(N.Val) &&
      N.hasOneUse() &&
      CanBeFoldedBy(N.Val, P.Val, P.Val))
    return SelectAddr(P, N.getOperand(1), Base, Scale, Index, Disp);
  return false;
}

/// getGlobalBaseReg - Output the instructions required to put the
/// base address to use for accessing globals into a register.
///
SDNode *X86DAGToDAGISel::getGlobalBaseReg() {
  assert(!Subtarget->is64Bit() && "X86-64 PIC uses RIP relative addressing");
  if (!GlobalBaseReg) {
    // Insert the set of GlobalBaseReg into the first MBB of the function
    MachineFunction *MF = BB->getParent();
    MachineBasicBlock &FirstMBB = MF->front();
    MachineBasicBlock::iterator MBBI = FirstMBB.begin();
    MachineRegisterInfo &RegInfo = MF->getRegInfo();
    unsigned PC = RegInfo.createVirtualRegister(X86::GR32RegisterClass);

    const TargetInstrInfo *TII = TM.getInstrInfo();
    // Operand of MovePCtoStack is completely ignored by asm printer. It's
    // only used in JIT code emission as displacement to pc.
    BuildMI(FirstMBB, MBBI, TII->get(X86::MOVPC32r), PC).addImm(0);

    // If we're using vanilla 'GOT' PIC style, we should use relative addressing
    // not to pc, but to _GLOBAL_ADDRESS_TABLE_ external
    if (TM.getRelocationModel() == Reloc::PIC_ &&
        Subtarget->isPICStyleGOT()) {
      GlobalBaseReg = RegInfo.createVirtualRegister(X86::GR32RegisterClass);
      BuildMI(FirstMBB, MBBI, TII->get(X86::ADD32ri), GlobalBaseReg)
        .addReg(PC).addExternalSymbol("_GLOBAL_OFFSET_TABLE_");
    } else {
      GlobalBaseReg = PC;
    }

  }
  return CurDAG->getRegister(GlobalBaseReg, TLI.getPointerTy()).Val;
}

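/// FindCallStartFromCall - Walk up the token chain operands from a
/// call-related node until the CALLSEQ_START that opened the sequence is
/// found.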
static SDNode *FindCallStartFromCall(SDNode *Node) {
  if (Node->getOpcode() == ISD::CALLSEQ_START) return Node;
  assert(Node->getOperand(0).getValueType() == MVT::Other &&
         "Node doesn't have a token chain argument!");
  return FindCallStartFromCall(Node->getOperand(0).Val);
}

SDNode *X86DAGToDAGISel::getTruncate(SDOperand N0, MVT::ValueType VT) {
  SDOperand SRIdx;
  switch (VT) {
  case MVT::i8:
    SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
    // Ensure that the source register has an 8-bit subreg on 32-bit targets
    if (!Subtarget->is64Bit()) {
      unsigned Opc;
      MVT::ValueType VT;
      switch (N0.getValueType()) {
      default: assert(0 && "Unknown truncate!");
      case MVT::i16:
        Opc = X86::MOV16to16_;
        VT = MVT::i16;
        break;
      case MVT::i32:
        Opc = X86::MOV32to32_;
        VT = MVT::i32;
        break;
      }
      N0 = SDOperand(CurDAG->getTargetNode(Opc, VT, MVT::Flag, N0), 0);
      return CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
                                   VT, N0, SRIdx, N0.getValue(1));
    }
    break;
  case MVT::i16:
    SRIdx = CurDAG->getTargetConstant(2, MVT::i32); // SubRegSet 2
    break;
  case MVT::i32:
    SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3
    break;
  default: assert(0 && "Unknown truncate!"); break;
  }
  return CurDAG->getTargetNode(X86::EXTRACT_SUBREG, VT, N0, SRIdx);
}


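/// Select - X86-specific selection for a single node.  Returns the target
/// node that replaces N, or null when nothing further needs to be emitted
/// here (for example, the node was already selected or its results were
/// rewired via ReplaceUses).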
SDNode *X86DAGToDAGISel::Select(SDOperand N) {
  SDNode *Node = N.Val;
  MVT::ValueType NVT = Node->getValueType(0);
  unsigned Opc, MOpc;
  unsigned Opcode = Node->getOpcode();

#ifndef NDEBUG
  DOUT << std::string(Indent, ' ') << "Selecting: ";
  DEBUG(Node->dump(CurDAG));
  DOUT << "\n";
  Indent += 2;
#endif

  if (Opcode >= ISD::BUILTIN_OP_END && Opcode < X86ISD::FIRST_NUMBER) {
#ifndef NDEBUG
    DOUT << std::string(Indent-2, ' ') << "== ";
    DEBUG(Node->dump(CurDAG));
    DOUT << "\n";
    Indent -= 2;
#endif
    return NULL;   // Already selected.
  }

  switch (Opcode) {
    default: break;
    case X86ISD::GlobalBaseReg:
      return getGlobalBaseReg();

    case X86ISD::FP_GET_RESULT2: {
      SDOperand Chain = N.getOperand(0);
      SDOperand InFlag = N.getOperand(1);
      AddToISelQueue(Chain);
      AddToISelQueue(InFlag);
      std::vector<MVT::ValueType> Tys;
      Tys.push_back(MVT::f80);
      Tys.push_back(MVT::f80);
      Tys.push_back(MVT::Other);
      Tys.push_back(MVT::Flag);
      SDOperand Ops[] = { Chain, InFlag };
      SDNode *ResNode = CurDAG->getTargetNode(X86::FpGETRESULT80x2, Tys,
                                              Ops, 2);
      Chain = SDOperand(ResNode, 2);
      InFlag = SDOperand(ResNode, 3);
      ReplaceUses(SDOperand(N.Val, 2), Chain);
      ReplaceUses(SDOperand(N.Val, 3), InFlag);
      return ResNode;
    }

    case ISD::ADD: {
      // Turn ADD X, c to MOV32ri X+c. This cannot be done with tblgen'd
      // code and is matched first so as to prevent it from being turned into
      // LEA32r X+c.
      // In 64-bit small code size mode, use LEA to take advantage of
      // RIP-relative addressing.
      if (TM.getCodeModel() != CodeModel::Small)
        break;
      MVT::ValueType PtrVT = TLI.getPointerTy();
      SDOperand N0 = N.getOperand(0);
      SDOperand N1 = N.getOperand(1);
      if (N.Val->getValueType(0) == PtrVT &&
          N0.getOpcode() == X86ISD::Wrapper &&
          N1.getOpcode() == ISD::Constant) {
        unsigned Offset = (unsigned)cast<ConstantSDNode>(N1)->getValue();
        SDOperand C(0, 0);
        // TODO: handle ExternalSymbolSDNode.
        if (GlobalAddressSDNode *G =
            dyn_cast<GlobalAddressSDNode>(N0.getOperand(0))) {
          C = CurDAG->getTargetGlobalAddress(G->getGlobal(), PtrVT,
                                             G->getOffset() + Offset);
        } else if (ConstantPoolSDNode *CP =
                   dyn_cast<ConstantPoolSDNode>(N0.getOperand(0))) {
          C = CurDAG->getTargetConstantPool(CP->getConstVal(), PtrVT,
                                            CP->getAlignment(),
                                            CP->getOffset()+Offset);
        }

        if (C.Val) {
          if (Subtarget->is64Bit()) {
            SDOperand Ops[] = { CurDAG->getRegister(0, PtrVT), getI8Imm(1),
                                CurDAG->getRegister(0, PtrVT), C };
            return CurDAG->SelectNodeTo(N.Val, X86::LEA64r, MVT::i64, Ops, 4);
          } else
            return CurDAG->SelectNodeTo(N.Val, X86::MOV32ri, PtrVT, C);
        }
      }

      // Other cases are handled by auto-generated code.
      break;
    }

    case ISD::SMUL_LOHI:
    case ISD::UMUL_LOHI: {
      SDOperand N0 = Node->getOperand(0);
      SDOperand N1 = Node->getOperand(1);

      // There are several forms of IMUL that just return the low part and
      // don't have fixed-register operands. If we don't need the high part,
      // use these instead. They can be selected with the generated ISel code.
      if (NVT != MVT::i8 &&
          N.getValue(1).use_empty()) {
        N = CurDAG->getNode(ISD::MUL, NVT, N0, N1);
        break;
      }

      bool isSigned = Opcode == ISD::SMUL_LOHI;
      if (!isSigned)
        switch (NVT) {
        default: assert(0 && "Unsupported VT!");
        case MVT::i8:  Opc = X86::MUL8r;  MOpc = X86::MUL8m;  break;
        case MVT::i16: Opc = X86::MUL16r; MOpc = X86::MUL16m; break;
        case MVT::i32: Opc = X86::MUL32r; MOpc = X86::MUL32m; break;
        case MVT::i64: Opc = X86::MUL64r; MOpc = X86::MUL64m; break;
        }
      else
        switch (NVT) {
        default: assert(0 && "Unsupported VT!");
        case MVT::i8:  Opc = X86::IMUL8r;  MOpc = X86::IMUL8m;  break;
        case MVT::i16: Opc = X86::IMUL16r; MOpc = X86::IMUL16m; break;
        case MVT::i32: Opc = X86::IMUL32r; MOpc = X86::IMUL32m; break;
        case MVT::i64: Opc = X86::IMUL64r; MOpc = X86::IMUL64m; break;
        }

      unsigned LoReg, HiReg;
      switch (NVT) {
      default: assert(0 && "Unsupported VT!");
      case MVT::i8:  LoReg = X86::AL;  HiReg = X86::AH;  break;
      case MVT::i16: LoReg = X86::AX;  HiReg = X86::DX;  break;
      case MVT::i32: LoReg = X86::EAX; HiReg = X86::EDX; break;
      case MVT::i64: LoReg = X86::RAX; HiReg = X86::RDX; break;
      }

      SDOperand Tmp0, Tmp1, Tmp2, Tmp3;
      bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
      // Multiplication is commutative.
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001261 if (!foldedLoad) {
1262 foldedLoad = TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3);
Evan Cheng508fe8b2007-08-02 05:48:35 +00001263 if (foldedLoad)
1264 std::swap(N0, N1);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001265 }
1266
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001267 AddToISelQueue(N0);
Dan Gohman5a199552007-10-08 18:33:35 +00001268 SDOperand InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), LoReg,
1269 N0, SDOperand()).getValue(1);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001270
1271 if (foldedLoad) {
Dan Gohman5a199552007-10-08 18:33:35 +00001272 AddToISelQueue(N1.getOperand(0));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001273 AddToISelQueue(Tmp0);
1274 AddToISelQueue(Tmp1);
1275 AddToISelQueue(Tmp2);
1276 AddToISelQueue(Tmp3);
Dan Gohman5a199552007-10-08 18:33:35 +00001277 SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001278 SDNode *CNode =
1279 CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001280 InFlag = SDOperand(CNode, 1);
Dan Gohman5a199552007-10-08 18:33:35 +00001281 // Update the chain.
1282 ReplaceUses(N1.getValue(1), SDOperand(CNode, 0));
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001283 } else {
1284 AddToISelQueue(N1);
1285 InFlag =
1286 SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
1287 }
1288
Dan Gohman5a199552007-10-08 18:33:35 +00001289 // Copy the low half of the result, if it is needed.
1290 if (!N.getValue(0).use_empty()) {
1291 SDOperand Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1292 LoReg, NVT, InFlag);
1293 InFlag = Result.getValue(2);
1294 ReplaceUses(N.getValue(0), Result);
1295#ifndef NDEBUG
1296 DOUT << std::string(Indent-2, ' ') << "=> ";
1297 DEBUG(Result.Val->dump(CurDAG));
1298 DOUT << "\n";
1299#endif
Evan Cheng6f0f0dd2007-08-09 21:59:35 +00001300 }
Dan Gohman5a199552007-10-08 18:33:35 +00001301 // Copy the high half of the result, if it is needed.
1302 if (!N.getValue(1).use_empty()) {
1303 SDOperand Result;
1304 if (HiReg == X86::AH && Subtarget->is64Bit()) {
1305 // Prevent use of AH in a REX instruction by referencing AX instead.
1306 // Shift it down 8 bits.
1307 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1308 X86::AX, MVT::i16, InFlag);
1309 InFlag = Result.getValue(2);
1310 Result = SDOperand(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
1311 CurDAG->getTargetConstant(8, MVT::i8)), 0);
1312 // Then truncate it down to i8.
1313 SDOperand SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
1314 Result = SDOperand(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
1315 MVT::i8, Result, SRIdx), 0);
1316 } else {
1317 Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
1318 HiReg, NVT, InFlag);
1319 InFlag = Result.getValue(2);
1320 }
1321 ReplaceUses(N.getValue(1), Result);
1322#ifndef NDEBUG
1323 DOUT << std::string(Indent-2, ' ') << "=> ";
1324 DEBUG(Result.Val->dump(CurDAG));
1325 DOUT << "\n";
1326#endif
1327 }
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001328
1329#ifndef NDEBUG
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001330 Indent -= 2;
1331#endif
Dan Gohman5a199552007-10-08 18:33:35 +00001332
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001333 return NULL;
1334 }
1335
Dan Gohman5a199552007-10-08 18:33:35 +00001336 case ISD::SDIVREM:
1337 case ISD::UDIVREM: {
1338 SDOperand N0 = Node->getOperand(0);
1339 SDOperand N1 = Node->getOperand(1);
1340
1341 bool isSigned = Opcode == ISD::SDIVREM;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001342 if (!isSigned)
1343 switch (NVT) {
1344 default: assert(0 && "Unsupported VT!");
1345 case MVT::i8: Opc = X86::DIV8r; MOpc = X86::DIV8m; break;
1346 case MVT::i16: Opc = X86::DIV16r; MOpc = X86::DIV16m; break;
1347 case MVT::i32: Opc = X86::DIV32r; MOpc = X86::DIV32m; break;
1348 case MVT::i64: Opc = X86::DIV64r; MOpc = X86::DIV64m; break;
1349 }
1350 else
1351 switch (NVT) {
1352 default: assert(0 && "Unsupported VT!");
1353 case MVT::i8: Opc = X86::IDIV8r; MOpc = X86::IDIV8m; break;
1354 case MVT::i16: Opc = X86::IDIV16r; MOpc = X86::IDIV16m; break;
1355 case MVT::i32: Opc = X86::IDIV32r; MOpc = X86::IDIV32m; break;
1356 case MVT::i64: Opc = X86::IDIV64r; MOpc = X86::IDIV64m; break;
1357 }
1358
1359 unsigned LoReg, HiReg;
1360 unsigned ClrOpcode, SExtOpcode;
1361 switch (NVT) {
1362 default: assert(0 && "Unsupported VT!");
1363 case MVT::i8:
1364 LoReg = X86::AL; HiReg = X86::AH;
1365 ClrOpcode = 0;
1366 SExtOpcode = X86::CBW;
1367 break;
1368 case MVT::i16:
1369 LoReg = X86::AX; HiReg = X86::DX;
1370 ClrOpcode = X86::MOV16r0;
1371 SExtOpcode = X86::CWD;
1372 break;
1373 case MVT::i32:
1374 LoReg = X86::EAX; HiReg = X86::EDX;
1375 ClrOpcode = X86::MOV32r0;
1376 SExtOpcode = X86::CDQ;
1377 break;
1378 case MVT::i64:
1379 LoReg = X86::RAX; HiReg = X86::RDX;
1380 ClrOpcode = X86::MOV64r0;
1381 SExtOpcode = X86::CQO;
1382 break;
1383 }
1384
Dan Gohman5a199552007-10-08 18:33:35 +00001385 SDOperand Tmp0, Tmp1, Tmp2, Tmp3;
1386 bool foldedLoad = TryFoldLoad(N, N1, Tmp0, Tmp1, Tmp2, Tmp3);
1387
1388 SDOperand InFlag;
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001389 if (NVT == MVT::i8 && !isSigned) {
1390 // Special case for div8, just use a move with zero extension to AX to
1391 // clear the upper 8 bits (AH).
1392 SDOperand Tmp0, Tmp1, Tmp2, Tmp3, Move, Chain;
1393 if (TryFoldLoad(N, N0, Tmp0, Tmp1, Tmp2, Tmp3)) {
1394 SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N0.getOperand(0) };
1395 AddToISelQueue(N0.getOperand(0));
1396 AddToISelQueue(Tmp0);
1397 AddToISelQueue(Tmp1);
1398 AddToISelQueue(Tmp2);
1399 AddToISelQueue(Tmp3);
1400 Move =
1401 SDOperand(CurDAG->getTargetNode(X86::MOVZX16rm8, MVT::i16, MVT::Other,
1402 Ops, 5), 0);
1403 Chain = Move.getValue(1);
1404 ReplaceUses(N0.getValue(1), Chain);
1405 } else {
1406 AddToISelQueue(N0);
1407 Move =
1408 SDOperand(CurDAG->getTargetNode(X86::MOVZX16rr8, MVT::i16, N0), 0);
1409 Chain = CurDAG->getEntryNode();
1410 }
Dan Gohman5a199552007-10-08 18:33:35 +00001411 Chain = CurDAG->getCopyToReg(Chain, X86::AX, Move, SDOperand());
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001412 InFlag = Chain.getValue(1);
1413 } else {
1414 AddToISelQueue(N0);
1415 InFlag =
Dan Gohman5a199552007-10-08 18:33:35 +00001416 CurDAG->getCopyToReg(CurDAG->getEntryNode(),
1417 LoReg, N0, SDOperand()).getValue(1);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001418 if (isSigned) {
1419 // Sign extend the low part into the high part.
1420 InFlag =
1421 SDOperand(CurDAG->getTargetNode(SExtOpcode, MVT::Flag, InFlag), 0);
1422 } else {
1423 // Zero out the high part, effectively zero extending the input.
1424 SDOperand ClrNode = SDOperand(CurDAG->getTargetNode(ClrOpcode, NVT), 0);
Dan Gohman5a199552007-10-08 18:33:35 +00001425 InFlag = CurDAG->getCopyToReg(CurDAG->getEntryNode(), HiReg,
1426 ClrNode, InFlag).getValue(1);
Dan Gohmanf17a25c2007-07-18 16:29:46 +00001427 }
1428 }
1429
      if (foldedLoad) {
        AddToISelQueue(N1.getOperand(0));
        AddToISelQueue(Tmp0);
        AddToISelQueue(Tmp1);
        AddToISelQueue(Tmp2);
        AddToISelQueue(Tmp3);
        SDOperand Ops[] = { Tmp0, Tmp1, Tmp2, Tmp3, N1.getOperand(0), InFlag };
        SDNode *CNode =
          CurDAG->getTargetNode(MOpc, MVT::Other, MVT::Flag, Ops, 6);
        InFlag = SDOperand(CNode, 1);
        // Update the chain.
        ReplaceUses(N1.getValue(1), SDOperand(CNode, 0));
      } else {
        AddToISelQueue(N1);
        InFlag =
          SDOperand(CurDAG->getTargetNode(Opc, MVT::Flag, N1, InFlag), 0);
      }

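      // The node produces both the quotient (low half) and the remainder
      // (high half); each copy below is emitted only if that result has
      // uses, and threading InFlag through keeps the copies glued to the
      // divide.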
      // Copy the division (low) result, if it is needed.
      if (!N.getValue(0).use_empty()) {
        SDOperand Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                                  LoReg, NVT, InFlag);
        InFlag = Result.getValue(2);
        ReplaceUses(N.getValue(0), Result);
#ifndef NDEBUG
        DOUT << std::string(Indent-2, ' ') << "=> ";
        DEBUG(Result.Val->dump(CurDAG));
        DOUT << "\n";
#endif
      }
      // Copy the remainder (high) result, if it is needed.
      if (!N.getValue(1).use_empty()) {
        SDOperand Result;
        if (HiReg == X86::AH && Subtarget->is64Bit()) {
          // Prevent use of AH in a REX instruction by referencing AX instead.
          // Shift it down 8 bits.
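          // (A REX-prefixed instruction cannot encode AH/BH/CH/DH; those
          // encodings address SPL/BPL/SIL/DIL instead, so on x86-64 the
          // remainder is read via AX and shifted down.)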
          Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                          X86::AX, MVT::i16, InFlag);
          InFlag = Result.getValue(2);
          Result = SDOperand(CurDAG->getTargetNode(X86::SHR16ri, MVT::i16, Result,
                                       CurDAG->getTargetConstant(8, MVT::i8)), 0);
          // Then truncate it down to i8.
          SDOperand SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
          Result = SDOperand(CurDAG->getTargetNode(X86::EXTRACT_SUBREG,
                                                   MVT::i8, Result, SRIdx), 0);
        } else {
          Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(),
                                          HiReg, NVT, InFlag);
          InFlag = Result.getValue(2);
        }
        ReplaceUses(N.getValue(1), Result);
#ifndef NDEBUG
        DOUT << std::string(Indent-2, ' ') << "=> ";
        DEBUG(Result.Val->dump(CurDAG));
        DOUT << "\n";
#endif
      }

#ifndef NDEBUG
      Indent -= 2;
#endif

      return NULL;
    }

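    // any_extend leaves the high bits of the result undefined, so no real
    // extension instruction is needed: the narrow value is simply inserted
    // into the low subregister of a wider register.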
    case ISD::ANY_EXTEND: {
      SDOperand N0 = Node->getOperand(0);
      AddToISelQueue(N0);
      if (NVT == MVT::i64 || NVT == MVT::i32 || NVT == MVT::i16) {
        SDOperand SRIdx;
        switch(N0.getValueType()) {
        case MVT::i32:
          SRIdx = CurDAG->getTargetConstant(3, MVT::i32); // SubRegSet 3
          break;
        case MVT::i16:
          SRIdx = CurDAG->getTargetConstant(2, MVT::i32); // SubRegSet 2
          break;
        case MVT::i8:
          if (Subtarget->is64Bit())
            SRIdx = CurDAG->getTargetConstant(1, MVT::i32); // SubRegSet 1
          break;
        default: assert(0 && "Unknown any_extend!");
        }
        if (SRIdx.Val) {
          SDNode *ResNode = CurDAG->getTargetNode(X86::INSERT_SUBREG,
                                                  NVT, N0, SRIdx);

#ifndef NDEBUG
          DOUT << std::string(Indent-2, ' ') << "=> ";
          DEBUG(ResNode->dump(CurDAG));
          DOUT << "\n";
          Indent -= 2;
#endif
          return ResNode;
        } // Otherwise let generated ISel handle it.
      }
      break;
    }

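    // sign_extend_inreg is selected as a truncate to the smaller type
    // (via the getTruncate helper) followed by a MOVSX back up to NVT.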
    case ISD::SIGN_EXTEND_INREG: {
      SDOperand N0 = Node->getOperand(0);
      AddToISelQueue(N0);

      MVT::ValueType SVT = cast<VTSDNode>(Node->getOperand(1))->getVT();
      SDOperand TruncOp = SDOperand(getTruncate(N0, SVT), 0);
      unsigned Opc = 0;
      switch (NVT) {
      case MVT::i16:
        if (SVT == MVT::i8) Opc = X86::MOVSX16rr8;
        else assert(0 && "Unknown sign_extend_inreg!");
        break;
      case MVT::i32:
        switch (SVT) {
        case MVT::i8:  Opc = X86::MOVSX32rr8;  break;
        case MVT::i16: Opc = X86::MOVSX32rr16; break;
        default: assert(0 && "Unknown sign_extend_inreg!");
        }
        break;
      case MVT::i64:
        switch (SVT) {
        case MVT::i8:  Opc = X86::MOVSX64rr8;  break;
        case MVT::i16: Opc = X86::MOVSX64rr16; break;
        case MVT::i32: Opc = X86::MOVSX64rr32; break;
        default: assert(0 && "Unknown sign_extend_inreg!");
        }
        break;
      default: assert(0 && "Unknown sign_extend_inreg!");
      }

      SDNode *ResNode = CurDAG->getTargetNode(Opc, NVT, TruncOp);

#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(TruncOp.Val->dump(CurDAG));
      DOUT << "\n";
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(ResNode->dump(CurDAG));
      DOUT << "\n";
      Indent -= 2;
#endif
      return ResNode;
      break;
    }

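    // Truncates are handled entirely by the getTruncate helper defined
    // earlier in this file; no separate extend or mask is emitted here.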
    case ISD::TRUNCATE: {
      SDOperand Input = Node->getOperand(0);
      AddToISelQueue(Node->getOperand(0));
      SDNode *ResNode = getTruncate(Input, NVT);

#ifndef NDEBUG
      DOUT << std::string(Indent-2, ' ') << "=> ";
      DEBUG(ResNode->dump(CurDAG));
      DOUT << "\n";
      Indent -= 2;
#endif
      return ResNode;
      break;
    }
  }

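  // Anything not matched by the hand-written cases above falls through to
  // SelectCode, the matcher generated by TableGen from the .td patterns.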
  SDNode *ResNode = SelectCode(N);

#ifndef NDEBUG
  DOUT << std::string(Indent-2, ' ') << "=> ";
  if (ResNode == NULL || ResNode == N.Val)
    DEBUG(N.Val->dump(CurDAG));
  else
    DEBUG(ResNode->dump(CurDAG));
  DOUT << "\n";
  Indent -= 2;
#endif

  return ResNode;
}

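/// SelectInlineAsmMemoryOperand - Expand an inline asm memory constraint.
/// Only the 'm' constraint is handled: the operand is run through the normal
/// addressing-mode matcher and the four address components (base, scale,
/// index, displacement) are pushed onto OutOps.  Returning true signals that
/// the constraint could not be handled.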
bool X86DAGToDAGISel::
SelectInlineAsmMemoryOperand(const SDOperand &Op, char ConstraintCode,
                             std::vector<SDOperand> &OutOps, SelectionDAG &DAG){
  SDOperand Op0, Op1, Op2, Op3;
  switch (ConstraintCode) {
  case 'o':   // offsetable        ??
  case 'v':   // not offsetable    ??
  default: return true;
  case 'm':   // memory
    if (!SelectAddr(Op, Op, Op0, Op1, Op2, Op3))
      return true;
    break;
  }

  OutOps.push_back(Op0);
  OutOps.push_back(Op1);
  OutOps.push_back(Op2);
  OutOps.push_back(Op3);
  AddToISelQueue(Op0);
  AddToISelQueue(Op1);
  AddToISelQueue(Op2);
  AddToISelQueue(Op3);
  return false;
}

/// createX86ISelDag - This pass converts a legalized DAG into a
/// X86-specific DAG, ready for instruction scheduling.
///
FunctionPass *llvm::createX86ISelDag(X86TargetMachine &TM, bool Fast) {
  return new X86DAGToDAGISel(TM, Fast);
}