//===-- SelectionDAGISel.cpp - Implement the SelectionDAGISel class -------===//
//
// The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License.  See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the SelectionDAGISel class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "isel"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Debug.h"
#include <map>
#include <iostream>
using namespace llvm;

namespace llvm {
  //===--------------------------------------------------------------------===//
  /// FunctionLoweringInfo - This contains information that is global to a
  /// function that is used when lowering a region of the function.
  class FunctionLoweringInfo {
  public:
    TargetLowering &TLI;
    Function &Fn;
    MachineFunction &MF;
    SSARegMap *RegMap;

    FunctionLoweringInfo(TargetLowering &TLI, Function &Fn,MachineFunction &MF);

    /// MBBMap - A mapping from LLVM basic blocks to their machine code entry.
    std::map<const BasicBlock*, MachineBasicBlock *> MBBMap;

    /// ValueMap - Since we emit code for the function a basic block at a time,
    /// we must remember which virtual registers hold the values that are used
    /// across basic blocks.
    std::map<const Value*, unsigned> ValueMap;

    /// StaticAllocaMap - Keep track of frame indices for fixed sized allocas in
    /// the entry block.  This allows the allocas to be efficiently referenced
    /// anywhere in the function.
    std::map<const AllocaInst*, int> StaticAllocaMap;

    unsigned MakeReg(MVT::ValueType VT) {
      return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
    }

    unsigned CreateRegForValue(const Value *V) {
      MVT::ValueType VT = TLI.getValueType(V->getType());
      // The common case is that we will only create one register for this
      // value.  If we have that case, create and return the virtual register.
      unsigned NV = TLI.getNumElements(VT);
      if (NV == 1) return MakeReg(VT);

      // If this value is represented with multiple target registers, make sure
      // to create enough consecutive registers of the right (smaller) type.
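      // For example, a 64-bit integer on a target whose widest legal integer
      // register is 32 bits would (as an illustration; the actual split is
      // whatever TLI.getNumElements reports) become two consecutive i32
      // virtual registers.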
      unsigned NT = VT-1;  // Find the type to use.
      while (TLI.getNumElements((MVT::ValueType)NT) != 1)
        --NT;

      unsigned R = MakeReg((MVT::ValueType)NT);
      for (unsigned i = 1; i != NV; ++i)
        MakeReg((MVT::ValueType)NT);
      return R;
    }

    unsigned InitializeRegForValue(const Value *V) {
      unsigned &R = ValueMap[V];
      assert(R == 0 && "Already initialized this value register!");
      return R = CreateRegForValue(V);
    }
  };
}

/// isUsedOutsideOfDefiningBlock - Return true if this instruction is used by
/// PHI nodes or outside of the basic block that defines it.
static bool isUsedOutsideOfDefiningBlock(Instruction *I) {
  if (isa<PHINode>(I)) return true;
  BasicBlock *BB = I->getParent();
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI)
    if (cast<Instruction>(*UI)->getParent() != BB || isa<PHINode>(*UI))
      return true;
  return false;
}

FunctionLoweringInfo::FunctionLoweringInfo(TargetLowering &tli,
                                           Function &fn, MachineFunction &mf)
    : TLI(tli), Fn(fn), MF(mf), RegMap(MF.getSSARegMap()) {

  // Initialize the mapping of values to registers.  This is only set up for
  // instruction values that are used outside of the block that defines
  // them.
  for (Function::aiterator AI = Fn.abegin(), E = Fn.aend(); AI != E; ++AI)
    InitializeRegForValue(AI);

  Function::iterator BB = Fn.begin(), E = Fn.end();
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      if (ConstantUInt *CUI = dyn_cast<ConstantUInt>(AI->getArraySize())) {
        const Type *Ty = AI->getAllocatedType();
        uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
        unsigned Align = TLI.getTargetData().getTypeAlignment(Ty);
        TySize *= CUI->getValue();   // Get total allocated size.
        StaticAllocaMap[AI] =
          MF.getFrameInfo()->CreateStackObject((unsigned)TySize, Align);
      }

  for (; BB != E; ++BB)
    for (BasicBlock::iterator I = BB->begin(), e = BB->end(); I != e; ++I)
      if (!I->use_empty() && isUsedOutsideOfDefiningBlock(I))
        if (!isa<AllocaInst>(I) ||
            !StaticAllocaMap.count(cast<AllocaInst>(I)))
          InitializeRegForValue(I);

  // Create an initial MachineBasicBlock for each LLVM BasicBlock in F.  This
  // also creates the initial PHI MachineInstrs, though none of the input
  // operands are populated.
  for (Function::iterator BB = Fn.begin(), E = Fn.end(); BB != E; ++BB) {
    MachineBasicBlock *MBB = new MachineBasicBlock(BB);
    MBBMap[BB] = MBB;
    MF.getBasicBlockList().push_back(MBB);

    // Create Machine PHI nodes for LLVM PHI nodes, lowering them as
    // appropriate.
    PHINode *PN;
    for (BasicBlock::iterator I = BB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned NumElements =
          TLI.getNumElements(TLI.getValueType(PN->getType()));
        unsigned PHIReg = ValueMap[PN];
        assert(PHIReg && "PHI node does not have an assigned virtual register!");
        for (unsigned i = 0; i != NumElements; ++i)
          BuildMI(MBB, TargetInstrInfo::PHI, PN->getNumOperands(), PHIReg+i);
      }
  }
}



//===----------------------------------------------------------------------===//
/// SelectionDAGLowering - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
/// Also, targets can overload any lowering method.
///
namespace llvm {
class SelectionDAGLowering {
  MachineBasicBlock *CurMBB;

  std::map<const Value*, SDOperand> NodeMap;

public:
  // TLI - This is information that describes the available target features we
  // need for lowering.  This indicates when operations are unavailable,
  // implemented with a libcall, etc.
  TargetLowering &TLI;
  SelectionDAG &DAG;
  const TargetData &TD;

  /// FuncInfo - Information about the function as a whole.
  ///
  FunctionLoweringInfo &FuncInfo;

  SelectionDAGLowering(SelectionDAG &dag, TargetLowering &tli,
                       FunctionLoweringInfo &funcinfo)
    : TLI(tli), DAG(dag), TD(DAG.getTarget().getTargetData()),
      FuncInfo(funcinfo) {
  }

  void visit(Instruction &I) { visit(I.getOpcode(), I); }

  void visit(unsigned Opcode, User &I) {
    switch (Opcode) {
    default: assert(0 && "Unknown instruction type encountered!");
             abort();
      // Build the switch statement using the Instruction.def file.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
    case Instruction::OPCODE:return visit##OPCODE((CLASS&)I);
#include "llvm/Instruction.def"
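      // Each HANDLE_INST entry in Instruction.def expands to one case of this
      // switch; the Add entry, for example, becomes roughly:
      //   case Instruction::Add: return visitAdd((BinaryOperator&)I);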
    }
  }

  void setCurrentBasicBlock(MachineBasicBlock *MBB) { CurMBB = MBB; }


  SDOperand getIntPtrConstant(uint64_t Val) {
    return DAG.getConstant(Val, TLI.getPointerTy());
  }

  SDOperand getValue(const Value *V) {
    SDOperand &N = NodeMap[V];
    if (N.Val) return N;

    MVT::ValueType VT = TLI.getValueType(V->getType());
    if (Constant *C = const_cast<Constant*>(dyn_cast<Constant>(V)))
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
        visit(CE->getOpcode(), *CE);
        assert(N.Val && "visit didn't populate the ValueMap!");
        return N;
      } else if (GlobalValue *GV = dyn_cast<GlobalValue>(C)) {
        return N = DAG.getGlobalAddress(GV, VT);
      } else if (isa<ConstantPointerNull>(C)) {
        return N = DAG.getConstant(0, TLI.getPointerTy());
      } else if (isa<UndefValue>(C)) {
        /// FIXME: Implement UNDEFVALUE better.
        if (MVT::isInteger(VT))
          return N = DAG.getConstant(0, VT);
        else if (MVT::isFloatingPoint(VT))
          return N = DAG.getConstantFP(0, VT);
        else
          assert(0 && "Unknown value type!");

      } else if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
        return N = DAG.getConstantFP(CFP->getValue(), VT);
      } else {
        // Canonicalize all constant ints to be unsigned.
        return N = DAG.getConstant(cast<ConstantIntegral>(C)->getRawValue(),VT);
      }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
      std::map<const AllocaInst*, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        return DAG.getFrameIndex(SI->second, TLI.getPointerTy());
    }

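    // Otherwise the value was defined in another basic block (or is a formal
    // argument), so FunctionLoweringInfo assigned it a virtual register; read
    // it back into this block with a CopyFromReg node.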
    std::map<const Value*, unsigned>::const_iterator VMI =
      FuncInfo.ValueMap.find(V);
    assert(VMI != FuncInfo.ValueMap.end() && "Value not in map!");
    return N = DAG.getCopyFromReg(VMI->second, VT);
  }

  const SDOperand &setValue(const Value *V, SDOperand NewN) {
    SDOperand &N = NodeMap[V];
    assert(N.Val == 0 && "Already set a value for this node!");
    return N = NewN;
  }

  // Terminator instructions.
  void visitRet(ReturnInst &I);
  void visitBr(BranchInst &I);
  void visitUnreachable(UnreachableInst &I) { /* noop */ }

  // These all get lowered before this pass.
  void visitSwitch(SwitchInst &I) { assert(0 && "TODO"); }
  void visitInvoke(InvokeInst &I) { assert(0 && "TODO"); }
  void visitUnwind(UnwindInst &I) { assert(0 && "TODO"); }

  //
  void visitBinary(User &I, unsigned Opcode);
  void visitAdd(User &I) { visitBinary(I, ISD::ADD); }
  void visitSub(User &I) { visitBinary(I, ISD::SUB); }
  void visitMul(User &I) { visitBinary(I, ISD::MUL); }
  void visitDiv(User &I) {
    visitBinary(I, I.getType()->isUnsigned() ? ISD::UDIV : ISD::SDIV);
  }
  void visitRem(User &I) {
    visitBinary(I, I.getType()->isUnsigned() ? ISD::UREM : ISD::SREM);
  }
  void visitAnd(User &I) { visitBinary(I, ISD::AND); }
  void visitOr (User &I) { visitBinary(I, ISD::OR); }
  void visitXor(User &I) { visitBinary(I, ISD::XOR); }
  void visitShl(User &I) { visitBinary(I, ISD::SHL); }
  void visitShr(User &I) {
    visitBinary(I, I.getType()->isUnsigned() ? ISD::SRL : ISD::SRA);
  }

  void visitSetCC(User &I, ISD::CondCode SignedOpc, ISD::CondCode UnsignedOpc);
  void visitSetEQ(User &I) { visitSetCC(I, ISD::SETEQ, ISD::SETEQ); }
  void visitSetNE(User &I) { visitSetCC(I, ISD::SETNE, ISD::SETNE); }
  void visitSetLE(User &I) { visitSetCC(I, ISD::SETLE, ISD::SETULE); }
  void visitSetGE(User &I) { visitSetCC(I, ISD::SETGE, ISD::SETUGE); }
  void visitSetLT(User &I) { visitSetCC(I, ISD::SETLT, ISD::SETULT); }
  void visitSetGT(User &I) { visitSetCC(I, ISD::SETGT, ISD::SETUGT); }

  void visitGetElementPtr(User &I);
  void visitCast(User &I);
  void visitSelect(User &I);
  //

  void visitMalloc(MallocInst &I);
  void visitFree(FreeInst &I);
  void visitAlloca(AllocaInst &I);
  void visitLoad(LoadInst &I);
  void visitStore(StoreInst &I);
  void visitPHI(PHINode &I) { } // PHI nodes are handled specially.
  void visitCall(CallInst &I);

  void visitVAStart(CallInst &I);
  void visitVANext(VANextInst &I);
  void visitVAArg(VAArgInst &I);
  void visitVAEnd(CallInst &I);
  void visitVACopy(CallInst &I);
  void visitFrameReturnAddress(CallInst &I, bool isFrameAddress);

  void visitMemSet(CallInst &I);
  void visitMemCpy(CallInst &I);
  void visitMemMove(CallInst &I);

  void visitUserOp1(Instruction &I) {
    assert(0 && "UserOp1 should not exist at instruction selection time!");
    abort();
  }
  void visitUserOp2(Instruction &I) {
    assert(0 && "UserOp2 should not exist at instruction selection time!");
    abort();
  }
};
} // end namespace llvm

void SelectionDAGLowering::visitRet(ReturnInst &I) {
  if (I.getNumOperands() == 0) {
    DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, DAG.getRoot()));
    return;
  }

  SDOperand Op1 = getValue(I.getOperand(0));
  switch (Op1.getValueType()) {
  default: assert(0 && "Unknown value type!");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
    // Extend integer types to 32 bits.
    if (I.getOperand(0)->getType()->isSigned())
      Op1 = DAG.getNode(ISD::SIGN_EXTEND, MVT::i32, Op1);
    else
      Op1 = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Op1);
    break;
  case MVT::f32:
    // Extend float to double.
    Op1 = DAG.getNode(ISD::FP_EXTEND, MVT::f64, Op1);
    break;
  case MVT::i32:
  case MVT::i64:
  case MVT::f64:
    break; // No extension needed!
  }

  DAG.setRoot(DAG.getNode(ISD::RET, MVT::Other, DAG.getRoot(), Op1));
}

void SelectionDAGLowering::visitBr(BranchInst &I) {
  // Update machine-CFG edges.
  MachineBasicBlock *Succ0MBB = FuncInfo.MBBMap[I.getSuccessor(0)];
  CurMBB->addSuccessor(Succ0MBB);

  // Figure out which block is immediately after the current one.
  MachineBasicBlock *NextBlock = 0;
  MachineFunction::iterator BBI = CurMBB;
  if (++BBI != CurMBB->getParent()->end())
    NextBlock = BBI;

  if (I.isUnconditional()) {
    // If this is not a fall-through branch, emit the branch.
    if (Succ0MBB != NextBlock)
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, DAG.getRoot(),
                              DAG.getBasicBlock(Succ0MBB)));
  } else {
    MachineBasicBlock *Succ1MBB = FuncInfo.MBBMap[I.getSuccessor(1)];
    CurMBB->addSuccessor(Succ1MBB);

    SDOperand Cond = getValue(I.getCondition());

    if (Succ1MBB == NextBlock) {
      // If the condition is false we fall through, so we only need to branch
      // to Succ #0 when the condition is true.
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, DAG.getRoot(),
                              Cond, DAG.getBasicBlock(Succ0MBB)));
    } else if (Succ0MBB == NextBlock) {
      // If the condition is true we fall through, so we only need to branch
      // to Succ #1 when the condition is false.  Invert the condition first.
      SDOperand True = DAG.getConstant(1, Cond.getValueType());
      Cond = DAG.getNode(ISD::XOR, Cond.getValueType(), Cond, True);
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, DAG.getRoot(),
                              Cond, DAG.getBasicBlock(Succ1MBB)));
    } else {
      // Neither edge is a fall-through.  If the condition is true, jump to
      // Succ #0; otherwise branch unconditionally to Succ #1.
      DAG.setRoot(DAG.getNode(ISD::BRCOND, MVT::Other, DAG.getRoot(),
                              Cond, DAG.getBasicBlock(Succ0MBB)));
      DAG.setRoot(DAG.getNode(ISD::BR, MVT::Other, DAG.getRoot(),
                              DAG.getBasicBlock(Succ1MBB)));
    }
  }
}

void SelectionDAGLowering::visitBinary(User &I, unsigned Opcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  setValue(&I, DAG.getNode(Opcode, Op1.getValueType(), Op1, Op2));
}

void SelectionDAGLowering::visitSetCC(User &I,ISD::CondCode SignedOpcode,
                                      ISD::CondCode UnsignedOpcode) {
  SDOperand Op1 = getValue(I.getOperand(0));
  SDOperand Op2 = getValue(I.getOperand(1));
  ISD::CondCode Opcode = SignedOpcode;
  if (I.getOperand(0)->getType()->isUnsigned())
    Opcode = UnsignedOpcode;
  setValue(&I, DAG.getSetCC(Opcode, Op1, Op2));
}

void SelectionDAGLowering::visitSelect(User &I) {
  SDOperand Cond = getValue(I.getOperand(0));
  SDOperand TrueVal = getValue(I.getOperand(1));
  SDOperand FalseVal = getValue(I.getOperand(2));
  setValue(&I, DAG.getNode(ISD::SELECT, TrueVal.getValueType(), Cond,
                           TrueVal, FalseVal));
}

void SelectionDAGLowering::visitCast(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  MVT::ValueType SrcTy = TLI.getValueType(I.getOperand(0)->getType());
  MVT::ValueType DestTy = TLI.getValueType(I.getType());

  if (N.getValueType() == DestTy) {
    setValue(&I, N);  // noop cast.
  } else if (isInteger(SrcTy)) {
    if (isInteger(DestTy)) {        // Int -> Int cast
      if (DestTy < SrcTy)           // Truncating cast?
        setValue(&I, DAG.getNode(ISD::TRUNCATE, DestTy, N));
      else if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SIGN_EXTEND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::ZERO_EXTEND, DestTy, N));
    } else {                        // Int -> FP cast
      if (I.getOperand(0)->getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::SINT_TO_FP, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::UINT_TO_FP, DestTy, N));
    }
  } else {
    assert(isFloatingPoint(SrcTy) && "Unknown value type!");
    if (isFloatingPoint(DestTy)) {  // FP -> FP cast
      if (DestTy < SrcTy)           // Rounding cast?
        setValue(&I, DAG.getNode(ISD::FP_ROUND, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_EXTEND, DestTy, N));
    } else {                        // FP -> Int cast.
      if (I.getType()->isSigned())
        setValue(&I, DAG.getNode(ISD::FP_TO_SINT, DestTy, N));
      else
        setValue(&I, DAG.getNode(ISD::FP_TO_UINT, DestTy, N));
    }
  }
}

void SelectionDAGLowering::visitGetElementPtr(User &I) {
  SDOperand N = getValue(I.getOperand(0));
  const Type *Ty = I.getOperand(0)->getType();
  const Type *UIntPtrTy = TD.getIntPtrType();

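  // A GEP is lowered to explicit address arithmetic: each struct index adds a
  // constant field offset from the StructLayout, and each array/pointer index
  // adds Idx * sizeof(element), with the index first widened or truncated to
  // the pointer width.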
  for (GetElementPtrInst::op_iterator OI = I.op_begin()+1, E = I.op_end();
       OI != E; ++OI) {
    Value *Idx = *OI;
    if (const StructType *StTy = dyn_cast<StructType> (Ty)) {
      unsigned Field = cast<ConstantUInt>(Idx)->getValue();
      if (Field) {
        // N = N + Offset
        uint64_t Offset = TD.getStructLayout(StTy)->MemberOffsets[Field];
        N = DAG.getNode(ISD::ADD, N.getValueType(), N,
                        getIntPtrConstant(Offset));
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();
      if (!isa<Constant>(Idx) || !cast<Constant>(Idx)->isNullValue()) {
        // N = N + Idx * ElementSize;
        uint64_t ElementSize = TD.getTypeSize(Ty);
        SDOperand IdxN = getValue(Idx), Scale = getIntPtrConstant(ElementSize);

        // If the index is smaller or larger than intptr_t, truncate or extend
        // it.
        if (IdxN.getValueType() < Scale.getValueType()) {
          if (Idx->getType()->isSigned())
            IdxN = DAG.getNode(ISD::SIGN_EXTEND, Scale.getValueType(), IdxN);
          else
            IdxN = DAG.getNode(ISD::ZERO_EXTEND, Scale.getValueType(), IdxN);
        } else if (IdxN.getValueType() > Scale.getValueType())
          IdxN = DAG.getNode(ISD::TRUNCATE, Scale.getValueType(), IdxN);

        IdxN = DAG.getNode(ISD::MUL, N.getValueType(), IdxN, Scale);

        N = DAG.getNode(ISD::ADD, N.getValueType(), N, IdxN);
      }
    }
  }
  setValue(&I, N);
}

void SelectionDAGLowering::visitAlloca(AllocaInst &I) {
  // If this is a fixed sized alloca in the entry block of the function,
  // allocate it statically on the stack.
  if (FuncInfo.StaticAllocaMap.count(&I))
    return;   // getValue will auto-populate this.

  const Type *Ty = I.getAllocatedType();
  uint64_t TySize = TLI.getTargetData().getTypeSize(Ty);
  unsigned Align = TLI.getTargetData().getTypeAlignment(Ty);

  SDOperand AllocSize = getValue(I.getArraySize());

  assert(AllocSize.getValueType() == TLI.getPointerTy() &&
         "FIXME: should extend or truncate to pointer size!");

  AllocSize = DAG.getNode(ISD::MUL, TLI.getPointerTy(), AllocSize,
                          getIntPtrConstant(TySize));

  // Handle alignment.  If the requested alignment is less than or equal to
  // the stack alignment, ignore it and round the size of the allocation up to
  // the stack alignment size.  If the requested alignment is greater than the
  // stack alignment, we record it in the DYNAMIC_STACKALLOC node.
  unsigned StackAlign =
    TLI.getTargetMachine().getFrameInfo()->getStackAlignment();
  if (Align <= StackAlign) {
    Align = 0;
    // Add SA-1 to the size.
    AllocSize = DAG.getNode(ISD::ADD, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(StackAlign-1));
    // Mask out the low bits for alignment purposes.
    AllocSize = DAG.getNode(ISD::AND, AllocSize.getValueType(), AllocSize,
                            getIntPtrConstant(~(uint64_t)(StackAlign-1)));
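    // Together these round the size up to the next multiple of StackAlign,
    // e.g. a 20-byte request with a 16-byte stack alignment becomes
    // (20+15) & ~15 == 32.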
  }

  SDOperand DSA = DAG.getNode(ISD::DYNAMIC_STACKALLOC, AllocSize.getValueType(),
                              DAG.getRoot(), AllocSize,
                              getIntPtrConstant(Align));
  DAG.setRoot(setValue(&I, DSA).getValue(1));

  // Inform the Frame Information that we have just allocated a variable-sized
  // object.
  CurMBB->getParent()->getFrameInfo()->CreateVariableSizedObject();
}


void SelectionDAGLowering::visitLoad(LoadInst &I) {
  SDOperand Ptr = getValue(I.getOperand(0));
  SDOperand L = DAG.getLoad(TLI.getValueType(I.getType()), DAG.getRoot(), Ptr);
  DAG.setRoot(setValue(&I, L).getValue(1));
}


void SelectionDAGLowering::visitStore(StoreInst &I) {
  Value *SrcV = I.getOperand(0);
  SDOperand Src = getValue(SrcV);
  SDOperand Ptr = getValue(I.getOperand(1));
  DAG.setRoot(DAG.getNode(ISD::STORE, MVT::Other, DAG.getRoot(), Src, Ptr));
  return;
}

void SelectionDAGLowering::visitCall(CallInst &I) {
  const char *RenameFn = 0;
  if (Function *F = I.getCalledFunction())
    switch (F->getIntrinsicID()) {
    case 0: break;  // Not an intrinsic.
    case Intrinsic::vastart:  visitVAStart(I); return;
    case Intrinsic::vaend:    visitVAEnd(I); return;
    case Intrinsic::vacopy:   visitVACopy(I); return;
    case Intrinsic::returnaddress: visitFrameReturnAddress(I, false); return;
    case Intrinsic::frameaddress:  visitFrameReturnAddress(I, true); return;
    default:
      // FIXME: IMPLEMENT THESE.
      // readport, writeport, readio, writeio
      assert(0 && "This intrinsic is not implemented yet!");
      return;
    case Intrinsic::setjmp:  RenameFn = "setjmp"; break;
    case Intrinsic::longjmp: RenameFn = "longjmp"; break;
    case Intrinsic::memcpy:  visitMemCpy(I); return;
    case Intrinsic::memset:  visitMemSet(I); return;
    case Intrinsic::memmove: visitMemMove(I); return;

    case Intrinsic::isunordered:
      setValue(&I, DAG.getSetCC(ISD::SETUO, getValue(I.getOperand(1)),
                                getValue(I.getOperand(2))));
      return;
    }

  SDOperand Callee;
  if (!RenameFn)
    Callee = getValue(I.getOperand(0));
  else
    Callee = DAG.getExternalSymbol(RenameFn, TLI.getPointerTy());
  std::vector<std::pair<SDOperand, const Type*> > Args;

  for (unsigned i = 1, e = I.getNumOperands(); i != e; ++i) {
    Value *Arg = I.getOperand(i);
    SDOperand ArgNode = getValue(Arg);
    Args.push_back(std::make_pair(ArgNode, Arg->getType()));
  }

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), I.getType(), Callee, Args, DAG);
  if (I.getType() != Type::VoidTy)
    setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitMalloc(MallocInst &I) {
  SDOperand Src = getValue(I.getOperand(0));

  MVT::ValueType IntPtr = TLI.getPointerTy();
  // FIXME: Extend or truncate to the intptr size.
  assert(Src.getValueType() == IntPtr && "Need to adjust the amount!");

  // Scale the source by the type size.
  uint64_t ElementSize = TD.getTypeSize(I.getType()->getElementType());
  Src = DAG.getNode(ISD::MUL, Src.getValueType(),
                    Src, getIntPtrConstant(ElementSize));

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(Src, TLI.getTargetData().getIntPtrType()));

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), I.getType(),
                    DAG.getExternalSymbol("malloc", IntPtr),
                    Args, DAG);
  setValue(&I, Result.first);  // Pointers always fit in registers
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitFree(FreeInst &I) {
  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(getValue(I.getOperand(0)),
                                TLI.getTargetData().getIntPtrType()));
  MVT::ValueType IntPtr = TLI.getPointerTy();
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), Type::VoidTy,
                    DAG.getExternalSymbol("free", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}

std::pair<SDOperand, SDOperand>
TargetLowering::LowerVAStart(SDOperand Chain, SelectionDAG &DAG) {
  // We have no sane default behavior; just emit a useful error message and
  // bail out.
  std::cerr << "Variable arguments handling not implemented on this target!\n";
  abort();
}

SDOperand TargetLowering::LowerVAEnd(SDOperand Chain, SDOperand L,
                                     SelectionDAG &DAG) {
  // Default to a noop.
  return Chain;
}

std::pair<SDOperand,SDOperand>
TargetLowering::LowerVACopy(SDOperand Chain, SDOperand L, SelectionDAG &DAG) {
  // Default to returning the input list.
  return std::make_pair(L, Chain);
}

std::pair<SDOperand,SDOperand>
TargetLowering::LowerVAArgNext(bool isVANext, SDOperand Chain, SDOperand VAList,
                               const Type *ArgTy, SelectionDAG &DAG) {
  // We have no sane default behavior; just emit a useful error message and
  // bail out.
  std::cerr << "Variable arguments handling not implemented on this target!\n";
  abort();
}


void SelectionDAGLowering::visitVAStart(CallInst &I) {
  std::pair<SDOperand,SDOperand> Result = TLI.LowerVAStart(DAG.getRoot(), DAG);
  setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitVAArg(VAArgInst &I) {
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerVAArgNext(false, DAG.getRoot(), getValue(I.getOperand(0)),
                       I.getType(), DAG);
  setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitVANext(VANextInst &I) {
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerVAArgNext(true, DAG.getRoot(), getValue(I.getOperand(0)),
                       I.getArgType(), DAG);
  setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitVAEnd(CallInst &I) {
  DAG.setRoot(TLI.LowerVAEnd(DAG.getRoot(), getValue(I.getOperand(1)), DAG));
}

void SelectionDAGLowering::visitVACopy(CallInst &I) {
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerVACopy(DAG.getRoot(), getValue(I.getOperand(1)), DAG);
  setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}


// It is always conservatively correct for llvm.returnaddress and
// llvm.frameaddress to return 0.
std::pair<SDOperand, SDOperand>
TargetLowering::LowerFrameReturnAddress(bool isFrameAddr, SDOperand Chain,
                                        unsigned Depth, SelectionDAG &DAG) {
  return std::make_pair(DAG.getConstant(0, getPointerTy()), Chain);
}

void SelectionDAGLowering::visitFrameReturnAddress(CallInst &I, bool isFrame) {
  unsigned Depth = (unsigned)cast<ConstantUInt>(I.getOperand(1))->getValue();
  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerFrameReturnAddress(isFrame, DAG.getRoot(), Depth, DAG);
  setValue(&I, Result.first);
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitMemSet(CallInst &I) {
  MVT::ValueType IntPtr = TLI.getPointerTy();
  const Type *IntPtrTy = TLI.getTargetData().getIntPtrType();

  // Extend the ubyte argument to be an int value for the call.
  SDOperand Val = getValue(I.getOperand(2));
  Val = DAG.getNode(ISD::ZERO_EXTEND, MVT::i32, Val);

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(getValue(I.getOperand(1)), IntPtrTy));
  Args.push_back(std::make_pair(Val, Type::IntTy));
  Args.push_back(std::make_pair(getValue(I.getOperand(3)), IntPtrTy));

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), Type::VoidTy,
                    DAG.getExternalSymbol("memset", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitMemCpy(CallInst &I) {
  MVT::ValueType IntPtr = TLI.getPointerTy();
  const Type *IntPtrTy = TLI.getTargetData().getIntPtrType();

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(getValue(I.getOperand(1)), IntPtrTy));
  Args.push_back(std::make_pair(getValue(I.getOperand(2)), IntPtrTy));
  Args.push_back(std::make_pair(getValue(I.getOperand(3)), IntPtrTy));

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), Type::VoidTy,
                    DAG.getExternalSymbol("memcpy", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}

void SelectionDAGLowering::visitMemMove(CallInst &I) {
  MVT::ValueType IntPtr = TLI.getPointerTy();
  const Type *IntPtrTy = TLI.getTargetData().getIntPtrType();

  std::vector<std::pair<SDOperand, const Type*> > Args;
  Args.push_back(std::make_pair(getValue(I.getOperand(1)), IntPtrTy));
  Args.push_back(std::make_pair(getValue(I.getOperand(2)), IntPtrTy));
  Args.push_back(std::make_pair(getValue(I.getOperand(3)), IntPtrTy));

  std::pair<SDOperand,SDOperand> Result =
    TLI.LowerCallTo(DAG.getRoot(), Type::VoidTy,
                    DAG.getExternalSymbol("memmove", IntPtr), Args, DAG);
  DAG.setRoot(Result.second);
}

unsigned SelectionDAGISel::MakeReg(MVT::ValueType VT) {
  return RegMap->createVirtualRegister(TLI.getRegClassFor(VT));
}



bool SelectionDAGISel::runOnFunction(Function &Fn) {
  MachineFunction &MF = MachineFunction::construct(&Fn, TLI.getTargetMachine());
  RegMap = MF.getSSARegMap();
  DEBUG(std::cerr << "\n\n\n=== " << Fn.getName() << "\n");

  FunctionLoweringInfo FuncInfo(TLI, Fn, MF);

  for (Function::iterator I = Fn.begin(), E = Fn.end(); I != E; ++I)
    SelectBasicBlock(I, MF, FuncInfo);

  return true;
}


void SelectionDAGISel::CopyValueToVirtualRegister(SelectionDAGLowering &SDL,
                                                  Value *V, unsigned Reg) {
  SelectionDAG &DAG = SDL.DAG;
  DAG.setRoot(DAG.getCopyToReg(DAG.getRoot(), SDL.getValue(V), Reg));
}

void SelectionDAGISel::BuildSelectionDAG(SelectionDAG &DAG, BasicBlock *LLVMBB,
       std::vector<std::pair<MachineInstr*, unsigned> > &PHINodesToUpdate,
       FunctionLoweringInfo &FuncInfo) {
  SelectionDAGLowering SDL(DAG, TLI, FuncInfo);

  // If this is the entry block, emit arguments.
  Function *F = LLVMBB->getParent();
  if (LLVMBB == &F->front()) {
    // FIXME: If an argument is only used in one basic block, we could directly
    // emit it (ONLY) into that block, not emitting the COPY_TO_VREG node.  This
    // would improve codegen in several cases on X86 by allowing the loads to be
    // folded into the user operation.
    std::vector<SDOperand> Args = TLI.LowerArguments(*LLVMBB->getParent(), DAG);

    unsigned a = 0;
    for (Function::aiterator AI = F->abegin(), E = F->aend(); AI != E; ++AI,++a)
      if (!AI->use_empty()) {
        SDL.setValue(AI, Args[a]);
        CopyValueToVirtualRegister(SDL, AI, FuncInfo.ValueMap[AI]);
      }
  }

  BB = FuncInfo.MBBMap[LLVMBB];
  SDL.setCurrentBasicBlock(BB);

  // Lower all of the non-terminator instructions.
  for (BasicBlock::iterator I = LLVMBB->begin(), E = --LLVMBB->end();
       I != E; ++I)
    SDL.visit(*I);

  // Ensure that all instructions which are used outside of their defining
  // blocks are available as virtual registers.
  for (BasicBlock::iterator I = LLVMBB->begin(), E = LLVMBB->end(); I != E;++I)
    if (!I->use_empty()) {
      std::map<const Value*, unsigned>::iterator VMI =FuncInfo.ValueMap.find(I);
      if (VMI != FuncInfo.ValueMap.end())
        CopyValueToVirtualRegister(SDL, I, VMI->second);
    }

  // Handle PHI nodes in successor blocks.  Emit code into the SelectionDAG to
  // ensure constants are generated when needed.  Remember the virtual registers
  // that need to be added to the Machine PHI nodes as input.  We cannot just
  // directly add them, because expansion might result in multiple MBB's for one
  // BB.  As such, the start of the BB might correspond to a different MBB than
  // the end.
  //

  // Emit constants only once even if used by multiple PHI nodes.
  std::map<Constant*, unsigned> ConstantsOut;

  // Check successor blocks' PHI nodes that expect a constant to be available
  // from this block.
  TerminatorInst *TI = LLVMBB->getTerminator();
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    BasicBlock *SuccBB = TI->getSuccessor(succ);
    MachineBasicBlock::iterator MBBI = FuncInfo.MBBMap[SuccBB]->begin();
    PHINode *PN;

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::iterator I = SuccBB->begin();
         (PN = dyn_cast<PHINode>(I)); ++I)
      if (!PN->use_empty()) {
        unsigned Reg;
        Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);
        if (Constant *C = dyn_cast<Constant>(PHIOp)) {
          unsigned &RegOut = ConstantsOut[C];
          if (RegOut == 0) {
            RegOut = FuncInfo.CreateRegForValue(C);
            CopyValueToVirtualRegister(SDL, C, RegOut);
          }
          Reg = RegOut;
        } else {
          Reg = FuncInfo.ValueMap[PHIOp];
          if (Reg == 0) {
            assert(isa<AllocaInst>(PHIOp) &&
                   FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(PHIOp)) &&
                   "Didn't codegen value into a register!??");
            Reg = FuncInfo.CreateRegForValue(PHIOp);
            CopyValueToVirtualRegister(SDL, PHIOp, Reg);
          }
        }

        // Remember that this register needs to be added to the machine PHI
        // node as the input for this MBB.
        unsigned NumElements =
          TLI.getNumElements(TLI.getValueType(PN->getType()));
        for (unsigned i = 0, e = NumElements; i != e; ++i)
          PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg+i));
      }
  }
  ConstantsOut.clear();

  // Lower the terminator after the copies are emitted.
  SDL.visit(*LLVMBB->getTerminator());
}

void SelectionDAGISel::SelectBasicBlock(BasicBlock *LLVMBB, MachineFunction &MF,
                                        FunctionLoweringInfo &FuncInfo) {
  SelectionDAG DAG(TLI.getTargetMachine(), MF);
  CurDAG = &DAG;
  std::vector<std::pair<MachineInstr*, unsigned> > PHINodesToUpdate;

  // First step, lower LLVM code to some DAG.  This DAG may use operations and
  // types that are not supported by the target.
  BuildSelectionDAG(DAG, LLVMBB, PHINodesToUpdate, FuncInfo);

  DEBUG(std::cerr << "Lowered selection DAG:\n");
  DEBUG(DAG.dump());

  // Second step, hack on the DAG until it only uses operations and types that
  // the target supports.
  DAG.Legalize(TLI);

  DEBUG(std::cerr << "Legalized selection DAG:\n");
  DEBUG(DAG.dump());

  // Finally, instruction select all of the operations to machine code, adding
  // the code to the MachineBasicBlock.
  InstructionSelectBasicBlock(DAG);

  DEBUG(std::cerr << "Selected machine code:\n");
  DEBUG(BB->dump());

  // Now that we know which MBB is the last one the LLVM BB expanded into,
  // update the PHI nodes in the successor blocks.
  for (unsigned i = 0, e = PHINodesToUpdate.size(); i != e; ++i) {
    MachineInstr *PHI = PHINodesToUpdate[i].first;
    assert(PHI->getOpcode() == TargetInstrInfo::PHI &&
           "This is not a machine PHI node that we are updating!");
    PHI->addRegOperand(PHINodesToUpdate[i].second);
    PHI->addMachineBasicBlockOperand(BB);
  }
}